Extensible middleware for customizing agent behavior with lifecycle hooks and state management.
function createMiddleware<
TSchema extends InteropZodObject | undefined = undefined,
TContextSchema extends InteropZodObject | InteropZodDefault<InteropZodObject> | InteropZodOptional<InteropZodObject> | undefined = undefined
>(
middleware: AgentMiddleware<TSchema, TContextSchema>
): AgentMiddleware<TSchema, TContextSchema>;
interface AgentMiddleware<TSchema = any, TContextSchema = any, TFullContext = any> {
name: string;
stateSchema?: TSchema;
contextSchema?: TContextSchema;
tools?: (ClientTool | ServerTool)[];
wrapToolCall?: WrapToolCallHook<TSchema, TFullContext>;
wrapModelCall?: WrapModelCallHook<TSchema, TFullContext>;
beforeAgent?: BeforeAgentHook<TSchema, TFullContext>;
beforeModel?: BeforeModelHook<TSchema, TFullContext>;
afterModel?: AfterModelHook<TSchema, TFullContext>;
afterAgent?: AfterAgentHook<TSchema, TFullContext>;
}

Example:
import { createMiddleware, createAgent } from "langchain";
import { z } from "zod";
const loggingMiddleware = createMiddleware({
name: "logging",
stateSchema: z.object({
logCount: z.number().default(0),
}),
beforeModel: async (state, runtime) => {
console.log("Calling model");
return { logCount: state.logCount + 1 };
},
});
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
middleware: [loggingMiddleware],
});

type BeforeAgentHook<TSchema = any, TContext = unknown> =
| BeforeAgentHandler<NormalizedSchemaInput<TSchema>, TContext>
| { hook: BeforeAgentHandler<NormalizedSchemaInput<TSchema>, TContext>; canJumpTo?: JumpToTarget[] };
type BeforeAgentHandler<TSchema, TContext> = (
state: TSchema,
runtime: Runtime<TContext>
) => Promise<MiddlewareResult<Partial<TSchema>>> | MiddlewareResult<Partial<TSchema>>;
type BeforeModelHook<TSchema = any, TContext = unknown> =
| BeforeModelHandler<NormalizedSchemaInput<TSchema>, TContext>
| { hook: BeforeModelHandler<NormalizedSchemaInput<TSchema>, TContext>; canJumpTo?: JumpToTarget[] };
type BeforeModelHandler<TSchema, TContext> = (
state: TSchema,
runtime: Runtime<TContext>
) => Promise<MiddlewareResult<Partial<TSchema>>> | MiddlewareResult<Partial<TSchema>>;
type AfterModelHook<TSchema = any, TContext = unknown> =
| AfterModelHandler<NormalizedSchemaInput<TSchema>, TContext>
| { hook: AfterModelHandler<NormalizedSchemaInput<TSchema>, TContext>; canJumpTo?: JumpToTarget[] };
type AfterModelHandler<TSchema, TContext> = (
state: TSchema,
runtime: Runtime<TContext>
) => Promise<MiddlewareResult<Partial<TSchema>>> | MiddlewareResult<Partial<TSchema>>;
type AfterAgentHook<TSchema = any, TContext = unknown> =
| AfterAgentHandler<NormalizedSchemaInput<TSchema>, TContext>
| { hook: AfterAgentHandler<NormalizedSchemaInput<TSchema>, TContext>; canJumpTo?: JumpToTarget[] };
type AfterAgentHandler<TSchema, TContext> = (
state: TSchema,
runtime: Runtime<TContext>
) => Promise<MiddlewareResult<Partial<TSchema>>> | MiddlewareResult<Partial<TSchema>>;
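Each hook can be supplied as a bare handler or as an object with `canJumpTo`, which whitelists early exits; the handler then returns a `jumpTo` target in its result (see `MiddlewareResult`, defined just below). A minimal sketch, assuming `"end"` is a valid `JumpToTarget` and that `state.messages` is available via the built-in agent state:

import { createMiddleware } from "langchain";

const guardrailMiddleware = createMiddleware({
  name: "guardrail",
  afterModel: {
    canJumpTo: ["end"],
    hook: (state) => {
      const last = state.messages.at(-1);
      // Hypothetical rule: end the run early when the model output is flagged.
      if (typeof last?.content === "string" && last.content.includes("BLOCKED")) {
        return { jumpTo: "end" };
      }
    },
  },
});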
type MiddlewareResult<TState> = (TState & { jumpTo?: JumpToTarget }) | void;

type WrapToolCallHook<TSchema = any, TContext = unknown> = (
request: ToolCallRequest<NormalizedSchemaInput<TSchema>, TContext>,
handler: ToolCallHandler<NormalizedSchemaInput<TSchema>, TContext>
) => Promise<ToolMessage | Command> | ToolMessage | Command;
type ToolCallHandler<TSchema = Record<string, unknown>, TContext = unknown> = (
request: ToolCallRequest<TSchema, TContext>
) => Promise<ToolMessage | Command> | ToolMessage | Command;
interface ToolCallRequest<TState extends Record<string, unknown> = Record<string, unknown>, TContext = unknown> {
toolCall: ToolCall;
tool: ClientTool | ServerTool;
state: TState & AgentBuiltInState;
runtime: Runtime<TContext>;
}
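`wrapToolCall` intercepts every tool execution: calling `handler(request)` delegates to the wrapped tool, while returning without calling it short-circuits the call. A minimal timing sketch using the `ToolCallRequest` shape above:

import { createMiddleware } from "langchain";

const toolTimingMiddleware = createMiddleware({
  name: "tool-timing",
  wrapToolCall: async (request, handler) => {
    const started = Date.now();
    try {
      // Delegate to the underlying tool (and any inner middleware).
      return await handler(request);
    } finally {
      console.log(`${request.toolCall.name} took ${Date.now() - started}ms`);
    }
  },
});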
type WrapModelCallHook<TSchema = any, TContext = unknown> = (
request: ModelRequest<NormalizedSchemaInput<TSchema>, TContext>,
handler: WrapModelCallHandler<TSchema, TContext>
) => Promise<AIMessage> | AIMessage;
type WrapModelCallHandler<TSchema = any, TContext = unknown> = (
request: Omit<ModelRequest<NormalizedSchemaInput<TSchema>, TContext>, "systemPrompt" | "systemMessage"> & {
systemPrompt?: string;
systemMessage?: SystemMessage;
}
) => Promise<AIMessage> | AIMessage;
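`wrapModelCall` can rewrite the request before delegating to `handler`, or wrap it with retries and fallbacks; the `ModelRequest` it receives is defined below. A minimal sketch that appends to the system prompt:

import { createMiddleware } from "langchain";

const conciseMiddleware = createMiddleware({
  name: "concise",
  wrapModelCall: (request, handler) =>
    // Rewrite the system prompt, then delegate to the model.
    handler({
      ...request,
      systemPrompt: `${request.systemPrompt}\nAnswer as concisely as possible.`,
    }),
});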
interface ModelRequest<TState extends Record<string, unknown> = Record<string, unknown>, TContext = unknown> {
model: LanguageModelLike;
messages: BaseMessage[];
systemPrompt: string;
systemMessage: SystemMessage;
toolChoice?: "auto" | "none" | "required" | { type: "function"; function: { name: string } };
tools: (ClientTool | ServerTool)[];
state: TState & AgentBuiltInState;
runtime: Runtime<TContext>;
modelSettings?: Record<string, unknown>;
}

interface Runtime<TContext = unknown> {
metadata: Record<string, any>;
signal: AbortSignal;
context: TContext;
interrupt: (value: any) => void;
configurable?: {
thread_id?: string;
[key: string]: unknown;
};
}
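`runtime.context` carries read-only, per-invocation data validated by a middleware's `contextSchema`. A minimal sketch; the `userName` field and the `{ context: ... }` invoke-option shape are illustrative assumptions:

import { createMiddleware, createAgent } from "langchain";
import { z } from "zod";

const userAwareMiddleware = createMiddleware({
  name: "user-aware",
  contextSchema: z.object({ userName: z.string().default("anonymous") }),
  beforeModel: (state, runtime) => {
    // Typed access to invocation-scoped context.
    console.log(`Model call for ${runtime.context.userName}`);
  },
});

const agent = createAgent({
  model: "openai:gpt-4o",
  middleware: [userAwareMiddleware],
});

await agent.invoke(
  { messages: [{ role: "user", content: "Hi" }] },
  { context: { userName: "Ada" } }
);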

function humanInTheLoopMiddleware(config: HumanInTheLoopMiddlewareConfig): AgentMiddleware;
type HumanInTheLoopMiddlewareConfig = {
interruptOn?: Record<string, boolean | InterruptOnConfig>;
descriptionPrefix?: string;
};
interface InterruptOnConfig {
allowedDecisions: DecisionType[];
description?: string | DescriptionFactory;
argsSchema?: Record<string, any>;
}
type DescriptionFactory = (
toolCall: ToolCall,
state: AgentBuiltInState,
runtime: Runtime<unknown>
) => string | Promise<string>;
type DecisionType = "approve" | "edit" | "reject";
interface HITLRequest {
actionRequests: ActionRequest[];
reviewConfigs: ReviewConfig[];
}
interface ActionRequest {
name: string;
args: Record<string, any>;
description?: string;
}
interface ReviewConfig {
actionName: string;
allowedDecisions: DecisionType[];
argsSchema?: Record<string, any>;
}
interface HITLResponse {
decisions: Decision[];
}
type Decision = ApproveDecision | EditDecision | RejectDecision;
interface ApproveDecision {
type: "approve";
}
interface EditDecision {
type: "edit";
editedAction: Action;
}
interface RejectDecision {
type: "reject";
message?: string;
}
interface Action {
name: string;
args: Record<string, any>;
}

Example:
import { humanInTheLoopMiddleware, createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [deleteFileTool, searchTool],
middleware: [
humanInTheLoopMiddleware({
interruptOn: { delete_file: true },
}),
],
});
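When an interrupt fires, the run pauses with an `HITLRequest`; resuming supplies an `HITLResponse`. A minimal sketch of the resume flow, assuming LangGraph's `Command`/checkpointer pattern and a hypothetical `delete_file` tool:

import { humanInTheLoopMiddleware, createAgent } from "langchain";
import { Command, MemorySaver } from "@langchain/langgraph";

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [deleteFileTool],
  middleware: [humanInTheLoopMiddleware({ interruptOn: { delete_file: true } })],
  // Interrupts require a checkpointer so the paused run can be resumed.
  checkpointer: new MemorySaver(),
});

const config = { configurable: { thread_id: "review-1" } };
// The run pauses here; the interrupt payload carries the HITLRequest.
const result = await agent.invoke(
  { messages: [{ role: "user", content: "Delete temp.txt" }] },
  config
);
// Resume on the same thread with an HITLResponse-shaped decision list.
await agent.invoke(
  new Command({ resume: { decisions: [{ type: "approve" }] } }),
  config
);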

function summarizationMiddleware(config: SummarizationMiddlewareConfig): AgentMiddleware;
interface SummarizationMiddlewareConfig {
contextSize: ContextSize;
keepSize: KeepSize;
summarizationModel?: LanguageModelLike;
summaryPrompt?: string;
tokenCounter?: TokenCounter;
}
type ContextSize = number | ((messages: BaseMessage[]) => number);
type KeepSize = number | ((messages: BaseMessage[]) => number);
type TokenCounter = (messages: BaseMessage[]) => number;
const DEFAULT_SUMMARY_PROMPT: string;

Example:
import { summarizationMiddleware, createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
middleware: [
summarizationMiddleware({
contextSize: 4000,
keepSize: 10,
}),
],
});

function dynamicSystemPromptMiddleware(
fn: (state: AgentBuiltInState, runtime: Runtime) => string | SystemMessage | Promise<string | SystemMessage>
): AgentMiddleware;

Example:
import { dynamicSystemPromptMiddleware, createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
middleware: [
dynamicSystemPromptMiddleware((state, runtime) => {
const time = new Date().toLocaleTimeString();
return `You are a helpful assistant. Current time: ${time}`;
}),
],
});

function llmToolSelectorMiddleware(config: LLMToolSelectorConfig): AgentMiddleware;
interface LLMToolSelectorConfig {
model?: LanguageModelLike;
selectionPrompt?: string;
maxTools?: number;
}

Example:
import { llmToolSelectorMiddleware, createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool, calculatorTool, weatherTool, databaseTool],
middleware: [
llmToolSelectorMiddleware({ maxTools: 2 }),
],
});

function piiMiddleware(piiType: BuiltInPIIType | string, options?: PIIMiddlewareOptions): AgentMiddleware;
function piiRedactionMiddleware(config?: PIIRedactionMiddlewareConfig): AgentMiddleware;
type BuiltInPIIType = "email" | "credit_card" | "ip_address" | "mac_address" | "url";
interface PIIRedactionMiddlewareConfig {
types?: BuiltInPIIType[];
customDetectors?: PIIDetector[];
}
interface PIIMatch {
text: string;
start: number;
end: number;
}
type PIIDetector = (content: string) => PIIMatch[];
type PIIStrategy = "redact" | "mask" | "hash" | ((match: PIIMatch) => string);
interface RedactionRuleConfig {
piiType: BuiltInPIIType | string;
strategy: PIIStrategy;
detector?: PIIDetector | RegExp | string;
}
class PIIDetectionError extends Error {
constructor(public readonly piiType: string, public readonly matches: PIIMatch[]);
}
function detectEmail(content: string): PIIMatch[];
function detectCreditCard(content: string): PIIMatch[];
function detectIP(content: string): PIIMatch[];
function detectMacAddress(content: string): PIIMatch[];
function detectUrl(content: string): PIIMatch[];
function applyStrategy(content: string, matches: PIIMatch[], strategy: PIIStrategy, piiType: string): string;
function resolveRedactionRule(config: RedactionRuleConfig): ResolvedRedactionRule;

Example:
import { piiMiddleware, createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
middleware: [
piiMiddleware("email", { strategy: "mask" }),
piiMiddleware("credit_card", { strategy: "mask" }),
],
});
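Custom detectors plug into `piiRedactionMiddleware` via the `PIIDetector` signature above. A minimal sketch with a hypothetical employee-ID pattern:

import { piiRedactionMiddleware, createAgent } from "langchain";

// Hypothetical detector for IDs like "EMP-12345", returning PIIMatch objects.
const detectEmployeeId = (content: string) =>
  [...content.matchAll(/EMP-\d{5}/g)].map((m) => ({
    text: m[0],
    start: m.index ?? 0,
    end: (m.index ?? 0) + m[0].length,
  }));

const agent = createAgent({
  model: "openai:gpt-4o",
  middleware: [
    piiRedactionMiddleware({
      types: ["email"],
      customDetectors: [detectEmployeeId],
    }),
  ],
});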

function contextEditingMiddleware(config: ContextEditingMiddlewareConfig): AgentMiddleware;
interface ContextEditingMiddlewareConfig {
edits: ContextEdit[];
tokenCountMethod?: "approx" | "model";
}
interface ContextEdit {
apply(params: {
messages: BaseMessage[];
countTokens: TokenCounter;
model?: BaseLanguageModel;
}): void | Promise<void>;
}
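Any object implementing `apply` can serve as an edit; since `apply` returns void, edits take effect by mutating the `messages` array in place. A minimal sketch with an assumed 8,000-token budget:

import type { BaseMessage } from "@langchain/core/messages";

// Hypothetical edit: drop the oldest messages while over the token budget.
const trimOldestEdit = {
  async apply({
    messages,
    countTokens,
  }: {
    messages: BaseMessage[];
    countTokens: (messages: BaseMessage[]) => number;
  }) {
    while (messages.length > 2 && countTokens(messages) > 8000) {
      messages.splice(0, 1);
    }
  },
};

Pass it as `contextEditingMiddleware({ edits: [trimOldestEdit] })`.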
class ClearToolUsesEdit implements ContextEdit {
constructor(config?: ClearToolUsesEditConfig);
apply(params: { messages: BaseMessage[]; model: BaseLanguageModel; countTokens: TokenCounter }): Promise<void>;
}
interface ClearToolUsesEditConfig {
trigger?: ContextSize | ContextSize[];
keep?: KeepSize;
clearToolInputs?: boolean;
excludeTools?: string[];
placeholder?: string;
}
// Note: these ContextSize/KeepSize shapes are specific to context editing and differ from the summarization aliases above.
interface ContextSize {
tokens?: number;
messages?: number;
fraction?: number;
}
interface KeepSize {
messages?: number;
tokens?: number;
fraction?: number;
}

Example:
import { contextEditingMiddleware, ClearToolUsesEdit, createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
middleware: [
contextEditingMiddleware({
edits: [new ClearToolUsesEdit({ keep: { messages: 3 } })],
}),
],
});

// Tool Call Limit
function toolCallLimitMiddleware(config: ToolCallLimitConfig): AgentMiddleware;
interface ToolCallLimitConfig {
toolName?: string;
threadLimit?: number;
runLimit?: number;
exitBehavior?: "continue" | "error" | "end";
}
class ToolCallLimitExceededError extends Error {
threadCount: number;
runCount: number;
threadLimit: number | undefined;
runLimit: number | undefined;
toolName: string | undefined;
}
// Model Call Limit
function modelCallLimitMiddleware(config: ModelCallLimitMiddlewareConfig): AgentMiddleware;
interface ModelCallLimitMiddlewareConfig {
maxModelCalls: number;
errorMessage?: string;
}
// Model Retry
function modelRetryMiddleware(config?: ModelRetryMiddlewareConfig): AgentMiddleware;
interface ModelRetryMiddlewareConfig {
maxRetries?: number;
initialDelay?: number;
backoffMultiplier?: number;
maxDelay?: number;
shouldRetry?: (error: Error) => boolean;
}
// Model Fallback
function modelFallbackMiddleware(...fallbackModels: (string | LanguageModelLike)[]): AgentMiddleware;
// Tool Retry
function toolRetryMiddleware(config?: ToolRetryMiddlewareConfig): AgentMiddleware;
interface ToolRetryMiddlewareConfig {
maxRetries?: number;
initialDelay?: number;
backoffMultiplier?: number;
maxDelay?: number;
shouldRetry?: (error: Error, toolName: string) => boolean;
}

Examples:
import {
toolCallLimitMiddleware,
modelCallLimitMiddleware,
modelRetryMiddleware,
modelFallbackMiddleware,
toolRetryMiddleware,
createAgent
} from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool, calculatorTool],
middleware: [
toolCallLimitMiddleware({ runLimit: 5 }),
modelCallLimitMiddleware({ maxModelCalls: 10 }),
modelRetryMiddleware({ maxRetries: 3 }),
modelFallbackMiddleware("openai:gpt-3.5-turbo", "anthropic:claude-3-sonnet"),
toolRetryMiddleware({ maxRetries: 3 }),
],
});

// Tool Emulator
function toolEmulatorMiddleware(config: ToolEmulatorOptions): AgentMiddleware;
interface ToolEmulatorOptions {
emulatedTools: Record<string, any | ((args: Record<string, any>) => any)>;
logCalls?: boolean;
}
// Todo List
function todoListMiddleware(options?: TodoListMiddlewareOptions): AgentMiddleware;
interface TodoListMiddlewareOptions {
systemPrompt?: string;
toolDescription?: string;
}
const TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT: string;
interface TodoMiddlewareState {
todos: Todo[];
}
interface Todo {
content: string;
status: "pending" | "in_progress" | "completed";
}

Examples:
import { toolEmulatorMiddleware, todoListMiddleware, createAgent } from "langchain";
const testAgent = createAgent({
model: "openai:gpt-4o",
tools: [weatherTool, databaseTool],
middleware: [
toolEmulatorMiddleware({
emulatedTools: {
get_weather: { temperature: 72, condition: "sunny" },
query_database: (args) => `Mocked result for: ${args.query}`,
},
}),
],
});
const taskAgent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool, calculatorTool],
middleware: [todoListMiddleware()],
});

// OpenAI Moderation
function openAIModerationMiddleware(config?: OpenAIModerationMiddlewareOptions): AgentMiddleware;
interface OpenAIModerationMiddlewareOptions {
apiKey?: string;
throwOnFlag?: boolean;
onFlagged?: (categories: Record<string, boolean>) => void;
}
// Anthropic Prompt Caching
function anthropicPromptCachingMiddleware(config?: PromptCachingMiddlewareConfig): AgentMiddleware;
interface PromptCachingMiddlewareConfig {
strategy?: "auto" | "custom";
addCacheControl?: (messages: BaseMessage[]) => BaseMessage[];
}

Examples:
import { openAIModerationMiddleware, anthropicPromptCachingMiddleware, createAgent } from "langchain";
const moderatedAgent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
middleware: [
openAIModerationMiddleware({
throwOnFlag: false,
onFlagged: (categories) => console.log("Flagged:", categories),
}),
],
});
const cachedAgent = createAgent({
model: "anthropic:claude-3-5-sonnet",
tools: [searchTool],
systemPrompt: "You are a helpful assistant with extensive knowledge...",
middleware: [
anthropicPromptCachingMiddleware({ strategy: "auto" }),
],
});

function countTokensApproximately(messages: BaseMessage[]): number;
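`countTokensApproximately` matches the `TokenCounter` signature, so it can back any middleware that accepts one. A sketch, assuming it is exported alongside the middleware:

import { summarizationMiddleware, countTokensApproximately, createAgent } from "langchain";

const agent = createAgent({
  model: "openai:gpt-4o",
  middleware: [
    summarizationMiddleware({
      contextSize: 4000,
      keepSize: 10,
      // Character-based estimate; swap in a model-accurate counter if needed.
      tokenCounter: countTokensApproximately,
    }),
  ],
});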