LangGraph is a low-level orchestration framework for building stateful, multi-actor applications with LLMs.
Ready-to-use agent patterns including ReAct agents with tool calling, tool execution nodes, and routing utilities. These prebuilt components simplify common agent architectures.
Create a ReAct-style agent with reasoning and action loop. The agent uses a language model to reason about which tools to call and processes tool results iteratively.
/**
 * Create a ReAct-style agent graph: the model reasons, emits tool calls,
 * the tools execute, and results are fed back until no more tool calls remain.
 * @param params - Model, tools, and optional prompt/state/checkpoint configuration.
 * @returns A compiled state graph ready to invoke or stream.
 */
function createReactAgent<ModelType>(
params: CreateReactAgentParams<ModelType>
): CompiledStateGraph;
/** Parameters accepted by createReactAgent. */
interface CreateReactAgentParams<ModelType> {
// Chat model used for the reasoning step.
llm: ModelType;
// Tools the agent may call on each iteration.
tools: (StructuredToolInterface | DynamicTool | RunnableToolLike | Record<string, unknown>)[];
// Prompt hook operating on the raw message list.
// NOTE(review): presumably superseded by `stateModifier` — confirm against release notes.
messageModifier?: MessageModifier;
// Prompt hook operating on the full graph state (see StateModifier).
stateModifier?: StateModifier;
// Custom state schema; see the custom-state example below.
stateSchema?: AnnotationRoot<any> | InteropZodObject;
// Persists graph state between invocations (enables thread_id continuity and interrupts).
checkpointSaver?: BaseCheckpointSaver;
// Pause execution before these nodes (human-in-the-loop).
interruptBefore?: ("agent" | "tools")[];
// Pause execution after these nodes.
interruptAfter?: ("agent" | "tools")[];
// Optional long-term store made available to the agent.
store?: BaseStore;
}
/**
 * Ways to modify the message list before it reaches the model:
 * a system-prompt string, a SystemMessage, a (possibly async) function
 * over the messages, or a Runnable over the message list.
 */
type MessageModifier =
| string
| SystemMessage
| ((messages: BaseMessage[]) => BaseMessage[] | Promise<BaseMessage[]>)
| Runnable<BaseMessage[], BaseMessage[]>;
/**
 * Like MessageModifier, but receives the whole graph state rather than
 * just the message list, so custom state channels are visible to the hook.
 */
type StateModifier =
  | string
  | SystemMessage
  | ((state: typeof MessagesAnnotation.State) => BaseMessage[] | Promise<BaseMessage[]>)
  | Runnable<typeof MessagesAnnotation.State, BaseMessage[]>;

import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
// Demo tool: canned weather lookup keyed on the city name.
const getWeather = tool((input) => {
  const city = input.location.toLowerCase();
  const isSanFrancisco = city === "sf" || city === "san francisco";
  return isSanFrancisco ? "It's 60 degrees and foggy." : "It's 90 degrees and sunny.";
}, {
  name: "get_weather",
  description: "Get the current weather for a location",
  schema: z.object({
    location: z.string().describe("The city name")
  })
});
// Deterministic model configuration for the example.
const model = new ChatOpenAI({
model: "gpt-4",
temperature: 0
});
// Wire the model and tool into a ReAct loop.
const agent = createReactAgent({
llm: model,
tools: [getWeather]
});
// Run one turn; the agent decides whether to call get_weather.
const result = await agent.invoke({
messages: [{ role: "user", content: "What's the weather in SF?" }]
});
console.log(result.messages[result.messages.length - 1].content);

// Example: system prompt supplied as a messageModifier string.
const agent = createReactAgent({
  llm: model,
  tools: [searchTool, calculatorTool],
  messageModifier: "You are a helpful research assistant. Always explain your reasoning."
});

// Example: stateModifier as a function that prepends a system message.
const stateModifier = (state: typeof MessagesAnnotation.State) => {
  const systemMessage = new SystemMessage("You are a helpful assistant");
  return [systemMessage, ...state.messages];
};
const agent = createReactAgent({
  llm: model,
  tools: tools,
  stateModifier
});

import { MemorySaver } from "@langchain/langgraph-checkpoint";
// Example: persistence — a checkpointer keyed by thread_id retains conversation state.
const agent = createReactAgent({
  llm: model,
  tools: tools,
  checkpointSaver: new MemorySaver()
});
await agent.invoke(input, {
  configurable: { thread_id: "conversation-1" }
});

// Example: human-in-the-loop — interrupt before the tools node runs.
const agent = createReactAgent({
  llm: model,
  tools: tools,
  checkpointSaver: new MemorySaver(),
  interruptBefore: ["tools"] // Pause before tool execution
});
// First call - will interrupt
await agent.invoke(input, { configurable: { thread_id: "1" } });
// Review state
const state = await agent.getState({ configurable: { thread_id: "1" } });
console.log(state.next); // ["tools"]
// Resume
await agent.invoke(null, { configurable: { thread_id: "1" } });

import { Annotation } from "@langchain/langgraph";
// Example: custom state schema adding a `context` channel alongside messages.
const CustomState = Annotation.Root({
  messages: Annotation<BaseMessage[]>({
    reducer: (a, b) => a.concat(b),
    default: () => []
  }),
  context: Annotation<string>
});
const agent = createReactAgent({
  llm: model,
  tools: tools,
  stateSchema: CustomState
});

Create typed annotation for ReAct agent state.
function createReactAgentAnnotation<T>(): AnnotationRoot<{
messages: BaseMessage[];
} & T>;const AgentState = createReactAgentAnnotation<{
customField: string;
}>();
type State = typeof AgentState.State;
// { messages: BaseMessage[], customField: string }Node for executing tools from tool calls in messages. Processes all tool calls in the last AI message and returns tool results.
/**
 * Graph node that executes every tool call found on the last AI message
 * and returns the resulting ToolMessages.
 */
class ToolNode<T = any> {
constructor(
tools: (StructuredToolInterface | DynamicTool | RunnableToolLike)[],
options?: ToolNodeOptions
);
// Accepts either full graph state or a bare message list; the output shape mirrors the input.
invoke(
state: T | BaseMessage[],
config?: RunnableConfig
): Promise<{ messages: ToolMessage[] } | ToolMessage[]>;
}
/** Optional configuration for ToolNode. */
interface ToolNodeOptions {
  // Node name as it appears in the graph.
  name?: string;
  // Tracing tags attached to runs of this node.
  tags?: string[];
  // When true, tool errors are caught and returned as tool messages instead of thrown.
  handleToolErrors?: boolean;
}

import { ToolNode } from "@langchain/langgraph/prebuilt";
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// SECURITY(review): eval on tool input executes arbitrary code — acceptable in a
// demo, but use a math-expression parser for anything user-facing.
const calculator = tool((input) => {
  return eval(input.expression).toString();
}, {
  name: "calculator",
  description: "Evaluate a mathematical expression",
  schema: z.object({
    expression: z.string()
  })
});
const toolNode = new ToolNode([calculator]);
// Use in graph
const graph = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
  .addNode("tools", toolNode)
  .addConditionalEdges("agent", shouldContinue)
  .addEdge("tools", "agent")
  .compile();

// Example: ToolNode options — named node that surfaces tool errors as messages.
const toolNode = new ToolNode(tools, {
  name: "tool_executor",
  handleToolErrors: true // Catch errors and return them as tool messages
});

import { AIMessage } from "@langchain/core/messages";
const message = new AIMessage({
content: "",
tool_calls: [{
name: "get_weather",
args: { location: "SF" },
id: "call_123",
type: "tool_call"
}]
});
const result = await toolNode.invoke({ messages: [message] });
// { messages: [ToolMessage] }Routing condition for tool execution. Returns "tools" if the last message has tool calls, otherwise returns END.
function toolsCondition(
state: { messages: BaseMessage[] } | BaseMessage[]
): string;import { toolsCondition } from "@langchain/langgraph/prebuilt";
import { END } from "@langchain/langgraph";

// Wrap toolsCondition so the routing decision is explicit in the graph definition.
const shouldContinue = (state: typeof MessagesAnnotation.State) => {
  return toolsCondition(state);
  // Returns "tools" if last message has tool calls
  // Returns "__end__" otherwise
};
const graph = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
  .addNode("tools", new ToolNode(tools))
  .addEdge("__start__", "agent")
  .addConditionalEdges("agent", shouldContinue, {
    tools: "tools",
    [END]: END
  })
  .addEdge("tools", "agent")
  .compile();

State interface for ReAct agents.

/** Messages plus an optional structured response produced by the agent. */
interface AgentState<StructuredResponseType extends Record<string, any> = Record<string, any>> {
  messages: BaseMessage[];
  structuredResponse?: StructuredResponseType;
}

Executor for tool invocations (legacy interface).
/** @deprecated Legacy tool-execution interface — prefer ToolNode. */
class ToolExecutor {
  constructor(args: ToolExecutorArgs);
  invoke(input: ToolInvocationInterface): Promise<unknown>;
}
interface ToolExecutorArgs {
  tools: (StructuredToolInterface | DynamicTool | RunnableToolLike)[];
}
interface ToolInvocationInterface {
  // Name of the tool to invoke.
  tool: string;
  // Raw input forwarded to the tool.
  toolInput: unknown;
}

Add agent name to messages for multi-agent tracking.
/**
 * Wrap a node so its messages are tagged with the given agent name —
 * useful for attributing messages in multi-agent graphs.
 */
function withAgentName(
  node: RunnableLike,
  agentName: string,
  options?: { mode?: AgentNameMode }
): Runnable;
// Where the agent name is attached relative to the message content.
type AgentNameMode = "prepend" | "append";

import { withAgentName } from "@langchain/langgraph/prebuilt";
const namedAgent = withAgentName(agentNode, "ResearchAgent", {
  mode: "prepend"
});
const graph = new StateGraph(MessagesAnnotation)
  .addNode("researcher", namedAgent)
  .compile();

Types for human interrupt and response handling.
// Payload surfaced to a human reviewer at an interrupt point.
interface HumanInterrupt {
  value?: unknown;
}
// The human's reply used to resume execution.
interface HumanResponse {
  response: unknown;
}
// A requested action and its arguments.
interface ActionRequest {
  action: string;
  args?: unknown;
}
// Configuration for human-interrupt handling.
interface HumanInterruptConfig {
  version?: string;
}

State type for legacy agent executors.
interface AgentExecutorState {
  messages: BaseMessage[];
  // Next action chosen by the agent, or the final answer.
  agentOutcome?: AgentAction | AgentFinish;
}

State type for function calling executors.

interface FunctionCallingExecutorState {
  messages: BaseMessage[];
}

Legacy ReAct agent executor (deprecated - use createReactAgent instead).

/** @deprecated Use createReactAgent instead. */
function createAgentExecutor(options: {
  agentRunnable: Runnable;
  tools: (StructuredToolInterface | DynamicTool | RunnableToolLike)[];
  messageModifier?: MessageModifier;
  checkpointSaver?: BaseCheckpointSaver;
}): CompiledStateGraph;

Function-calling agent executor.

function createFunctionCallingExecutor(options: {
  model: BaseChatModel;
  tools: (StructuredToolInterface | DynamicTool | RunnableToolLike)[];
  messageModifier?: MessageModifier;
  checkpointSaver?: BaseCheckpointSaver;
}): CompiledStateGraph;

import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { MemorySaver } from "@langchain/langgraph-checkpoint";
// Define tools
// Simulated web-search tool for the example; echoes the query.
const searchTool = tool(async (input) => {
// Simulate search
return `Search results for: ${input.query}`;
}, {
name: "search",
description: "Search the web for information",
schema: z.object({
query: z.string().describe("Search query")
})
});
// SECURITY(review): eval executes arbitrary code from tool input — acceptable
// in a demo, but use a math-expression parser in production.
const calculatorTool = tool((input) => {
return eval(input.expression).toString();
}, {
name: "calculator",
description: "Evaluate mathematical expressions",
schema: z.object({
expression: z.string().describe("Math expression to evaluate")
})
});
// Demo tool: returns a canned forecast for any location.
const weatherTool = tool((input) => {
  const report = `The weather in ${input.location} is sunny and 72°F`;
  return report;
}, {
  name: "get_weather",
  description: "Get weather for a location",
  schema: z.object({
    location: z.string()
  })
});
// Create agent
const model = new ChatOpenAI({
  model: "gpt-4",
  temperature: 0
});
// NOTE: the multi-line prompt below is a runtime string — its inner lines are
// intentionally not indented so the literal's content is unchanged.
const agent = createReactAgent({
  llm: model,
  tools: [searchTool, calculatorTool, weatherTool],
  messageModifier: `You are a helpful assistant. When using tools:
1. Always explain what you're doing
2. Show your work for calculations
3. Provide sources when searching`,
  checkpointSaver: new MemorySaver()
});
// Use the agent
const result = await agent.invoke({
  messages: [{
    role: "user",
    content: "What's 25 * 4, and what's the weather in Tokyo?"
  }]
}, {
  configurable: { thread_id: "conversation-1" }
});
console.log(result.messages[result.messages.length - 1].content);
// Agent will use both calculator and weather tools
// Continue conversation
const result2 = await agent.invoke({
  messages: [{
    role: "user",
    content: "Can you search for famous landmarks there?"
  }]
}, {
  configurable: { thread_id: "conversation-1" }
});

import { StateGraph, MessagesAnnotation } from "@langchain/langgraph";
import { ToolNode, toolsCondition } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
// Bind tools so the model can emit tool calls.
const model = new ChatOpenAI({ model: "gpt-4" }).bindTools(tools);
// Agent node: call the model with the accumulated messages.
const callModel = async (state: typeof MessagesAnnotation.State) => {
const response = await model.invoke(state.messages);
return { messages: [response] };
};
// NOTE(review): MemorySaver comes from "@langchain/langgraph-checkpoint" — confirm it is imported in this snippet's scope.
const graph = new StateGraph(MessagesAnnotation)
.addNode("agent", callModel)
.addNode("tools", new ToolNode(tools))
.addEdge("__start__", "agent")
.addConditionalEdges("agent", toolsCondition)
.addEdge("tools", "agent")
.compile({ checkpointSaver: new MemorySaver() });
// This is essentially what createReactAgent does internally