# Agents

This guide covers creating and using LangChain agents for building production-ready applications.
// --- Example: a minimal agent ---
import { createAgent } from "langchain";

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  systemPrompt: "You are a helpful assistant.",
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "Hello!" }],
});

// Print the content of the final (assistant) message.
console.log(result.messages[result.messages.length - 1].content);

// --- Example: an agent with a custom tool ---
import { createAgent, tool } from "langchain";
import { z } from "zod";

// NOTE(review): `eval` on model-supplied input executes arbitrary
// JavaScript — acceptable only as a toy illustration. Use a real
// expression parser (e.g. a math library) in production code.
const calculator = tool(
  async ({ expression }) => String(eval(expression)),
  {
    name: "calculator",
    description: "Evaluate mathematical expressions",
    schema: z.object({
      expression: z.string().describe("Mathematical expression"),
    }),
  }
);

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [calculator],
  systemPrompt: "You are a helpful math assistant.",
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "What is 25 * 4?" }],
});

// --- Example: custom agent state ---
import { createAgent } from "langchain";
import { z } from "zod";

// Zod schema describing extra state fields carried alongside `messages`.
const StateSchema = z.object({
  sessionId: z.string(),
  conversationCount: z.number().default(0),
  userPreferences: z.object({
    theme: z.string(),
    language: z.string(),
  }).optional(),
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  stateSchema: StateSchema,
});

// Custom state fields are passed at the top level of the invoke input.
const result = await agent.invoke({
  messages: [{ role: "user", content: "Hello" }],
  sessionId: "session-123",
  conversationCount: 1,
});

// Access custom state
console.log(result.sessionId);
console.log(result.conversationCount);

// --- Example: structured output via responseFormat ---
import { createAgent } from "langchain";
import { z } from "zod";

// Shape the agent's final answer must conform to.
const SummarySchema = z.object({
  mainPoints: z.array(z.string()),
  sentiment: z.enum(["positive", "neutral", "negative"]),
  actionItems: z.array(z.string()),
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  responseFormat: SummarySchema,
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "Summarize: Great meeting today. We decided to launch next month." }],
});

// The parsed object is exposed on `structuredResponse`.
console.log(result.structuredResponse);
// { mainPoints: [...], sentiment: "positive", actionItems: [...] }

// --- Example: human-in-the-loop middleware ---
import { createAgent, humanInTheLoopMiddleware } from "langchain";
// NOTE(review): `dangerousTool` and `myCheckpointer` are placeholders —
// they are assumed to be defined elsewhere in the surrounding docs.
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [dangerousTool],
  middleware: [
    humanInTheLoopMiddleware({ interruptOn: "tools" }),
  ],
  checkpointer: myCheckpointer, // Required for interrupts
});

// --- Example: conversation persistence with a checkpointer ---
import { createAgent } from "langchain";
import { MemorySaver } from "@langchain/langgraph";

// In-memory checkpointer; state is keyed by `thread_id` below.
const checkpointer = new MemorySaver();
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  checkpointer: checkpointer,
});

// First conversation
await agent.invoke(
  { messages: [{ role: "user", content: "My name is Alice" }] },
  { configurable: { thread_id: "thread-1" } }
);

// Later conversation (state is restored)
const result = await agent.invoke(
  { messages: [{ role: "user", content: "What's my name?" }] },
  { configurable: { thread_id: "thread-1" } }
);
// Agent remembers: "Your name is Alice"

// --- Example: long-term key-value store ---
import { createAgent, InMemoryStore } from "langchain";
const store = new InMemoryStore();

// Pre-populate with data
await store.mset([
  ["user:preferences", { theme: "dark", language: "en" }],
  ["user:history", { lastVisit: "2024-01-15" }],
]);

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  store: store,
});
// Agent can access store during execution

// --- Example: reading the final state after invoke ---
const result = await agent.invoke({
  messages: [{ role: "user", content: "Hello" }],
});

// Access final state
console.log(result.messages);
console.log(result.structuredResponse); // If responseFormat set

// Stream full state updates
// "values" mode yields the full state after each step.
const stream = agent.stream(
  { messages: [{ role: "user", content: "Tell me a story" }] },
  { streamMode: "values" }
);
for await (const state of stream) {
  const lastMessage = state.messages[state.messages.length - 1];
  console.log(lastMessage.content);
}

// Stream only updates
const stream = agent.stream(
  { messages: [{ role: "user", content: "Write code" }] },
  { streamMode: "updates" }
);
for await (const update of stream) {
  console.log("Update:", update);
}

// Stream detailed events
const stream = agent.streamEvents({
  messages: [{ role: "user", content: "Research AI" }],
});
for await (const event of stream) {
  if (event.event === "on_tool_start") {
    console.log(`Tool started: ${event.name}`);
  } else if (event.event === "on_tool_end") {
    console.log(`Tool completed: ${event.name}`);
  }
}

// --- Example: batch-processing independent inputs ---
const results = await agent.batch([
  { messages: [{ role: "user", content: "What is 2+2?" }] },
  { messages: [{ role: "user", content: "What is the capital of France?" }] },
  { messages: [{ role: "user", content: "Name a color" }] },
]);
results.forEach((result, i) => {
  console.log(`Result ${i}:`, result.messages[result.messages.length - 1].content);
});

// --- Example: invoke options ---
const result = await agent.invoke(
  {
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    // Thread ID for persistence
    configurable: {
      thread_id: "thread-123",
      user_id: "user-456",
    },
    // Context (read-only, not persisted)
    context: {
      requestId: "req-789",
      ipAddress: "192.168.1.1",
    },
    // Abort signal for cancellation
    signal: AbortSignal.timeout(30000),
  }
);

// --- Example: message input formats ---
// Plain strings:
await agent.invoke({
  messages: ["Hello!"], // Treated as user message
});

// Role/content objects:
await agent.invoke({
  messages: [
    { role: "system", content: "You are helpful." },
    { role: "user", content: "Hello" },
    { role: "assistant", content: "Hi there!" },
    { role: "user", content: "How are you?" },
  ],
});

// Message class instances:
import { HumanMessage, AIMessage, SystemMessage } from "langchain";
await agent.invoke({
  messages: [
    new SystemMessage("You are helpful."),
    new HumanMessage("Hello"),
    new AIMessage("Hi there!"),
    new HumanMessage("How are you?"),
  ],
});

// Multimodal content (text + image):
import { HumanMessage } from "langchain";
await agent.invoke({
  messages: [
    new HumanMessage([
      { type: "text", text: "What's in this image?" },
      { type: "image_url", image_url: "https://example.com/image.jpg" },
    ]),
  ],
});

// --- Example: dynamic model selection ---
import { createAgent, initChatModel } from "langchain";
const agent = createAgent({
  // `model` may be a function of state: choose the model per invocation.
  model: (state) => {
    // Use faster model for simple queries
    if (state.messages.length <= 2) {
      return initChatModel("openai:gpt-4o-mini");
    }
    // Use powerful model for complex conversations
    return initChatModel("openai:gpt-4o");
  },
  tools: [],
});

// --- Example: dynamic system prompt ---
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  // `systemPrompt` may also be a function of state, evaluated per run.
  systemPrompt: (state) => {
    const time = new Date().getHours();
    if (time < 12) {
      return "Good morning! You are a helpful assistant.";
    } else if (time < 18) {
      return "Good afternoon! You are a helpful assistant.";
    } else {
      return "Good evening! You are a helpful assistant.";
    }
  },
});

// --- Example: state schema vs. context schema ---
import { createAgent } from "langchain";
import { z } from "zod";

const StateSchema = z.object({
  conversationCount: z.number().default(0),
  lastTopic: z.string().optional(),
});

const ContextSchema = z.object({
  userId: z.string(),
  requestId: z.string(),
  ipAddress: z.string(),
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  stateSchema: StateSchema, // Persisted across invocations
  contextSchema: ContextSchema, // Read-only, per-invocation
});

const result = await agent.invoke(
  {
    messages: [{ role: "user", content: "Hello" }],
    conversationCount: 5, // State
  },
  {
    context: { // Context
      userId: "user-123",
      requestId: "req-456",
      ipAddress: "192.168.1.1",
    },
  }
);

// --- Example: cancellation with AbortController ---
const controller = new AbortController();
// Cancel after 5 seconds
setTimeout(() => controller.abort(), 5000);
try {
  const result = await agent.invoke(
    { messages: [{ role: "user", content: "Long task" }] },
    { signal: controller.signal }
  );
} catch (error) {
  // An aborted run surfaces as an AbortError.
  if (error.name === "AbortError") {
    console.log("Operation cancelled");
  }
}

// --- Example: timeout via AbortSignal ---
const result = await agent.invoke(
  { messages: [{ role: "user", content: "Quick question" }] },
  { signal: AbortSignal.timeout(30000) } // 30 second timeout
);

// --- Example: manual multi-turn conversation (no checkpointer) ---
// Track conversation history
let messages = [];

// Turn 1
let result = await agent.invoke({
  messages: [...messages, { role: "user", content: "My name is Alice" }],
});
messages = result.messages;

// Turn 2
result = await agent.invoke({
  messages: [...messages, { role: "user", content: "What's my name?" }],
});
messages = result.messages;
// Agent responds: "Your name is Alice"

// Turn 3
result = await agent.invoke({
  messages: [...messages, { role: "user", content: "Thanks!" }],
});

// --- Example: provider-agnostic model strings ---
// OpenAI
const openaiAgent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
});

// Anthropic
const anthropicAgent = createAgent({
  model: "anthropic:claude-3-5-sonnet-20241022",
  tools: [],
});

// Local Ollama
const localAgent = createAgent({
  model: "ollama:llama3.1",
  tools: [],
});

// All have the same interface
const result = await localAgent.invoke({
  messages: [{ role: "user", content: "Hello" }],
});

// --- Example: agent naming ---
// Include agent name in all messages
const agent1 = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  name: "ResearchAgent",
  includeAgentName: true,
});

// Include agent name only in tool messages
const agent2 = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  name: "TaskAgent",
  includeAgentName: "tool_messages",
});

// Don't include agent name
const agent3 = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  name: "ChatAgent",
  includeAgentName: false,
});

// --- Example: error handling ---
import {
  ToolInvocationError,
  StructuredOutputParsingError,
} from "langchain";

try {
  const result = await agent.invoke({
    messages: [{ role: "user", content: "Do something" }],
  });
} catch (error) {
  if (error instanceof ToolInvocationError) {
    // A tool threw during execution; the original error is on `cause`.
    console.error(`Tool ${error.toolName} failed:`, error.message);
    console.error("Input:", error.toolInput);
    console.error("Cause:", error.cause);
  } else if (error instanceof StructuredOutputParsingError) {
    console.error("Failed to parse structured output:", error.message);
    console.error("Raw output:", error.rawOutput);
  } else {
    // Rethrow-worthy in production; logged here for the example.
    console.error("Unknown error:", error);
  }
}

// --- Example: retry wrapper with linear backoff ---
// Retries `agent.invoke(input)` up to `maxRetries` times, sleeping
// 1s, 2s, 3s, ... between attempts; the final failure is rethrown.
async function invokeWithRetry(agent, input, maxRetries = 3) {
  for (let i = 0; i < maxRetries; i++) {
    try {
      return await agent.invoke(input);
    } catch (error) {
      if (i === maxRetries - 1) throw error;
      console.log(`Attempt ${i + 1} failed, retrying...`);
      await new Promise(resolve => setTimeout(resolve, 1000 * (i + 1)));
    }
  }
}

const result = await invokeWithRetry(agent, {
  messages: [{ role: "user", content: "Hello" }],
});

Use `stateSchema` for data that persists across invocations, and `contextSchema` for read-only, per-invocation data. Prefer `gpt-4o-mini` or a similar lightweight model for simple tasks. See the Agent API Reference for complete API documentation.