# "How do I..." quick reference
Quick lookup for "I want to..." tasks mapped directly to minimal code solutions. For complete implementations, follow the guide links.
import { createAgent } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
systemPrompt: "You are a helpful assistant.",
});
const result = await agent.invoke({
messages: [{ role: "user", content: "Hello!" }],
});

→ Full Agent Guide | API Reference
const agent = createAgent({
model: "openai:gpt-4o",
systemPrompt: "You are a Python expert. Always provide code examples with explanations.",
});

→ Agent Guide - System Prompts
import { createAgent, tool } from "langchain";
import { z } from "zod";

// A minimal custom tool: an async handler paired with a Zod schema that
// describes the arguments the model must supply when calling it.
const searchTool = tool(
  async ({ query }) => {
    // Your search implementation
    return `Results for: ${query}`;
  },
  {
    // Identifier the model uses to call the tool.
    name: "search",
    // The description helps the model decide when to pick this tool.
    description: "Search for information",
    schema: z.object({
      query: z.string().describe("Search query"),
    }),
  }
);
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool],
});

→ Tool Guide | Tool API Reference
import { createAgent } from "langchain";
import { MemorySaver } from "@langchain/langgraph";
const checkpointer = new MemorySaver();
const agent = createAgent({
model: "openai:gpt-4o",
checkpointer: checkpointer,
});
// First conversation
await agent.invoke(
{ messages: [{ role: "user", content: "My name is Alice" }] },
{ configurable: { thread_id: "user-123" } }
);
// Later conversation - remembers context
const result = await agent.invoke(
{ messages: [{ role: "user", content: "What's my name?" }] },
{ configurable: { thread_id: "user-123" } }
);

→ Agent Guide - Persistence | Quick Reference
import { z } from "zod";

// Custom agent state: extra fields carried alongside `messages` on each
// invoke/stream call (see the invocation below that passes userId and
// preferences next to messages).
const StateSchema = z.object({
  userId: z.string(),
  // Optional per-user settings; may be absent entirely.
  preferences: z.object({
    theme: z.string(),
    language: z.string(),
  }).optional(),
  // Falls back to 0 when the caller does not supply it.
  conversationCount: z.number().default(0),
});
const agent = createAgent({
model: "openai:gpt-4o",
stateSchema: StateSchema,
checkpointer: checkpointer,
});
const result = await agent.invoke({
messages: [{ role: "user", content: "Hello" }],
userId: "user-123",
preferences: { theme: "dark", language: "en" },
}, { configurable: { thread_id: "user-123" } });

→ Agent Guide - Custom State | Quick Reference
import { InMemoryStore } from "langchain";
const store = new InMemoryStore();
// Pre-populate with user data
await store.mset([
["user:alice:preferences", { theme: "dark", language: "en" }],
["user:alice:history", { lastLogin: "2024-01-15" }],
]);
const agent = createAgent({
model: "openai:gpt-4o",
store: store,
});

→ Storage Guide | Storage API Reference
import { tool } from "langchain";
import { z } from "zod";

// A tool that calls an external HTTP API and hands the JSON payload back
// to the model as a string.
const weatherTool = tool(
  async ({ location }) => {
    const response = await fetch(`https://api.weather.com/${location}`);
    // NOTE(review): no response.ok check — a non-2xx reply would still be
    // parsed here; consider handling errors in a real implementation.
    const data = await response.json();
    // Tool results are strings; serialize structured data before returning.
    return JSON.stringify(data);
  },
  {
    name: "get_weather",
    description: "Get current weather for a location",
    schema: z.object({
      location: z.string().describe("City name or zip code"),
    }),
  }
);
const agent = createAgent({
model: "openai:gpt-4o",
tools: [weatherTool],
});

→ Tool Guide | Tool API Reference
const searchTool = tool(
async ({ query, filters, limit }) => {
const results = await database.search({ query, filters, limit });
return JSON.stringify(results);
},
{
name: "search_database",
description: "Search database with filters",
schema: z.object({
query: z.string().describe("Search query"),
filters: z.object({
category: z.string().optional(),
dateFrom: z.string().optional(),
dateTo: z.string().optional(),
}).optional(),
limit: z.number().default(10),
}),
}
);

→ Tool Guide | Tool API Examples
const agent = createAgent({
model: "openai:gpt-4o",
tools: [searchTool, weatherTool, calculatorTool, emailTool],
});

import { createAgent } from "langchain";
import { z } from "zod";

// Target shape for structured output: the agent's final answer is
// validated against this schema (see `responseFormat` below).
const ContactSchema = z.object({
  name: z.string(),
  email: z.string().email(), // enforces email format
  phone: z.string(),
});
const agent = createAgent({
model: "openai:gpt-4o",
responseFormat: ContactSchema,
});
const result = await agent.invoke({
messages: [
{ role: "user", content: "Extract: John Doe, john@example.com, 555-1234" }
],
});
console.log(result.structuredResponse);
// { name: "John Doe", email: "john@example.com", phone: "555-1234" }

→ Structured Output Guide | API Reference
// A more complex structured-output schema: nested objects, arrays, and
// enums are all supported.
const OrderSchema = z.object({
  orderId: z.string(),
  items: z.array(z.object({
    name: z.string(),
    quantity: z.number(),
    price: z.number(),
  })),
  total: z.number(),
  // Restrict status to a fixed set of allowed values.
  status: z.enum(["pending", "confirmed", "shipped"]),
});
const agent = createAgent({
model: "openai:gpt-4o",
responseFormat: OrderSchema,
});

→ Structured Output Guide - Complex Schema
// Two alternative output shapes; the literal `type` field discriminates
// which one the model produced when both are passed as a union below.
const EmailSchema = z.object({
  type: z.literal("email"),
  to: z.string().email(),
  subject: z.string(),
  body: z.string(),
});

const TaskSchema = z.object({
  type: z.literal("task"),
  title: z.string(),
  dueDate: z.string(),
});
const agent = createAgent({
model: "openai:gpt-4o",
responseFormat: [EmailSchema, TaskSchema], // Union type
});

→ Structured Output Guide - Union Types
import { createAgent, humanInTheLoopMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [dangerousTool],
middleware: [
humanInTheLoopMiddleware({ interruptOn: "tools" }),
],
checkpointer: checkpointer, // Required
});
// Agent will pause before executing tools
await agent.invoke(
{ messages: [{ role: "user", content: "Delete the file" }] },
{ configurable: { thread_id: "session-1" } }
);
// Resume after human approval
await agent.invoke(
{ messages: [] },
{ configurable: { thread_id: "session-1" } }
);

→ Human-in-the-Loop Guide | Built-in Middleware
const agent = createAgent({
model: "openai:gpt-4o",
tools: [apiTool],
middleware: [
humanInTheLoopMiddleware({ interruptOn: "tool_results" }),
],
checkpointer: checkpointer,
});

→ Human-in-the-Loop Guide - Tool Results
const agent = createAgent({
model: "openai:gpt-4o",
tools: [tools],
middleware: [
humanInTheLoopMiddleware({
interruptOn: (state, toolCalls) => {
const dangerous = ["delete_file", "send_email", "make_payment"];
return toolCalls?.some(call => dangerous.includes(call.name)) ?? false;
},
}),
],
checkpointer: checkpointer,
});

→ Human-in-the-Loop Guide - Custom Logic
const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
});
for await (const state of agent.stream(
{ messages: [{ role: "user", content: "Tell me a story" }] },
{ streamMode: "values" }
)) {
const lastMessage = state.messages[state.messages.length - 1];
console.log(lastMessage.content);
}

→ Agent Guide - Streaming | Quick Reference
for await (const update of agent.stream(
{ messages: [{ role: "user", content: "Write code" }] },
{ streamMode: "updates" }
)) {
console.log("Update:", update);
}

const stream = agent.streamEvents({
messages: [{ role: "user", content: "Research AI" }],
});
for await (const event of stream) {
if (event.event === "on_tool_start") {
console.log(`Tool started: ${event.name}`);
} else if (event.event === "on_tool_end") {
console.log(`Tool completed: ${event.name}`);
}
}

const results = await agent.batch([
{ messages: [{ role: "user", content: "What is 2+2?" }] },
{ messages: [{ role: "user", content: "Capital of France?" }] },
{ messages: [{ role: "user", content: "Name a color" }] },
]);
results.forEach((result, i) => {
console.log(`Result ${i}:`, result.messages[result.messages.length - 1].content);
});

→ Agent Guide - Batch Processing | API Reference
import { HumanMessage } from "langchain";
const result = await agent.invoke({
messages: [
new HumanMessage([
{ type: "text", text: "What's in this image?" },
{ type: "image_url", image_url: "https://example.com/image.jpg" },
]),
],
});

→ Message Guide - Multimodal | Message API Reference
import { filterMessages } from "langchain";
const humanOnly = filterMessages(messages, {
includeTypes: ["human"],
});
const noSystemMessages = filterMessages(messages, {
excludeTypes: ["system"],
});

import { trimMessages } from "langchain";
const trimmed = trimMessages(messages, {
maxTokens: 1000,
strategy: "last", // Keep most recent messages
});

import { summarizationMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
summarizationMiddleware({ threshold: 1000 }), // Summarize after 1000 tokens
],
});

→ Built-in Middleware - Summarization
// OpenAI
const agent1 = createAgent({ model: "openai:gpt-4o" });
// Anthropic
const agent2 = createAgent({ model: "anthropic:claude-3-5-sonnet-20241022" });
// Google
const agent3 = createAgent({ model: "google:gemini-1.5-pro" });
// Ollama (local)
const agent4 = createAgent({ model: "ollama:llama3.1" });

→ Model Guide | Model API Reference | Quick Reference - Providers
import { initChatModel } from "langchain";
const model = initChatModel("openai:gpt-4o", {
temperature: 0.7,
maxTokens: 2000,
timeout: 30000, // 30 seconds
});
const agent = createAgent({
model: model,
tools: [],
});

→ Model Guide - Configuration | Model API Reference
const fastAgent = createAgent({
model: "openai:gpt-4o-mini", // Faster, cheaper
tools: [],
});

→ Model Guide | Quick Reference - Best Practices
import { modelRetryMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
modelRetryMiddleware({ maxRetries: 3 }),
],
});

→ Built-in Middleware - Model Retry
import { modelFallbackMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
modelFallbackMiddleware({
fallbackModels: ["openai:gpt-4o-mini", "anthropic:claude-3-5-haiku-20241022"],
}),
],
});

→ Built-in Middleware - Model Fallback
import { toolRetryMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [unreliableTool],
middleware: [
toolRetryMiddleware({ maxRetries: 3 }),
],
});

→ Built-in Middleware - Tool Retry
try {
const result = await agent.invoke({
messages: [{ role: "user", content: "Extract data..." }],
});
console.log(result.structuredResponse);
} catch (error) {
if (error instanceof StructuredOutputParsingError) {
console.error("Failed to parse:", error.message);
console.error("Raw output:", error.rawOutput);
}
}

→ Error Handling Guide | API Reference - Errors
import { piiMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
piiMiddleware({
builtInTypes: ["email", "credit_card", "ip"],
strategy: "redact",
}),
],
});

→ Built-in Middleware - PII Detection
import { openAIModerationMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
openAIModerationMiddleware({ moderateInput: true, moderateOutput: true }),
],
});

→ Built-in Middleware - OpenAI Moderation
import { toolCallLimitMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [tools],
middleware: [
toolCallLimitMiddleware({ runLimit: 10 }), // Max 10 tool calls per run
],
});

→ Built-in Middleware - Tool Call Limit
import { modelCallLimitMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
modelCallLimitMiddleware({ maxCalls: 5 }),
],
});

→ Built-in Middleware - Model Call Limit
import { anthropicPromptCachingMiddleware } from "langchain";
const agent = createAgent({
model: "anthropic:claude-3-5-sonnet-20241022",
middleware: [
anthropicPromptCachingMiddleware({ cacheSystemPrompt: true }),
],
});

→ Built-in Middleware - Anthropic Prompt Caching
import { dynamicSystemPromptMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [
dynamicSystemPromptMiddleware({
promptFactory: (state, context) => {
const hour = new Date().getHours();
return `You are a helpful assistant. Current time: ${hour}:00`;
},
}),
],
});

→ Built-in Middleware - Dynamic System Prompt
import { llmToolSelectorMiddleware } from "langchain";
const agent = createAgent({
model: "openai:gpt-4o",
tools: [manyTools], // Large tool set
middleware: [
llmToolSelectorMiddleware({ maxTools: 5 }), // LLM picks 5 most relevant
],
});

→ Built-in Middleware - LLM Tool Selector
import { createMiddleware } from "langchain";

// Custom middleware with lifecycle hooks that run around each model call.
// Each hook receives the current state and must return it (possibly
// modified); here they only log and pass the state through unchanged.
const loggingMiddleware = createMiddleware({
  name: "logging",
  beforeModel: async (state, runtime) => {
    console.log("Calling model with messages:", state.messages);
    return state;
  },
  afterModel: async (state, runtime) => {
    console.log("Model response received");
    return state;
  },
});
const agent = createAgent({
model: "openai:gpt-4o",
middleware: [loggingMiddleware],
});

→ Middleware Overview - Custom Middleware | Custom Middleware Guide
const metricsMiddleware = createMiddleware({
name: "metrics",
wrapToolCall: async (request, handler, runtime) => {
const start = Date.now();
console.log(`Calling tool: ${request.toolName}`);
const result = await handler(request);
const duration = Date.now() - start;
console.log(`Tool ${request.toolName} took ${duration}ms`);
return result;
},
});

→ Middleware Overview - Wrapping Calls | Custom Middleware Guide
import { pull, push } from "langchain/hub";
// Pull prompt from hub
const prompt = await pull("username/prompt-name");
// Push prompt to hub
await push("username/my-prompt", myPrompt);

import { load } from "langchain/load";
const agent = await load(serializedAgentString);

New to LangChain? Start with the main documentation
Need API details? See Quick Reference or API Reference Index
Looking for a specific concept? Use the Glossary
Building production apps? Check Error Handling and Middleware Catalog