TypeScript framework for building LLM-powered applications with agents, tools, middleware, and model interoperability
Rapid lookup for common LangChain APIs, patterns, and types. For complete documentation, see the full guides and API reference.
Looking for task-based examples? See the Task Index for "I want to..." → code mappings.
import {
// Agent
createAgent,
// Tools
tool,
DynamicTool,
StructuredTool,
DynamicStructuredTool,
// Model
initChatModel,
// Messages
AIMessage,
HumanMessage,
SystemMessage,
ToolMessage,
filterMessages,
trimMessages,
// Structured Outputs
toolStrategy,
providerStrategy,
// Middleware
createMiddleware,
humanInTheLoopMiddleware,
summarizationMiddleware,
piiMiddleware,
// Storage
InMemoryStore,
// Context
context,
// Type Inference
type InferAgentState,
type InferAgentResponse,
type InferAgentContext,
type InferMiddlewareState,
} from "langchain";// Minimal agent
const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
});
const result = await agent.invoke({
messages: [{ role: "user", content: "Hello!" }],
});

const calculator = tool(
async ({ expression }) => String(eval(expression)), // demo only; avoid eval on untrusted input
{
name: "calculator",
description: "Evaluate mathematical expressions",
schema: z.object({
expression: z.string().describe("Expression to evaluate"),
}),
}
);
const agent = createAgent({
model: "openai:gpt-4o",
tools: [calculator],
systemPrompt: "You are a helpful math assistant.",
});

const ContactSchema = z.object({
name: z.string(),
email: z.string().email(),
phone: z.string(),
});
const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
responseFormat: ContactSchema,
});
const result = await agent.invoke({
messages: [{ role: "user", content: "Extract: John, john@example.com, 555-1234" }],
});
console.log(result.structuredResponse);
// { name: "John", email: "john@example.com", phone: "555-1234" }const StateSchema = z.object({
sessionId: z.string(),
count: z.number().default(0),
});
const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
stateSchema: StateSchema,
});
const result = await agent.invoke({
messages: [{ role: "user", content: "Hello" }],
sessionId: "session-123",
count: 1,
});
console.log(result.sessionId); // "session-123"
console.log(result.count); // 1

import { MemorySaver } from "@langchain/langgraph";
const checkpointer = new MemorySaver();
const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
checkpointer: checkpointer,
});
// First conversation
await agent.invoke(
{ messages: [{ role: "user", content: "My name is Alice" }] },
{ configurable: { thread_id: "thread-1" } }
);
// Later conversation - remembers previous context
const result = await agent.invoke(
{ messages: [{ role: "user", content: "What's my name?" }] },
{ configurable: { thread_id: "thread-1" } }
);

const agent = createAgent({
model: "openai:gpt-4o",
tools: [],
});
for await (const state of agent.stream(
{ messages: [{ role: "user", content: "Tell me a story" }] },
{ streamMode: "values" }
)) {
const lastMessage = state.messages[state.messages.length - 1];
console.log(lastMessage.content);
}

interface CreateAgentParams {
model?: string | ChatModel; // Model identifier or instance (either model or llm must be provided)
llm?: string | ChatModel; // Alias for model
tools?: Tool[] | ToolNode; // Array of tools
systemPrompt?: string | SystemMessage; // System instructions
prompt?: string | SystemMessage; // Alias for systemPrompt
responseFormat?: ResponseFormat; // Structured output schema
stateSchema?: ZodObject | AnnotationRoot; // Custom state schema
contextSchema?: ZodObject | AnnotationRoot; // Context schema (read-only)
middleware?: AgentMiddleware[]; // Middleware array
checkpointer?: BaseCheckpointSaver; // State persistence
store?: BaseStore; // Long-term memory
name?: string; // Agent name
description?: string; // Agent description
includeAgentName?: boolean | "tool_messages"; // Name exposure mode
signal?: AbortSignal; // Cancellation signal
version?: "v1" | "v2"; // Graph version (default: "v2")
}
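
A sketch of how several of these options combine, reusing the calculator tool, MemorySaver, and the summarizationMiddleware option shape shown elsewhere in this reference (values are illustrative):

const supportAgent = createAgent({
  model: "openai:gpt-4o",
  tools: [calculator],
  systemPrompt: "You are a helpful support assistant.",
  name: "support-agent",
  description: "Answers billing questions",
  middleware: [summarizationMiddleware({ threshold: 1000 })],
  checkpointer: new MemorySaver(),
});
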
class ReactAgent<TConfig> {
// Synchronous execution
invoke(
input: UserInput<TConfig>,
config?: InvokeConfiguration<TConfig>
): Promise<State<TConfig>>;
// Streaming execution
stream(
input: UserInput<TConfig>,
config?: StreamConfiguration<TConfig>
): AsyncGenerator<State<TConfig>>;
// Event streaming
streamEvents(
input: UserInput<TConfig>,
config?: StreamConfiguration<TConfig>
): AsyncGenerator<StreamEvent>;
// Batch processing
batch(
inputs: UserInput<TConfig>[],
config?: BatchConfiguration<TConfig>
): Promise<State<TConfig>[]>;
}
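
For example, batch runs several independent inputs and resolves when all complete (a sketch):

const results = await agent.batch([
  { messages: [{ role: "user", content: "Summarize document A" }] },
  { messages: [{ role: "user", content: "Summarize document B" }] },
]);
console.log(results.length); // 2
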
function tool<T = any>(
func: (input: T, config?: ToolConfig) => any | Promise<any>,
fields: {
name: string; // Tool name for LLM
description: string; // When to use this tool
schema: ZodType<T>; // Input validation schema
responseFormat?: "content" | "content_and_artifact";
}
): StructuredTool<T>;
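
Another tool sketch using an enum and a default value; the weather lookup is a stub, not a real API:

const getWeather = tool(
  async ({ city, unit }) => `Sunny in ${city} (${unit})`, // stub implementation
  {
    name: "get_weather",
    description: "Look up the current weather for a city",
    schema: z.object({
      city: z.string().describe("City name"),
      unit: z.enum(["celsius", "fahrenheit"]).default("celsius"),
    }),
  }
);
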
function initChatModel<RunInput = any, CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions>(
model?: string | ChatModel, // "provider:model-name" or instance
fields?: InitChatModelFields
): ChatModel<RunInput, CallOptions>;
interface InitChatModelFields {
temperature?: number; // Randomness (0-1)
modelName?: string; // Alternate model name
timeout?: number; // Request timeout (ms)
maxTokens?: number; // Max output tokens
streaming?: boolean; // Enable streaming
// Provider-specific fields...
}
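
A usage sketch; depending on your langchain version initChatModel may return a Promise, so awaiting it is safe either way:

const model = await initChatModel("anthropic:claude-3-5-sonnet-20241022", {
  temperature: 0,
  maxTokens: 1024,
});
const agent = createAgent({ model, tools: [] });
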
// Create messages
new HumanMessage("User message")
new AIMessage("Assistant response")
new SystemMessage("System instructions")
new ToolMessage("Tool result", "tool_call_id")
// Multimodal messages
new HumanMessage([
{ type: "text", text: "What's in this image?" },
{ type: "image_url", image_url: "https://example.com/image.jpg" },
])
// Message utilities
filterMessages(messages, { includeTypes: ["human", "ai"] })
trimMessages(messages, { maxTokens: 1000 })

// Direct schema
responseFormat: ContactSchema
// Schema array (union type)
responseFormat: [EmailSchema, TaskSchema, ReminderSchema]
// Tool strategy
responseFormat: toolStrategy(Schema, {
name: "generate_output",
strict: true,
})
// Provider strategy
responseFormat: providerStrategy(Schema)
// JSON schema
responseFormat: {
name: "output",
strict: true,
schema: { /* JSON Schema */ },
}
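
For instance, wrapping the ContactSchema defined earlier in toolStrategy (a sketch):

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  responseFormat: toolStrategy(ContactSchema, {
    name: "generate_output",
    strict: true,
  }),
});
const result = await agent.invoke({
  messages: [{ role: "user", content: "Extract: John, john@example.com, 555-1234" }],
});
console.log(result.structuredResponse);
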
name: "my_middleware",
// Optional: Add state (persisted)
stateSchema: z.object({
counter: z.number().default(0),
}),
// Optional: Add context (read-only)
contextSchema: z.object({
requestId: z.string(),
}),
// Optional: Add tools
tools: [myTool],
// Lifecycle hooks (all optional):
beforeAgent: async (state, runtime) => state,
beforeModel: async (state, runtime) => state,
afterModel: async (state, runtime) => state,
afterAgent: async (state, runtime) => state,
// Wrappers:
wrapToolCall: async (request, handler, runtime) => {
console.log(`Calling ${request.toolName}`);
return await handler(request);
},
wrapModelCall: async (state, handler, runtime) => {
console.log("Calling model");
return await handler(state);
},
});
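
Attaching the middleware and supplying its read-only context at invoke time (a sketch):

const agentWithMiddleware = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  middleware: [middleware],
});
await agentWithMiddleware.invoke(
  { messages: [{ role: "user", content: "Hello" }] },
  { context: { requestId: "req-789" } }
);
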
// Infer agent state
type AgentState = InferAgentState<typeof agent>;
// Infer structured response type
type Response = InferAgentResponse<typeof agent>;
// Infer context type
type Context = InferAgentContext<typeof agent>;
// Infer middleware state
type MState = InferMiddlewareState<typeof middleware>;
// Infer middleware contexts
type MContexts = InferMiddlewareContexts<typeof middlewareArray>;
// Infer merged middleware states
type MStates = InferMiddlewareStates<typeof middlewareArray>;
// Infer tools from middleware
type MTools = InferMiddlewareTools<typeof middlewareArray>;
// Infer tool names
type ToolNames = typeof tools[number]["name"];
// Tools to message set
type ToolSet = ToolsToMessageToolSet<typeof tools>;

// OpenAI
"openai:gpt-4o"
"openai:gpt-4o-mini"
"openai:gpt-4-turbo"
// Anthropic
"anthropic:claude-3-5-sonnet-20241022"
"anthropic:claude-3-5-haiku-20241022"
"anthropic:claude-3-opus-20240229"
// Google
"google:gemini-1.5-pro"
"google:gemini-1.5-flash"
// Ollama (local)
"ollama:llama3.1"
"ollama:mistral"
"ollama:phi3"
// Others
"cohere:command-r-plus"
"groq:mixtral-8x7b-32768"
"together:meta-llama/Llama-3-70b-chat-hf"// Human-in-the-loop
humanInTheLoopMiddleware({ interruptOn: "tools" })
// Message summarization
summarizationMiddleware({ threshold: 1000 })
// PII detection/redaction
piiMiddleware({ strategy: "redact", builtInTypes: ["email", "credit_card"] })
piiRedactionMiddleware({ types: ["email", "ip"] })
// Dynamic system prompts
dynamicSystemPromptMiddleware({ promptFactory: (state) => `...` })
// LLM tool selection
llmToolSelectorMiddleware({ maxTools: 5 })
// Context editing
contextEditingMiddleware({ edits: [new ClearToolUsesEdit()] })
// Limits
toolCallLimitMiddleware({ threadLimit: 100, runLimit: 10 })
modelCallLimitMiddleware({ maxCalls: 5 })
// Todo list
todoListMiddleware()
// Retry and fallback
modelRetryMiddleware({ maxRetries: 3 })
modelFallbackMiddleware({ fallbackModels: ["openai:gpt-4o-mini"] })
toolRetryMiddleware({ maxRetries: 3 })
// Tool emulation
toolEmulatorMiddleware({ emulations: { tool_name: async (args) => "result" } })
// Moderation
openAIModerationMiddleware({ moderateInput: true })
// Prompt caching
anthropicPromptCachingMiddleware({ cacheSystemPrompt: true })
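
A sketch stacking several built-ins, using the option shapes listed above; import them from "langchain" like the middleware in the import block at the top:

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [calculator],
  middleware: [
    summarizationMiddleware({ threshold: 1000 }),
    toolCallLimitMiddleware({ threadLimit: 100, runLimit: 10 }),
    modelRetryMiddleware({ maxRetries: 3 }),
  ],
});
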
// Agent errors
MultipleToolsBoundError // Tools already bound to model
MultipleStructuredOutputsError // Multiple outputs when expecting one
StructuredOutputParsingError // Failed to parse structured output
ToolInvocationError // Tool execution failed
// Middleware errors
ToolCallLimitExceededError // Too many tool calls
PIIDetectionError // PII detection failed
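
A handling sketch, assuming these error classes are exported from "langchain":

try {
  await agent.invoke({ messages: [{ role: "user", content: "Run the tools" }] });
} catch (error) {
  if (error instanceof ToolCallLimitExceededError) {
    console.error("Tool call budget exhausted:", error.message);
  } else {
    throw error; // rethrow anything we don't handle
  }
}
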
# OpenAI
OPENAI_API_KEY="sk-..."
# Anthropic
ANTHROPIC_API_KEY="sk-ant-..."
# Google
GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials.json"
# Cohere
COHERE_API_KEY="..."
# Groq
GROQ_API_KEY="gsk_..."
# Others
MISTRAL_API_KEY="..."
TOGETHER_API_KEY="..."
FIREWORKS_API_KEY="..."

// Basic types
z.string()
z.number()
z.boolean()
z.date()
z.any()
// Arrays
z.array(z.string())
// Objects
z.object({
name: z.string(),
age: z.number(),
})
// Optional
z.string().optional()
z.number().nullable()
// Defaults
z.number().default(0)
z.string().default("default")
// Validation
z.string().email()
z.string().url()
z.string().min(3).max(100)
z.number().min(0).max(100)
z.string().regex(/^[a-z]+$/)
// Enums
z.enum(["option1", "option2"])
z.literal("exact_value")
// Unions
z.union([z.string(), z.number()])
// Discriminated unions
z.discriminatedUnion("type", [
z.object({ type: z.literal("A"), value: z.string() }),
z.object({ type: z.literal("B"), count: z.number() }),
])
// Descriptions (helps LLM)
z.string().describe("User's full name")
z.number().describe("Age in years")// Full state updates
agent.stream(input, { streamMode: "values" })
// Only changed values
agent.stream(input, { streamMode: "updates" })
// Only new messages
agent.stream(input, { streamMode: "messages" })
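
For example, "updates" mode yields only what changed at each step (a sketch):

for await (const update of agent.stream(
  { messages: [{ role: "user", content: "Plan a trip" }] },
  { streamMode: "updates" }
)) {
  console.log(update); // partial state for the step that just ran
}
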
// Invoke configuration
{
// Persistence
configurable: {
thread_id: "thread-123", // Required for checkpointer
user_id: "user-456", // Custom configurable
},
// Read-only context
context: {
requestId: "req-789",
ipAddress: "192.168.1.1",
},
// Cancellation
signal: AbortSignal.timeout(30000), // 30s timeout
}

Best practices: prefer specific tool names (search_web, not search), add .describe() to all schema fields, and use gpt-4o-mini for simple tasks.
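
A minimal end-to-end sketch that follows these practices; the search_web tool and its stub body are illustrative, not part of the library:

const searchWeb = tool(
  async ({ query }) => `Results for: ${query}`, // stub implementation
  {
    name: "search_web",
    description: "Search the web for up-to-date information",
    schema: z.object({
      query: z.string().describe("Search query"),
    }),
  }
);
const agent = createAgent({
  model: "openai:gpt-4o-mini", // smaller model for a simple task
  tools: [searchWeb],
  checkpointer: new MemorySaver(),
});
const result = await agent.invoke(
  { messages: [{ role: "user", content: "What's new in TypeScript?" }] },
  {
    configurable: { thread_id: "thread-123" }, // required when a checkpointer is set
    signal: AbortSignal.timeout(30000), // cancel after 30s
  }
);
console.log(result.messages[result.messages.length - 1].content);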