Quick Reference

Rapid lookup for common LangChain APIs, patterns, and types. For complete documentation, see the full guides and API reference.

Looking for task-based examples? See the Task Index for "I want to..." → code mappings.

Core Imports

import {
  // Agent
  createAgent,

  // Tools
  tool,
  DynamicTool,
  StructuredTool,
  DynamicStructuredTool,

  // Model
  initChatModel,

  // Messages
  AIMessage,
  HumanMessage,
  SystemMessage,
  ToolMessage,
  filterMessages,
  trimMessages,

  // Structured Outputs
  toolStrategy,
  providerStrategy,

  // Middleware
  createMiddleware,
  humanInTheLoopMiddleware,
  summarizationMiddleware,
  piiMiddleware,

  // Storage
  InMemoryStore,

  // Context
  context,

  // Type Inference
  type InferAgentState,
  type InferAgentResponse,
  type InferAgentContext,
  type InferMiddlewareState,
} from "langchain";

import { z } from "zod"; // schemas in the examples below use Zod

Quick Start

// Minimal agent
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "Hello!" }],
});

Common Patterns

Agent with Tools

const calculator = tool(
  // Demo only: eval() is unsafe with untrusted input
  async ({ expression }) => String(eval(expression)),
  {
    name: "calculator",
    description: "Evaluate mathematical expressions",
    schema: z.object({
      expression: z.string().describe("Expression to evaluate"),
    }),
  }
);

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [calculator],
  systemPrompt: "You are a helpful math assistant.",
});

Agent with Structured Output

const ContactSchema = z.object({
  name: z.string(),
  email: z.string().email(),
  phone: z.string(),
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  responseFormat: ContactSchema,
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "Extract: John, john@example.com, 555-1234" }],
});

console.log(result.structuredResponse);
// { name: "John", email: "john@example.com", phone: "555-1234" }

Agent with Custom State

const StateSchema = z.object({
  sessionId: z.string(),
  count: z.number().default(0),
});

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  stateSchema: StateSchema,
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "Hello" }],
  sessionId: "session-123",
  count: 1,
});

console.log(result.sessionId); // "session-123"
console.log(result.count); // 1

Agent with Persistence

import { MemorySaver } from "@langchain/langgraph";

const checkpointer = new MemorySaver();

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  checkpointer: checkpointer,
});

// First conversation
await agent.invoke(
  { messages: [{ role: "user", content: "My name is Alice" }] },
  { configurable: { thread_id: "thread-1" } }
);

// Later conversation - remembers previous context
const result = await agent.invoke(
  { messages: [{ role: "user", content: "What's my name?" }] },
  { configurable: { thread_id: "thread-1" } }
);

Streaming Responses

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
});

for await (const state of agent.stream(
  { messages: [{ role: "user", content: "Tell me a story" }] },
  { streamMode: "values" }
)) {
  const lastMessage = state.messages[state.messages.length - 1];
  console.log(lastMessage.content);
}

API Quick Lookup

createAgent Parameters

interface CreateAgentParams {
  model?: string | ChatModel;           // Model identifier or instance (either model or llm must be set)
  llm?: string | ChatModel;             // Alias for model
  tools?: Tool[] | ToolNode;            // Array of tools
  systemPrompt?: string | SystemMessage; // System instructions
  prompt?: string | SystemMessage;      // Alias for systemPrompt
  responseFormat?: ResponseFormat;      // Structured output schema
  stateSchema?: ZodObject | AnnotationRoot; // Custom state schema
  contextSchema?: ZodObject | AnnotationRoot; // Context schema (read-only)
  middleware?: AgentMiddleware[];       // Middleware array
  checkpointer?: BaseCheckpointSaver;   // State persistence
  store?: BaseStore;                    // Long-term memory
  name?: string;                        // Agent name
  description?: string;                 // Agent description
  includeAgentName?: boolean | "tool_messages"; // Name exposure mode
  signal?: AbortSignal;                 // Cancellation signal
  version?: "v1" | "v2";               // Graph version (default: "v2")
}

ReactAgent Methods

class ReactAgent<TConfig> {
  // Single (non-streaming) execution; resolves with the final state
  invoke(
    input: UserInput<TConfig>,
    config?: InvokeConfiguration<TConfig>
  ): Promise<State<TConfig>>;

  // Streaming execution
  stream(
    input: UserInput<TConfig>,
    config?: StreamConfiguration<TConfig>
  ): AsyncGenerator<State<TConfig>>;

  // Event streaming
  streamEvents(
    input: UserInput<TConfig>,
    config?: StreamConfiguration<TConfig>
  ): AsyncGenerator<StreamEvent>;

  // Batch processing
  batch(
    inputs: UserInput<TConfig>[],
    config?: BatchConfiguration<TConfig>
  ): Promise<State<TConfig>[]>;
}
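
For example, batch runs several independent inputs and returns one final state per input. A minimal sketch using the agent from Quick Start:

// Each input gets its own run; results come back in the same order
const results = await agent.batch([
  { messages: [{ role: "user", content: "Summarize topic A" }] },
  { messages: [{ role: "user", content: "Summarize topic B" }] },
]);

console.log(results.length); // 2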

tool Function

function tool<T = any>(
  func: (input: T, config?: ToolConfig) => any | Promise<any>,
  fields: {
    name: string;                      // Tool name for LLM
    description: string;               // When to use this tool
    schema: ZodType<T>;               // Input validation schema
    responseFormat?: "content" | "content_and_artifact";
  }
): StructuredTool<T>;

initChatModel

function initChatModel<RunInput = any, CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions>(
  model?: string | ChatModel,          // "provider:model-name" or instance
  fields?: InitChatModelFields
): ChatModel<RunInput, CallOptions>;

interface InitChatModelFields {
  temperature?: number;                // Randomness (0-1)
  modelName?: string;                  // Alternate model name
  timeout?: number;                    // Request timeout (ms)
  maxTokens?: number;                  // Max output tokens
  streaming?: boolean;                 // Enable streaming
  // Provider-specific fields...
}
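
A minimal usage sketch based on the signature above; in some versions initChatModel returns a Promise, and awaiting works either way:

// Build a model instance explicitly, then hand it to createAgent
const model = await initChatModel("openai:gpt-4o", {
  temperature: 0,
  maxTokens: 1024,
});

const agent = createAgent({ model, tools: [] });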

Message Types

// Create messages
new HumanMessage("User message")
new AIMessage("Assistant response")
new SystemMessage("System instructions")
new ToolMessage("Tool result", "tool_call_id")

// Multimodal messages
new HumanMessage([
  { type: "text", text: "What's in this image?" },
  { type: "image_url", image_url: "https://example.com/image.jpg" },
])

// Message utilities
filterMessages(messages, { includeTypes: ["human", "ai"] })
trimMessages(messages, { maxTokens: 1000 })

Structured Output Strategies

// Direct schema
responseFormat: ContactSchema

// Schema array (union type)
responseFormat: [EmailSchema, TaskSchema, ReminderSchema]

// Tool strategy
responseFormat: toolStrategy(Schema, {
  name: "generate_output",
  strict: true,
})

// Provider strategy
responseFormat: providerStrategy(Schema)

// JSON schema
responseFormat: {
  name: "output",
  strict: true,
  schema: { /* JSON Schema */ },
}

Middleware Creation

const middleware = createMiddleware({
  name: "my_middleware",

  // Optional: Add state (persisted)
  stateSchema: z.object({
    counter: z.number().default(0),
  }),

  // Optional: Add context (read-only)
  contextSchema: z.object({
    requestId: z.string(),
  }),

  // Optional: Add tools
  tools: [myTool],

  // Lifecycle hooks (all optional):
  beforeAgent: async (state, runtime) => state,
  beforeModel: async (state, runtime) => state,
  afterModel: async (state, runtime) => state,
  afterAgent: async (state, runtime) => state,

  // Wrappers:
  wrapToolCall: async (request, handler, runtime) => {
    console.log(`Calling ${request.toolName}`);
    return await handler(request);
  },

  wrapModelCall: async (state, handler, runtime) => {
    console.log("Calling model");
    return await handler(state);
  },
});
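
To activate it, pass the middleware to createAgent via the middleware array (see the parameters table above):

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  middleware: [middleware],
});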

Type Inference

// Infer agent state
type AgentState = InferAgentState<typeof agent>;

// Infer structured response type
type Response = InferAgentResponse<typeof agent>;

// Infer context type
type Context = InferAgentContext<typeof agent>;

// Infer middleware state
type MState = InferMiddlewareState<typeof middleware>;

// Infer middleware contexts
type MContexts = InferMiddlewareContexts<typeof middlewareArray>;

// Infer merged middleware states
type MStates = InferMiddlewareStates<typeof middlewareArray>;

// Infer tools from middleware
type MTools = InferMiddlewareTools<typeof middlewareArray>;

// Infer tool names
type ToolNames = typeof tools[number]["name"];

// Tools to message set
type ToolSet = ToolsToMessageToolSet<typeof tools>;
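
These helpers are handy for typing code that consumes agent results. A small sketch with InferAgentState; the helper function is illustrative:

type AgentState = InferAgentState<typeof agent>;

// Accepts the agent's final state with full type information
function lastMessageContent(state: AgentState) {
  return state.messages[state.messages.length - 1]?.content;
}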

Model Providers

// OpenAI
"openai:gpt-4o"
"openai:gpt-4o-mini"
"openai:gpt-4-turbo"

// Anthropic
"anthropic:claude-3-5-sonnet-20241022"
"anthropic:claude-3-5-haiku-20241022"
"anthropic:claude-3-opus-20240229"

// Google
"google:gemini-1.5-pro"
"google:gemini-1.5-flash"

// Ollama (local)
"ollama:llama3.1"
"ollama:mistral"
"ollama:phi3"

// Others
"cohere:command-r-plus"
"groq:mixtral-8x7b-32768"
"together:meta-llama/Llama-3-70b-chat-hf"

Built-in Middleware

// Human-in-the-loop
humanInTheLoopMiddleware({ interruptOn: "tools" })

// Message summarization
summarizationMiddleware({ threshold: 1000 })

// PII detection/redaction
piiMiddleware({ strategy: "redact", builtInTypes: ["email", "credit_card"] })
piiRedactionMiddleware({ types: ["email", "ip"] })

// Dynamic system prompts
dynamicSystemPromptMiddleware({ promptFactory: (state) => `...` })

// LLM tool selection
llmToolSelectorMiddleware({ maxTools: 5 })

// Context editing
contextEditingMiddleware({ edits: [new ClearToolUsesEdit()] })

// Limits
toolCallLimitMiddleware({ threadLimit: 100, runLimit: 10 })
modelCallLimitMiddleware({ maxCalls: 5 })

// Todo list
todoListMiddleware()

// Retry and fallback
modelRetryMiddleware({ maxRetries: 3 })
modelFallbackMiddleware({ fallbackModels: ["openai:gpt-4o-mini"] })
toolRetryMiddleware({ maxRetries: 3 })

// Tool emulation
toolEmulatorMiddleware({ emulations: { tool_name: async (args) => "result" } })

// Moderation
openAIModerationMiddleware({ moderateInput: true })

// Prompt caching
anthropicPromptCachingMiddleware({ cacheSystemPrompt: true })

Error Classes

// Agent errors
MultipleToolsBoundError          // Tools already bound to model
MultipleStructuredOutputsError   // Multiple outputs when expecting one
StructuredOutputParsingError     // Failed to parse structured output
ToolInvocationError             // Tool execution failed

// Middleware errors
ToolCallLimitExceededError      // Too many tool calls
PIIDetectionError               // PII detection failed

Environment Variables

# OpenAI
OPENAI_API_KEY="sk-..."

# Anthropic
ANTHROPIC_API_KEY="sk-ant-..."

# Google
GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials.json"

# Cohere
COHERE_API_KEY="..."

# Groq
GROQ_API_KEY="gsk_..."

# Others
MISTRAL_API_KEY="..."
TOGETHER_API_KEY="..."
FIREWORKS_API_KEY="..."

Common Zod Patterns

// Basic types
z.string()
z.number()
z.boolean()
z.date()
z.any()

// Arrays
z.array(z.string())

// Objects
z.object({
  name: z.string(),
  age: z.number(),
})

// Optional
z.string().optional()
z.number().nullable()

// Defaults
z.number().default(0)
z.string().default("default")

// Validation
z.string().email()
z.string().url()
z.string().min(3).max(100)
z.number().min(0).max(100)
z.string().regex(/^[a-z]+$/)

// Enums
z.enum(["option1", "option2"])
z.literal("exact_value")

// Unions
z.union([z.string(), z.number()])

// Discriminated unions
z.discriminatedUnion("type", [
  z.object({ type: z.literal("A"), value: z.string() }),
  z.object({ type: z.literal("B"), count: z.number() }),
])

// Descriptions (helps LLM)
z.string().describe("User's full name")
z.number().describe("Age in years")

Stream Modes

// Full state updates
agent.stream(input, { streamMode: "values" })

// Only changed values
agent.stream(input, { streamMode: "updates" })

// Only new messages
agent.stream(input, { streamMode: "messages" })

Configuration Options

// Invoke configuration
{
  // Persistence
  configurable: {
    thread_id: "thread-123",        // Required for checkpointer
    user_id: "user-456",           // Custom configurable
  },

  // Read-only context
  context: {
    requestId: "req-789",
    ipAddress: "192.168.1.1",
  },

  // Cancellation
  signal: AbortSignal.timeout(30000), // 30s timeout
}

Best Practices

Tool Design

  • Use descriptive, action-oriented names (search_web, not search)
  • Add .describe() to all schema fields
  • Return error messages as strings rather than throwing (see the sketch after this list)
  • Keep return values concise
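
A sketch of a tool following these guidelines; the fetchWeather helper is hypothetical:

const searchWeather = tool(
  async ({ city }) => {
    try {
      return await fetchWeather(city); // hypothetical data source
    } catch (err) {
      // Return the failure as a string so the model can react instead of crashing the run
      return `Weather lookup failed: ${err instanceof Error ? err.message : String(err)}`;
    }
  },
  {
    name: "search_weather",
    description: "Look up the current weather for a city",
    schema: z.object({
      city: z.string().describe("City name, e.g. 'Paris'"),
    }),
  }
);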

Schema Design

  • Use descriptive property names
  • Add descriptions to help the LLM
  • Use enums for constrained choices
  • Make optional properties truly optional (a combined example follows)
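
A schema sketch applying these points (the fields are illustrative):

const TicketSchema = z.object({
  title: z.string().describe("Short summary of the issue"),
  priority: z.enum(["low", "medium", "high"]).describe("Triage priority"),
  assignee: z.string().optional().describe("Username of the assignee, if known"),
});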

Performance

  • Use gpt-4o-mini for simple tasks
  • Stream for better UX
  • Batch independent requests
  • Set appropriate timeouts
  • Cache expensive operations

Error Handling

  • Always catch and handle errors
  • Implement retry logic for transient failures (see the sketch after this list)
  • Log errors for debugging
  • Provide actionable error messages
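
A minimal retry sketch around agent.invoke; the attempt count and backoff are illustrative, and the built-in modelRetryMiddleware above is an alternative:

async function invokeWithRetry(input: { messages: { role: string; content: string }[] }, attempts = 3) {
  for (let i = 0; i < attempts; i++) {
    try {
      return await agent.invoke(input);
    } catch (err) {
      console.error(`Attempt ${i + 1} failed:`, err);            // log for debugging
      if (i === attempts - 1) throw err;                         // give up after the last attempt
      await new Promise((r) => setTimeout(r, 1000 * (i + 1)));   // simple linear backoff
    }
  }
}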

Security

  • Validate all inputs
  • Never expose sensitive data
  • Use environment variables for API keys
  • Consider human-in-the-loop for dangerous operations (sketch below)
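
A sketch for gating a destructive tool with the built-in middleware shown earlier (deleteRecordTool is hypothetical; the interruptOn value mirrors the example above):

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [deleteRecordTool], // hypothetical tool with destructive side effects
  middleware: [humanInTheLoopMiddleware({ interruptOn: "tools" })],
});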

See Also