LangSmith provides seamless integration with LangChain through callbacks and runnable wrappers, enabling automatic tracing of LangChain applications and bidirectional interoperability between LangSmith's traceable functions and LangChain's Runnable interface.
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Get callbacks for tracing handoff
// (call inside a traceable function, or pass an explicit RunTree)
const callbacks = await getLangchainCallbacks();
// Use with any LangChain runnable
const model = new ChatOpenAI();
const response = await model.invoke(
"What is the capital of France?",
{ callbacks }
);

The LangChain integration module provides two primary mechanisms for integrating LangSmith tracing with LangChain applications:
- getLangchainCallbacks() creates LangChain-compatible callbacks that bridge LangSmith's tracing context to LangChain's callback system, enabling automatic trace continuation.
- The RunnableTraceable class wraps LangSmith traceable functions as LangChain Runnables, allowing traceable functions to be used seamlessly within LangChain chains and supporting all standard Runnable methods (invoke, stream, batch).

Obtain LangChain callbacks for tracing handoff from traceable functions to LangChain runnables.
/**
* Get LangChain callbacks for tracing handoff
 * @param currentRunTree - Optional run tree to bind the callbacks to (defaults to the run tree of the current traceable context)
 * @returns Promise resolving to a LangChain-compatible callbacks object, or undefined when no run tree is available
 */
async function getLangchainCallbacks(
  currentRunTree?: RunTree
): Promise<CallbackManager | undefined>;

Usage Examples:
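Note that getLangchainCallbacks is async: forgetting the await hands LangChain a pending Promise instead of a callbacks object, which silently breaks the handoff. A minimal sketch of the call shape (the callback_handoff name is purely illustrative):

import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";

const handoff = traceable(async () => {
  // Must be awaited; resolves to callbacks bound to the current run tree
  const callbacks = await getLangchainCallbacks();
  return callbacks !== undefined; // true when a run tree context was found
}, { name: "callback_handoff" });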
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { StringOutputParser } from "@langchain/core/output_parsers";
// Use within a traceable function
const analyzeText = traceable(async (text: string) => {
// Get callbacks to pass to LangChain
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI({ temperature: 0 });
const parser = new StringOutputParser();
const chain = model.pipe(parser);
// LangChain execution will be traced as a child of this run
const result = await chain.invoke(
`Analyze the following text: ${text}`,
{ callbacks }
);
return result;
}, { name: "analyze_text" });
// Execute traceable function
const analysis = await analyzeText("The quick brown fox jumps over the lazy dog.");

Advanced Example with Explicit Run Tree:
import { RunTree } from "langsmith/run_trees";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Create explicit run tree
const runTree = new RunTree({
name: "my_langchain_app",
run_type: "chain",
inputs: { query: "What is LangSmith?" }
});
// Get callbacks for this specific run tree
const callbacks = await getLangchainCallbacks(runTree);
const model = new ChatOpenAI();
const response = await model.invoke(
"What is LangSmith?",
{ callbacks }
);
// End the run tree
await runTree.end({ output: response });
await runTree.postRun();

Chain Composition:
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
const processDocument = traceable(async (document: string, question: string) => {
const callbacks = await getLangchainCallbacks();
// Create prompt template
const prompt = PromptTemplate.fromTemplate(
"Based on the following document:\n\n{document}\n\nAnswer this question: {question}"
);
const model = new ChatOpenAI({ modelName: "gpt-4" });
// Create chain
const chain = prompt.pipe(model);
// All chain steps will be traced
const result = await chain.invoke(
{ document, question },
{ callbacks }
);
return result.content;
}, { name: "process_document", run_type: "chain" });
// Execute
const answer = await processDocument(
"LangSmith is a platform for debugging, testing, and monitoring LLM applications.",
"What is LangSmith used for?"
);

Wrap LangSmith traceable functions as LangChain Runnables for seamless integration in LangChain chains and workflows.
/**
* RunnableTraceable class wraps traceable functions as LangChain Runnables
*/
class RunnableTraceable<RunInput = any, RunOutput = any> extends Runnable<RunInput, RunOutput> {
/**
* Create RunnableTraceable from a traceable function
* @param func - Traceable function to wrap
* @returns RunnableTraceable instance
*/
static from<RunInput, RunOutput>(
func: TraceableFunction<(input: RunInput) => RunOutput>
): RunnableTraceable<RunInput, RunOutput>;
/**
* Invoke the runnable with a single input
* @param input - Input to the runnable
* @param options - Optional runnable configuration (callbacks, tags, metadata, etc.)
* @returns Promise resolving to output
*/
invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput>;
/**
* Stream results from the runnable
* @param input - Input to the runnable
* @param options - Optional runnable configuration
* @returns Promise resolving to readable stream of output chunks
*/
stream(
input: RunInput,
options?: RunnableConfig
): Promise<IterableReadableStream<RunOutput>>;
/**
* Batch invoke the runnable with multiple inputs
* @param inputs - Array of inputs
* @param options - Optional runnable configuration or array of configs
* @returns Promise resolving to array of outputs
*/
batch(
inputs: RunInput[],
options?: RunnableConfig | RunnableConfig[]
): Promise<RunOutput[]>;
}

Usage Examples:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Define a traceable function
const customProcessor = traceable(
async (input: { text: string }) => {
// Your custom processing logic: return a string so the output can pipe into the chat model
const processed = input.text.toUpperCase();
return processed;
},
{ name: "custom_processor", run_type: "chain" }
);
// Wrap as a LangChain Runnable
const runnableProcessor = RunnableTraceable.from(customProcessor);
// Now you can use it in LangChain chains
const model = new ChatOpenAI();
// Pipe it with other runnables
const chain = runnableProcessor.pipe(model);
// Invoke the chain
const result = await chain.invoke({ text: "hello world" });

Invoke Method:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
// Create traceable function
const analyzeSentiment = traceable(
async (text: string) => {
// Sentiment analysis logic
const sentiment = text.includes("good") ? "positive" : "negative";
return { sentiment, confidence: 0.95 };
},
{ name: "analyze_sentiment" }
);
// Wrap as runnable
const sentimentRunnable = RunnableTraceable.from(analyzeSentiment);
// Invoke with options
const result = await sentimentRunnable.invoke(
"This is a good example",
{
tags: ["sentiment", "analysis"],
metadata: { version: "1.0" }
}
);
// Result: { sentiment: "positive", confidence: 0.95 }

Stream Method:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
// Traceable function that yields results
const generateTokens = traceable(
async function* (prompt: string) {
const words = prompt.split(" ");
for (const word of words) {
yield word;
await new Promise(resolve => setTimeout(resolve, 100));
}
},
{ name: "generate_tokens" }
);
// Wrap as runnable
const tokenRunnable = RunnableTraceable.from(generateTokens);
// Stream results
const stream = await tokenRunnable.stream("Hello world from LangChain");
// Consume stream
for await (const chunk of stream) {
console.log("Chunk:", chunk);
}
// Output: "Hello", "world", "from", "LangChain" (one at a time)Batch Method:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
// Traceable function for processing
const translateText = traceable(
async (input: { text: string; targetLang: string }) => {
// Translation logic (simplified)
return {
translated: `[${input.targetLang}] ${input.text}`,
originalLength: input.text.length
};
},
{ name: "translate_text" }
);
// Wrap as runnable
const translateRunnable = RunnableTraceable.from(translateText);
// Batch invoke with multiple inputs
const results = await translateRunnable.batch([
{ text: "Hello", targetLang: "es" },
{ text: "Goodbye", targetLang: "fr" },
{ text: "Welcome", targetLang: "de" }
]);
console.log(results);
// [
// { translated: "[es] Hello", originalLength: 5 },
// { translated: "[fr] Goodbye", originalLength: 7 },
// { translated: "[de] Welcome", originalLength: 7 }
// ]

Batch with Individual Options:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
const processItem = traceable(
async (item: string) => {
return { processed: item.trim().toLowerCase() };
},
{ name: "process_item" }
);
const processRunnable = RunnableTraceable.from(processItem);
// Batch with different configs for each input
const results = await processRunnable.batch(
["Item 1", "Item 2", "Item 3"],
[
{ tags: ["batch-1"], metadata: { priority: "high" } },
{ tags: ["batch-2"], metadata: { priority: "medium" } },
{ tags: ["batch-3"], metadata: { priority: "low" } }
]
);
console.log(results);
// [
//   { processed: "item 1" },
//   { processed: "item 2" },
//   { processed: "item 3" }
// ]

Chain Composition:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
import { RunnableLambda } from "@langchain/core/runnables";
import { ChatOpenAI } from "@langchain/openai";
// First traceable step
const preprocessor = traceable(
async (input: string) => {
return { text: input.trim().toLowerCase() };
},
{ name: "preprocessor" }
);
// Second traceable step
const postprocessor = traceable(
async (output: any) => {
return { final: output.content.toUpperCase() };
},
{ name: "postprocessor" }
);
// Wrap both as runnables
const preprocessRunnable = RunnableTraceable.from(preprocessor);
const postprocessRunnable = RunnableTraceable.from(postprocessor);
// Create LangChain model
const model = new ChatOpenAI();
// Compose into a chain
const chain = preprocessRunnable
.pipe(RunnableLambda.from((x: { text: string }) => x.text))
.pipe(model)
.pipe(postprocessRunnable);
// Execute the full chain
const result = await chain.invoke(" HELLO WORLD ");
// All steps are traced in LangSmith

Integration with LangChain LCEL:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
// Custom validation step
const validateInput = traceable(
async (input: { query: string }) => {
if (!input.query || input.query.length < 3) {
throw new Error("Query must be at least 3 characters");
}
return input;
},
{ name: "validate_input" }
);
// Custom post-processing
const formatOutput = traceable(
async (response: any) => {
return {
answer: response.content,
timestamp: new Date().toISOString(),
length: response.content.length
};
},
{ name: "format_output" }
);
// Wrap as runnables
const validateRunnable = RunnableTraceable.from(validateInput);
const formatRunnable = RunnableTraceable.from(formatOutput);
// Build chain with LangChain components
const prompt = PromptTemplate.fromTemplate("Answer this question: {query}");
const model = new ChatOpenAI();
const chain = RunnableSequence.from([
validateRunnable,
prompt,
model,
formatRunnable
]);
// Execute with full tracing
const result = await chain.invoke({ query: "What is LangSmith?" });
console.log(result);
// { answer: "...", timestamp: "2024-01-15T...", length: 123 }

When using getLangchainCallbacks() within a traceable function, the current trace context is automatically detected and passed to LangChain:
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
const myApp = traceable(async (userQuery: string) => {
// Automatically uses the current trace context
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
const response = await model.invoke(userQuery, { callbacks });
return response.content;
}, { name: "my_app" });For more control, you can manually create and manage run trees:
import { RunTree } from "langsmith/run_trees";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Create parent run
const parentRun = new RunTree({
name: "parent_operation",
run_type: "chain",
inputs: { query: "user query" }
});
// Create child run for LangChain
const childRun = parentRun.createChild({
name: "langchain_step",
run_type: "llm"
});
const callbacks = await getLangchainCallbacks(childRun);
const model = new ChatOpenAI();
const response = await model.invoke("Hello!", { callbacks });
await childRun.end({ output: response });
await childRun.postRun();
await parentRun.end({ output: response });
await parentRun.postRun();

Combine both approaches for full bidirectional integration:
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks, RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { RunnableSequence } from "@langchain/core/runnables";
// Traceable function as a runnable
const preprocessor = traceable(
async (input: string) => ({ text: input.trim() }),
{ name: "preprocessor" }
);
// Another traceable function that uses LangChain
const processor = traceable(async (input: { text: string }) => {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
const response = await model.invoke(input.text, { callbacks });
return { result: response.content };
}, { name: "processor" });
// Compose everything
const preprocessRunnable = RunnableTraceable.from(preprocessor);
const processorRunnable = RunnableTraceable.from(processor);
const chain = RunnableSequence.from([
preprocessRunnable,
processorRunnable
]);
// Execute with full tracing
const result = await chain.invoke(" Process this text ");LangChain configuration options (tags, metadata, etc.) are properly propagated through RunnableTraceable:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
const myFunction = traceable(
async (input: string) => {
return { output: input.toUpperCase() };
},
{ name: "my_function" }
);
const runnable = RunnableTraceable.from(myFunction);
// Configuration is passed through to the trace
await runnable.invoke("test", {
tags: ["production", "v1"],
metadata: { user_id: "123", session: "abc" },
runName: "custom_run_name"
});

When calling LangChain from within traceable functions, always use getLangchainCallbacks() to maintain the trace hierarchy:
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
const goodExample = traceable(async (input: string) => {
  const callbacks = await getLangchainCallbacks(); // ✓ Correct
const model = new ChatOpenAI();
return await model.invoke(input, { callbacks });
});
const badExample = traceable(async (input: string) => {
const model = new ChatOpenAI();
return await model.invoke(input); // ✗ Missing callbacks - broken trace hierarchy
});

When creating reusable components that need to work in both LangChain and LangSmith contexts, wrap them with RunnableTraceable:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
// Reusable component
const customValidator = traceable(
async (input: any) => {
// Validation logic
return input;
},
{ name: "validator" }
);
// Make it available as a runnable
export const validatorRunnable = RunnableTraceable.from(customValidator);
// Can be used in pure LangChain chains (model and parser defined elsewhere)
const chain = validatorRunnable.pipe(model).pipe(parser);
// Or in pure traceable workflows
const workflow = traceable(async (data) => {
const validated = await customValidator(data);
return processData(validated); // processData() is an application-specific helper
});

Leverage TypeScript generics for type-safe runnable wrappers:
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
interface MyInput {
query: string;
context: string[];
}
interface MyOutput {
answer: string;
sources: string[];
}
const typedFunction = traceable(
async (input: MyInput): Promise<MyOutput> => {
// Implementation
return {
answer: "result",
sources: input.context
};
},
{ name: "typed_function" }
);
// Type-safe runnable
const typedRunnable = RunnableTraceable.from<MyInput, MyOutput>(typedFunction);
// TypeScript will enforce types
const result = await typedRunnable.invoke({
query: "test",
context: ["doc1", "doc2"]
});
// result is typed as MyOutput

Both integration methods properly handle and trace errors:
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
const robustFunction = traceable(async (input: string) => {
try {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
return await model.invoke(input, { callbacks });
} catch (error) {
// Error is automatically captured in the trace
console.error("LangChain call failed:", error);
throw error; // Re-throw to propagate
}
}, { name: "robust_function" });