tessl install tessl/npm-langsmith@0.4.3
TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Seamless integration with LangChain through callbacks and runnable wrappers.
LangSmith provides bidirectional integration with LangChain, enabling automatic tracing of LangChain applications and allowing LangSmith traceable functions to work within LangChain chains.
import { getLangchainCallbacks, RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Get callbacks for tracing
const callbacks = await getLangchainCallbacks();
// Use with LangChain runnable
const model = new ChatOpenAI();
const response = await model.invoke(
"What is the capital of France?",
{ callbacks }
);
Obtain LangChain callbacks for tracing handoff.
/**
 * Get LangChain callbacks for tracing handoff
 * @param currentRunTree - Optional run tree context
 * @returns Promise resolving to LangChain-compatible callbacks
 */
async function getLangchainCallbacks(currentRunTree?: RunTree): Promise<CallbackManager | undefined>;
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { StringOutputParser } from "@langchain/core/output_parsers";
const analyzeText = traceable(async (text: string) => {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI({ temperature: 0 });
const parser = new StringOutputParser();
const chain = model.pipe(parser);
// LangChain execution traced as child
const result = await chain.invoke(
`Analyze: ${text}`,
{ callbacks }
);
return result;
}, { name: "analyze_text" });
Using explicit run trees with callbacks.
import { RunTree } from "langsmith/run_trees";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Create explicit run tree
const runTree = new RunTree({
name: "my_langchain_app",
run_type: "chain",
inputs: { query: "What is LangSmith?" }
});
// Get callbacks for this specific run tree
const callbacks = await getLangchainCallbacks(runTree);
const model = new ChatOpenAI();
const response = await model.invoke(
"What is LangSmith?",
{ callbacks }
);
// End the run tree
await runTree.end({ output: response });
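// Note: if the run had already been uploaded with postRun() right after
// creation, call patchRun() here instead to upload the outputs and end time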
await runTree.postRun();
Building complex chains with tracing.
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
const processDocument = traceable(async (document: string, question: string) => {
const callbacks = await getLangchainCallbacks();
// Create prompt template
const prompt = PromptTemplate.fromTemplate(
"Based on the following document:\n\n{document}\n\nAnswer this question: {question}"
);
const model = new ChatOpenAI({ modelName: "gpt-4" });
// Create chain
const chain = prompt.pipe(model);
// All chain steps will be traced
const result = await chain.invoke(
{ document, question },
{ callbacks }
);
return result.content;
}, { name: "process_document", run_type: "chain" });
// Execute
const answer = await processDocument(
"LangSmith is a platform for debugging, testing, and monitoring LLM applications.",
"What is LangSmith used for?"
);
Wrap traceable functions as LangChain Runnables.
Note: RunnableTraceable is deprecated. Modern versions of LangChain can directly use traceable functions or wrap them using standard LangChain patterns. This class is maintained for backward compatibility.
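A minimal sketch of the modern pattern the note refers to, assuming current @langchain/core (the shout function is hypothetical): a traceable function is still an ordinary async function, so it can be wrapped with a plain RunnableLambda instead of RunnableTraceable.
import { traceable } from "langsmith/traceable";
import { RunnableLambda } from "@langchain/core/runnables";
// Hypothetical traceable function
const shout = traceable(
async (text: string) => text.toUpperCase(),
{ name: "shout" }
);
// Wrap it like any other async function
const shoutRunnable = RunnableLambda.from(async (input: string) => shout(input));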
/**
* Wrap traceable function as LangChain Runnable
* @deprecated Wrap or pass traceable functions directly instead
*/
class RunnableTraceable<RunInput, RunOutput> extends Runnable<RunInput, RunOutput> {
/**
* Create from traceable function
* @param func - Traceable function
* @returns RunnableTraceable instance
*/
static from<RunInput, RunOutput>(
func: TraceableFunction<(input: RunInput) => RunOutput>
): RunnableTraceable<RunInput, RunOutput>;
/**
* Invoke the runnable
* @param input - Input data
* @param options - Runnable configuration
* @returns Promise resolving to output
*/
invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput>;
/**
* Batch invoke
* @param inputs - Array of inputs
* @param options - Configuration
* @returns Promise resolving to outputs
*/
batch(
inputs: RunInput[],
options?: RunnableConfig | RunnableConfig[]
): Promise<RunOutput[]>;
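// stream(), pipe(), and the other methods used in the examples
// below are inherited from the Runnable base class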
}
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Define traceable function
const customProcessor = traceable(
async (input: { text: string }) => {
const processed = input.text.toUpperCase();
// Return a plain string so the output can be piped directly into the model
return processed;
},
{ name: "custom_processor" }
);
// Wrap as Runnable
const runnableProcessor = RunnableTraceable.from(customProcessor);
// Use in LangChain chains
const model = new ChatOpenAI();
const chain = runnableProcessor.pipe(model);
const result = await chain.invoke({ text: "hello world" });
Streaming results with traceable functions.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
// Traceable function that yields results
const generateTokens = traceable(
async function* (prompt: string) {
const words = prompt.split(" ");
for (const word of words) {
yield word;
await new Promise(resolve => setTimeout(resolve, 100));
}
},
{ name: "generate_tokens" }
);
// Wrap as runnable
const tokenRunnable = RunnableTraceable.from(generateTokens);
// Stream results
const stream = await tokenRunnable.stream("Hello world from LangChain");
// Consume stream
for await (const chunk of stream) {
console.log("Chunk:", chunk);
}
// Output: "Hello", "world", "from", "LangChain" (one at a time)
Batch processing with traceable functions.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
// Traceable function for processing
const translateText = traceable(
async (input: { text: string; targetLang: string }) => {
// Translation logic (simplified)
return {
translated: `[${input.targetLang}] ${input.text}`,
originalLength: input.text.length
};
},
{ name: "translate_text" }
);
// Wrap as runnable
const translateRunnable = RunnableTraceable.from(translateText);
// Batch invoke with multiple inputs
const results = await translateRunnable.batch([
{ text: "Hello", targetLang: "es" },
{ text: "Goodbye", targetLang: "fr" },
{ text: "Welcome", targetLang: "de" }
]);
console.log(results);
// [
// { translated: "[es] Hello", originalLength: 5 },
// { translated: "[fr] Goodbye", originalLength: 7 },
// { translated: "[de] Welcome", originalLength: 7 }
// ]
Applying different configurations to each batch item.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
const processItem = traceable(
async (item: string) => {
return { processed: item.trim().toLowerCase() };
},
{ name: "process_item" }
);
const processRunnable = RunnableTraceable.from(processItem);
// Batch with different configs for each input
const results = await processRunnable.batch(
["Item 1", "Item 2", "Item 3"],
[
{ tags: ["batch-1"], metadata: { priority: "high" } },
{ tags: ["batch-2"], metadata: { priority: "medium" } },
{ tags: ["batch-3"], metadata: { priority: "low" } }
]
);
Composing multiple traceable functions into a LangChain sequence.
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks, RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { RunnableSequence } from "@langchain/core/runnables";
// Traceable as runnable
const preprocessor = traceable(
async (input: string) => ({ text: input.trim() }),
{ name: "preprocessor" }
);
// Traceable that uses LangChain
const processor = traceable(async (input: { text: string }) => {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
const response = await model.invoke(input.text, { callbacks });
return { result: response.content };
}, { name: "processor" });
// Compose everything
const preprocessRunnable = RunnableTraceable.from(preprocessor);
const processorRunnable = RunnableTraceable.from(processor);
const chain = RunnableSequence.from([
preprocessRunnable,
processorRunnable
]);
const result = await chain.invoke(" Process this ");Building complex chains with mixed components.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
import { RunnableLambda } from "@langchain/core/runnables";
import { ChatOpenAI } from "@langchain/openai";
// First traceable step
const preprocessor = traceable(
async (input: string) => {
return { text: input.trim().toLowerCase() };
},
{ name: "preprocessor" }
);
// Second traceable step
const postprocessor = traceable(
async (output: any) => {
return { final: output.content.toUpperCase() };
},
{ name: "postprocessor" }
);
// Wrap both as runnables
const preprocessRunnable = RunnableTraceable.from(preprocessor);
const postprocessRunnable = RunnableTraceable.from(postprocessor);
// Create LangChain model
const model = new ChatOpenAI();
// Compose into a chain
const chain = preprocessRunnable
.pipe(RunnableLambda.from((x: { text: string }) => x.text))
.pipe(model)
.pipe(postprocessRunnable);
// Execute the full chain
const result = await chain.invoke(" HELLO WORLD ");
// All steps are traced in LangSmith
Using traceable functions within LangChain Expression Language chains.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { RunnableSequence } from "@langchain/core/runnables";
// Custom validation step
const validateInput = traceable(
async (input: { query: string }) => {
if (!input.query || input.query.length < 3) {
throw new Error("Query must be at least 3 characters");
}
return input;
},
{ name: "validate_input" }
);
// Custom post-processing
const formatOutput = traceable(
async (response: any) => {
return {
answer: response.content,
timestamp: new Date().toISOString(),
length: response.content.length
};
},
{ name: "format_output" }
);
// Wrap as runnables
const validateRunnable = RunnableTraceable.from(validateInput);
const formatRunnable = RunnableTraceable.from(formatOutput);
// Build chain with LangChain components
const prompt = PromptTemplate.fromTemplate("Answer this question: {query}");
const model = new ChatOpenAI();
const chain = RunnableSequence.from([
validateRunnable,
prompt,
model,
formatRunnable
]);
// Execute with full tracing
const result = await chain.invoke({ query: "What is LangSmith?" });
console.log(result);
// { answer: "...", timestamp: "2024-01-15T...", length: 123 }
Automatic trace context detection within traceable functions.
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
const myApp = traceable(async (userQuery: string) => {
// Automatically uses current trace context
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
const response = await model.invoke(userQuery, { callbacks });
return response.content;
}, { name: "my_app" });
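A hypothetical call site, to show that no explicit context wiring is needed:
// Calling the traceable function creates the root run automatically
const answer = await myApp("What is LangSmith?");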
Explicit control over run tree creation and management.
import { RunTree } from "langsmith/run_trees";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Create parent run
const parentRun = new RunTree({
name: "parent_operation",
run_type: "chain",
inputs: { query: "user query" }
});
// Create child run for LangChain
const childRun = parentRun.createChild({
name: "langchain_step",
run_type: "llm"
});
const callbacks = await getLangchainCallbacks(childRun);
const model = new ChatOpenAI();
const response = await model.invoke("Hello!", { callbacks });
await childRun.end({ output: response });
await childRun.postRun();
await parentRun.end({ output: response });
await parentRun.postRun();
LangChain configuration options properly propagated through RunnableTraceable.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
const myFunction = traceable(
async (input: string) => {
return { output: input.toUpperCase() };
},
{ name: "my_function" }
);
const runnable = RunnableTraceable.from(myFunction);
// Configuration is passed through to the trace
await runnable.invoke("test", {
tags: ["production", "v1"],
metadata: { user_id: "123", session: "abc" },
runName: "custom_run_name"
});
Always create LangChain callbacks inside traceable functions; calling a model without them breaks the trace hierarchy.
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
// Good
const withCallbacks = traceable(async (input: string) => {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
return await model.invoke(input, { callbacks });
});
// Bad - broken trace hierarchy
const withoutCallbacks = traceable(async (input: string) => {
const model = new ChatOpenAI();
return await model.invoke(input); // Missing callbacks
});
Type parameters on RunnableTraceable give end-to-end type safety.
import { traceable } from "langsmith/traceable";
import { RunnableTraceable } from "langsmith/langchain";
interface MyInput {
query: string;
context: string[];
}
interface MyOutput {
answer: string;
sources: string[];
}
const typedFunction = traceable(
async (input: MyInput): Promise<MyOutput> => {
return {
answer: "result",
sources: input.context
};
},
{ name: "typed_function" }
);
const typedRunnable = RunnableTraceable.from<MyInput, MyOutput>(typedFunction);
// TypeScript will enforce types
const result = await typedRunnable.invoke({
query: "test",
context: ["doc1", "doc2"]
});
// result is typed as MyOutput
Both integration methods properly handle and trace errors.
import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
const robustFunction = traceable(async (input: string) => {
try {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
return await model.invoke(input, { callbacks });
} catch (error) {
// Error is automatically captured in the trace
console.error("LangChain call failed:", error);
throw error; // Re-throw to propagate
}
}, { name: "robust_function" });