tessl install tessl/npm-langsmith@0.4.3

TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Common patterns, code snippets, and essential APIs for quick lookup.
// Core
import { Client } from "langsmith";
import { traceable } from "langsmith/traceable";
import { RunTree } from "langsmith";
// Evaluation
import { evaluate } from "langsmith/evaluation";
// Wrappers
import { wrapOpenAI } from "langsmith/wrappers/openai";
import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import { wrapAISDK } from "langsmith/experimental/vercel";
// LangChain
import { getLangchainCallbacks, RunnableTraceable } from "langsmith/langchain";
// Testing
import { test, expect, wrapEvaluator } from "langsmith/jest";
import { test, expect, wrapEvaluator } from "langsmith/vitest";
// Utilities
import { createAnonymizer } from "langsmith/anonymizer";
import { uuid7, uuid7FromTime, getDefaultProjectName } from "langsmith";

# Required
LANGCHAIN_API_KEY=lsv2_pt_... # Your API key
# Optional
LANGCHAIN_PROJECT=my-project # Default project name
LANGCHAIN_ENDPOINT=https://... # API endpoint
LANGCHAIN_TRACING=true # Enable/disable tracing

import { Client } from "langsmith";
// Use environment variables
const client = new Client();
// Explicit configuration
const client = new Client({
apiUrl: "https://api.smith.langchain.com",
apiKey: process.env.LANGCHAIN_API_KEY,
timeout_ms: 10000,
});
// Production configuration
const client = new Client({
autoBatchTracing: true,
tracingSamplingRate: 0.1, // 10% sampling
hideInputs: (inputs) => redactPII(inputs),
});

import { traceable } from "langsmith/traceable";
const myFunction = traceable(
async (input: string) => {
return `Processed: ${input}`;
},
{ name: "my-function", run_type: "chain" }
);
await myFunction("test");

const retrieve = traceable(
async (query: string) => await vectorDB.search(query),
{ name: "retrieve", run_type: "retriever" }
);
const generate = traceable(
async (query: string, docs: string[]) => await llm.generate({ query, context: docs.join("\n") }),
{ name: "generate", run_type: "llm" }
);
const ragPipeline = traceable(
async (query: string) => {
const docs = await retrieve(query);
const answer = await generate(query, docs);
return answer;
},
{ name: "rag-pipeline", run_type: "chain" }
);

import { traceable, getCurrentRunTree } from "langsmith/traceable";
const myFunction = traceable(async (input: string) => {
const runTree = getCurrentRunTree();
// Add metadata dynamically
runTree.metadata = { ...runTree.metadata, processed: true };
return `Processed: ${input}`;
}, { name: "my-function" });

import { RunTree } from "langsmith";
const parentRun = new RunTree({
name: "parent-operation",
run_type: "chain",
inputs: { query: "What is AI?" },
});
const llmRun = parentRun.createChild({
name: "llm-call",
run_type: "llm",
});
await llmRun.end({ response: "AI is..." });
await llmRun.postRun();
await parentRun.end({ result: "Complete" });
await parentRun.postRun();

import { evaluate } from "langsmith/evaluation";
import { Client } from "langsmith";
const client = new Client();
// Create dataset
const dataset = await client.createDataset({
datasetName: "qa-eval",
description: "QA evaluation dataset"
});
await client.createExamples({
datasetId: dataset.id,
inputs: [{ question: "What is 2+2?" }],
outputs: [{ answer: "4" }]
});
// Define target function
async function myBot(input: { question: string }) {
return { answer: await generateAnswer(input.question) };
}
// Run evaluation
const results = await evaluate(myBot, {
data: "qa-eval",
evaluators: [
({ run, example }) => ({
key: "correctness",
score: run.outputs?.answer === example?.outputs?.answer ? 1 : 0
})
]
});

// Simple correctness
const correctnessEvaluator = ({ run, example }) => ({
key: "correctness",
score: run.outputs?.answer === example?.outputs?.answer ? 1 : 0
});
// With LLM judge
const qualityEvaluator = async ({ run, example }) => {
const judgment = await llmJudge(run.outputs, example.outputs);
return {
key: "quality",
score: judgment.score,
comment: judgment.reasoning
};
};
// Latency check
const latencyEvaluator = ({ run }) => {
const latency = (run.end_time || 0) - (run.start_time || 0);
return {
key: "latency",
score: latency < 1000 ? 1 : 0,
value: latency,
comment: `${latency}ms`
};
};

import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";
const openai = wrapOpenAI(new OpenAI(), {
project_name: "openai-project"
});
const response = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Hello!" }]
});

import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import Anthropic from "@anthropic-ai/sdk";
const anthropic = wrapAnthropic(new Anthropic(), {
project_name: "anthropic-project"
});
const message = await anthropic.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello!" }]
});

import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "vercel-app" }
);
const { text } = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "Hello!"
});

import { Client } from "langsmith";
const client = new Client();
const dataset = await client.createDataset({
datasetName: "my-dataset",
description: "Test dataset",
dataType: "kv"
});

// Single example
await client.createExample({
dataset_id: dataset.id,
inputs: { question: "What is 2+2?" },
outputs: { answer: "4" }
});
// Bulk examples
await client.createExamples({
datasetName: "my-dataset",
inputs: [
{ question: "What is 2+2?" },
{ question: "What is 3+3?" }
],
outputs: [
{ answer: "4" },
{ answer: "6" }
]
});

for await (const example of client.listExamples({
datasetName: "my-dataset",
limit: 100
})) {
console.log(example.inputs, example.outputs);
}

import { Client } from "langsmith";
const client = new Client();
// Thumbs up/down
await client.createFeedback(runId, "user_rating", {
score: 1, // 1 = thumbs up, 0 = thumbs down
comment: "Great response!",
});
// Numeric score
await client.createFeedback(runId, "accuracy", {
score: 0.95,
comment: "Highly accurate",
});
// With correction
await client.createFeedback(runId, "correctness", {
score: 0,
correction: { answer: "Correct answer" },
});

const token = await client.createPresignedFeedbackToken(runId, "user_rating", {
expiration: { hours: 24 } // accepts a TimeDelta object or an ISO-8601 duration string
});
// Share token.url with users
// They can POST feedback without an API key
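Client-side use of the presigned URL (a sketch; the accepted payload shape is an assumption mirroring createFeedback's fields):

await fetch(token.url, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  // Assumed payload; mirrors the createFeedback options above
  body: JSON.stringify({ score: 1, comment: "Helpful answer" })
});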
import { Client } from "langsmith";
const client = new Client();
// Basic listing
for await (const run of client.listRuns({
projectName: "my-project",
limit: 100
})) {
console.log(run.name, run.status);
}
// With filters
for await (const run of client.listRuns({
projectName: "my-project",
filter: 'and(eq(error, null), gte(latency, "1s"))',
isRoot: true
})) {
console.log(`Slow run: ${run.name}`);
}
// Root runs only
for await (const run of client.listRuns({
projectName: "my-project",
isRoot: true
})) {
console.log(`Root: ${run.name}`);
}

// Read run with children
const run = await client.readRun(runId, { loadChildRuns: true });
console.log(run.child_runs);
// Get run URL
const url = await client.getRunUrl({
runId: runId,
projectOpts: { projectName: "my-project" }
});

import { Client } from "langsmith";
const client = new Client();
// Create prompt
await client.createPrompt("my-prompt", {
description: "Customer greeting prompt",
tags: ["customer-service"]
});
// Push version
await client.pushPrompt("my-prompt", {
object: {
type: "chat",
messages: [
{ role: "system", content: "You are helpful." },
{ role: "user", content: "{query}" }
]
},
description: "Initial version"
});

// Pull the latest commit of a prompt
const promptCommit = await client.pullPromptCommit("my-prompt");
// The manifest holds the serialized prompt pushed above; the mapping below
// assumes its shape mirrors the pushed object
const messages = promptCommit.manifest.messages.map((msg) => ({
role: msg.role,
content: msg.content.replace("{query}", userQuery)
}));

import { Client } from "langsmith";
const client = new Client({
hideInputs: true,
hideOutputs: true
});
// Or selective
const client = new Client({
hideInputs: (inputs) => {
const { apiKey, password, ...safe } = inputs;
return safe;
}
});

import { traceable } from "langsmith/traceable";
import { createAnonymizer } from "langsmith/anonymizer";
const anonymizer = createAnonymizer([
{ pattern: /\b[\w\.-]+@[\w\.-]+\.\w+\b/g, replace: "[EMAIL]" },
{ pattern: /\bsk-[a-zA-Z0-9]{32,}\b/g, replace: "[API_KEY]" },
{ pattern: /\b\d{3}-\d{2}-\d{4}\b/g, replace: "[SSN]" }
]);
const privateFunction = traceable(
async (input: string) => processData(input),
{
name: "private-function",
processInputs: anonymizer,
processOutputs: anonymizer
}
);
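The same anonymizer can also drive client-level hiding (a sketch reusing the rules above):

import { Client } from "langsmith";

const safeClient = new Client({
  hideInputs: (inputs) => anonymizer(inputs),
  hideOutputs: (outputs) => anonymizer(outputs)
});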
import { test, expect, wrapEvaluator } from "langsmith/jest";

test(
"greeting generation",
{
inputs: { name: "Alice" },
referenceOutputs: { greeting: "Hello, Alice!" }
},
async ({ inputs }) => {
return { greeting: `Hello, ${inputs.name}!` };
}
);
// Custom evaluator (receives an object of params, not positional args)
const lengthEvaluator = wrapEvaluator(async ({ outputs }) => ({
key: "length",
score: String(outputs.result).length >= 10 ? 1 : 0
}));
test(
"with evaluator",
{
inputs: { text: "test" }
},
async ({ inputs }) => {
const result = await processText(inputs.text);
await expect({ result }).evaluatedBy(lengthEvaluator).toBe(1);
return { result };
}
);

import { test, expect } from "langsmith/vitest";
// Identical API to Jest
test(
"translation test",
{
inputs: { text: "Hello", lang: "es" },
referenceOutputs: { translation: "Hola" }
},
async ({ inputs }) => {
return { translation: await translate(inputs.text, inputs.lang) };
}
);
// Custom matchers (async - await them)
await expect(output).toBeSemanticCloseTo("Expected meaning", {
threshold: 0.85
});
await expect(output).toBeRelativeCloseTo("Expected text", {
threshold: 0.8
});

Vitest configuration:
// vitest.config.ts
import { defineConfig } from "vitest/config";
export default defineConfig({
test: {
reporters: ["default", "langsmith/vitest/reporter"]
}
});

import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";
const analyzeText = traceable(async (text: string) => {
const callbacks = await getLangchainCallbacks();
const model = new ChatOpenAI();
const result = await model.invoke(text, { callbacks });
return result;
}, { name: "analyze-text" });import { uuid7, uuid7FromTime } from "langsmith";
import { uuid7, uuid7FromTime } from "langsmith";

// Generate UUID v7
const runId = uuid7();
// From timestamp
const timestampId = uuid7FromTime(Date.now());
const dateId = uuid7FromTime("2024-01-01T00:00:00Z");

import { getDefaultProjectName } from "langsmith";
const projectName = getDefaultProjectName();
console.log("Using project:", projectName);import { overrideFetchImplementation } from "langsmith";
const customFetch = (url: string, init?: RequestInit) => {
console.log("Fetching:", url);
return fetch(url, init);
};
overrideFetchImplementation(customFetch);

Best practices:
- Use the traceable() decorator for automatic tracing
- Give each run a descriptive name and an appropriate run_type
- Attach metadata and tags to make runs filterable
- Use tracingSamplingRate to control trace volume
- Use hideInputs/hideOutputs for sensitive data
- Use processInputs/processOutputs to redact sensitive data per traceable
- Set hideInputs: true for client-level hiding
- Use createAnonymizer() for pattern-based PII removal
- Verify LANGCHAIN_API_KEY is set correctly
- Call await client.awaitPendingTraceBatches() before app shutdown (see the sketch after the import notes below)

// Correct - use subpath exports
import { traceable } from "langsmith/traceable";
import { evaluate } from "langsmith/evaluation";
// Incorrect - won't work
import { traceable } from "langsmith";// Import types from langsmith/schemas
// Import types from langsmith/schemas
import type { Run, Example, Feedback } from "langsmith/schemas";

type RunType =
| "llm" // Direct language model API call
| "chain" // Sequence of operations
| "tool" // Tool/function execution
| "retriever" // Document retrieval
| "embedding" // Embedding generation
| "prompt" // Prompt formatting
| "parser"; // Output parsinginterface ClientConfig {
apiUrl?: string;
apiKey?: string;
timeout_ms?: number;
autoBatchTracing?: boolean;
hideInputs?: boolean | ((inputs: KVMap) => KVMap);
hideOutputs?: boolean | ((outputs: KVMap) => KVMap);
tracingSamplingRate?: number;
}

interface TraceableConfig {
name?: string;
run_type?: string;
metadata?: Record<string, any>;
tags?: string[];
client?: Client;
project_name?: string;
processInputs?: (inputs: any) => KVMap;
processOutputs?: (outputs: any) => KVMap;
}
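A traceable exercising these config fields (a sketch; the function body and values are illustrative):

import { traceable } from "langsmith/traceable";

const step = traceable(
  async (input: string) => input.toUpperCase(),
  {
    name: "normalize",
    run_type: "tool",
    metadata: { version: "1.0" },
    tags: ["preprocessing"],
    project_name: "my-project"
  }
);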
interface EvaluateOptions {
data: string | Example[];
evaluators: EvaluatorT[];
summaryEvaluators?: SummaryEvaluatorT[];
experimentPrefix?: string;
maxConcurrency?: number;
metadata?: Record<string, any>;
}
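Summary evaluators receive all runs and examples at once; a minimal sketch reusing myBot and correctnessEvaluator from earlier (the pass-rate metric is illustrative):

import { evaluate } from "langsmith/evaluation";
import type { Run, Example } from "langsmith/schemas";

// Aggregate metric computed once over the whole experiment
const passRate = (runs: Run[], examples?: Example[]) => ({
  key: "pass_rate",
  score: runs.filter((r) => !r.error).length / runs.length
});

await evaluate(myBot, {
  data: "qa-eval",
  evaluators: [correctnessEvaluator],
  summaryEvaluators: [passRate],
  experimentPrefix: "qa-eval-v1",
  maxConcurrency: 4
});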