tessl install tessl/npm-langsmith@0.4.3

TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Comprehensive tracing capabilities for LLM applications, from simple function decorators to manual run tree construction and distributed tracing across services.
langsmith (npm)

npm install langsmith

langsmith/traceable

LangSmith tracing captures execution details of your LLM applications, enabling debugging, performance monitoring, and quality analysis. Traces are organized hierarchically, allowing you to see the complete execution flow from high-level operations down to individual LLM calls.
When to use tracing:
import { traceable, getCurrentRunTree, withRunTree, isTraceableFunction, ROOT } from "langsmith/traceable";
import { RunTree } from "langsmith";

For CommonJS:
const { traceable, getCurrentRunTree, withRunTree, isTraceableFunction, ROOT } = require("langsmith/traceable");
const { RunTree } = require("langsmith");

import { traceable } from "langsmith/traceable";
const myFunction = traceable(
async (input: string) => {
return `Processed: ${input}`;
},
{ name: "my-function", run_type: "chain" }
);
await myFunction("test"); // Automatically traced

For CommonJS:

const { traceable } = require("langsmith/traceable");

import { traceable } from "langsmith/traceable";
const retrieveDocs = traceable(
async (query: string) => await vectorDB.search(query),
{ name: "retrieve", run_type: "retriever" }
);
const generateAnswer = traceable(
async (query: string, docs: string[]) => {
return await llm.generate({ query, context: docs.join("\n") });
},
{ name: "generate", run_type: "llm" }
);
const ragPipeline = traceable(
async (query: string) => {
const docs = await retrieveDocs(query); // Traced as child
const answer = await generateAnswer(query, docs); // Traced as child
return answer;
},
{ name: "rag-pipeline", run_type: "chain" }
);
// Creates hierarchical trace: rag-pipeline > retrieve > generate
await ragPipeline("What is LangSmith?");

Wrap any function with automatic tracing to LangSmith.
/**
 * Wrap a function with automatic tracing to LangSmith.
 *
 * The returned function preserves the call signature of `wrappedFunc`; each
 * invocation is recorded as a run, nested under any run already active in the
 * async context (so nested traceable calls form a hierarchy).
 *
 * @param wrappedFunc - Function to wrap
 * @param config - Tracing configuration (name, run_type, metadata, tags, ...)
 * @returns Traceable function with the same signature as `wrappedFunc`
 */
function traceable<Func>(
wrappedFunc: Func,
config?: TraceableConfig<Func>
): TraceableFunction<Func>;
/**
 * Configuration accepted by `traceable()`.
 *
 * Core options control how the run is named, typed, and annotated; the
 * advanced options at the bottom are inherited from RunTreeConfig and support
 * distributed tracing and custom timing (see run-trees.md for full details).
 */
interface TraceableConfig<Func> {
  /** Run name override (defaults to function name) */
  name?: string;
  /** Run type: "llm" | "chain" | "tool" | "retriever" | "embedding" | "prompt" | "parser" */
  run_type?: string;
  /** Additional metadata */
  metadata?: KVMap;
  /** Tags for filtering */
  tags?: string[];
  /** Custom client instance */
  client?: Client;
  /** Project name override */
  project_name?: string;
  /** Enable or disable tracing for this function */
  tracingEnabled?: boolean;
  /** Callback invoked when run completes */
  on_end?: (runTree: RunTree) => void;
  /** Aggregator function for reducing batched inputs */
  aggregator?: (args: any[]) => any;
  /** Path to extract config from arguments: [argIndex] or [argIndex, propertyName] */
  argsConfigPath?: [number] | [number, string];
  /**
   * Extract attachments from arguments.
   * FIX: was `[Attachments?, KVMap]`, which is invalid TypeScript — an
   * optional tuple element may not precede a required one. The possibly
   * absent first element is expressed as `Attachments | undefined` instead.
   */
  extractAttachments?: (...args: any[]) => [Attachments | undefined, KVMap];
  /**
   * Extract LLM invocation parameters.
   * FIX: was `=> InvocationParamsSchema?` — postfix `?` is not valid type
   * syntax in a return position; an explicit union with `undefined` is used.
   */
  getInvocationParams?: (...args: any[]) => InvocationParamsSchema | undefined;
  /** Transform inputs before logging (supports async) */
  processInputs?: (inputs: any) => KVMap | Promise<KVMap>;
  /** Transform outputs before logging (supports async) */
  processOutputs?: (outputs: any) => KVMap | Promise<KVMap>;
  /** Custom OpenTelemetry tracer for integration */
  tracer?: any; // OTELTracer type
  /** Additional arbitrary data beyond metadata */
  extra?: Record<string, unknown>;
  // Advanced: Inherited from RunTreeConfig (see run-trees.md for full details)
  /** Custom run ID (UUID) */
  id?: string;
  /** Trace UUID for grouping related runs */
  trace_id?: string;
  /** Dotted order string for distributed tracing */
  dotted_order?: string;
  /** Parent run UUID */
  parent_run_id?: string;
  /** Replica configurations for distributed tracing */
  replicas?: any[];
  /** Parent ID for distributed tracing */
  distributedParentId?: string;
  /** Custom start time (milliseconds or ISO string) */
  start_time?: number | string;
  /** Custom end time (milliseconds or ISO string) */
  end_time?: number | string;
  /** Reference to dataset example UUID */
  reference_example_id?: string;
}
/**
 * LangSmith-standard LLM invocation parameters (keys prefixed `ls_`).
 * Additional provider-specific keys are permitted via the index signature.
 */
interface InvocationParamsSchema {
ls_provider?: string;
ls_model_name?: string;
ls_model_type?: "chat" | "llm";
ls_temperature?: number;
ls_max_tokens?: number;
ls_stop?: string[];
[key: string]: any;
}
/** Map of attachment name to its data. */
type Attachments = Record<string, AttachmentData>;
/** A single attachment logged with a run: MIME type plus raw payload. */
interface AttachmentData {
mime_type: string;
data: string | Uint8Array;
}
/**
 * Type of a traceable function: the wrapped function's own type (`Func`)
 * augmented with helpers for inspecting and reconfiguring its tracing.
 */
type TraceableFunction<Func> = Func & {
/** Get run tree for the current traceable function invocation */
getCurrentRunTree(): RunTree;
/** Configure the traceable function; returns a new variant, leaving this one unchanged */
withConfig(config: Partial<TraceableConfig<Func>>): TraceableFunction<Func>;
};
/**
 * Minimal run-tree-like shape for context management — the subset of RunTree
 * fields needed to identify a run and its place in a trace.
 */
interface RunTreeLike {
/** Run ID */
id: string;
/** Run name */
name: string;
/** Run type */
run_type?: string;
/** Trace ID */
trace_id?: string;
/** Dotted order for distributed tracing */
dotted_order?: string;
}import { traceable } from "langsmith/traceable";
// Basic tracing
const myFunction = traceable(
async (input: string) => `Processed: ${input}`,
{ name: "my-function", run_type: "chain" }
);
// With metadata and tags
const pipeline = traceable(
async (query: string, context: string[]) => {
return result;
},
{
name: "rag-pipeline",
run_type: "chain",
metadata: { version: "1.0" },
tags: ["production", "rag"]
}
);
// Transform inputs/outputs
const sanitized = traceable(
async (apiKey: string, data: any) => {
return await callAPI(apiKey, data);
},
{
name: "api-call",
processInputs: (inputs) => ({ data: inputs.data }), // Hide API key
processOutputs: (outputs) => ({ success: true }) // Hide response
}
);
// Using getCurrentRunTree() method on traceable function
const processData = traceable(
async (data: string) => {
// Access run tree from the traceable function itself
const runTree = processData.getCurrentRunTree();
// Add metadata dynamically
runTree.metadata = { ...runTree.metadata, processed: true };
return `Processed: ${data}`;
},
{ name: "process-data", run_type: "tool" }
);
// Using withConfig() to create configured variants
const baseFunction = traceable(
async (input: string) => {
return await processInput(input);
},
{ name: "base-function", run_type: "chain" }
);
// Create production variant with additional metadata
const productionFunction = baseFunction.withConfig({
metadata: { environment: "production" },
tags: ["production", "critical"]
});
// Create development variant with different project
const devFunction = baseFunction.withConfig({
metadata: { environment: "development" },
tags: ["dev"],
project_name: "dev-project"
});
// Use configured variants
await productionFunction("prod data"); // Traced with production config
await devFunction("dev data"); // Traced with dev config

Conditional Tracing:
import { traceable } from "langsmith/traceable";
const processRequest = traceable(
async (userId: string, data: any) => {
return await handleRequest(userId, data);
},
{
name: "process-request",
// Conditionally disable tracing
tracingEnabled: process.env.NODE_ENV !== "test"
}
);

Completion Callbacks:
import { traceable } from "langsmith/traceable";
const monitoredFunction = traceable(
async (input: string) => {
return await processInput(input);
},
{
name: "monitored-function",
// Callback invoked when run completes
on_end: (runTree) => {
console.log(`Run ${runTree.id} completed in ${runTree.end_time - runTree.start_time}ms`);
if (runTree.error) {
console.error(`Error occurred: ${runTree.error}`);
}
}
}
);

Async Input/Output Processing:
import { traceable } from "langsmith/traceable";
const secureFunction = traceable(
async (apiKey: string, data: any) => {
return await callExternalAPI(apiKey, data);
},
{
name: "secure-api-call",
// Async processing of inputs
processInputs: async (inputs) => {
const sanitized = await sanitizeData(inputs.data);
return { data: sanitized }; // Hide API key
},
// Async processing of outputs
processOutputs: async (outputs) => {
const masked = await maskSensitiveFields(outputs);
return masked;
}
}
);

Extra Data and Advanced Options:
import { traceable } from "langsmith/traceable";
const advancedFunction = traceable(
async (input: string) => {
return await process(input);
},
{
name: "advanced-function",
run_type: "chain",
metadata: { version: "2.0" },
tags: ["production"],
// Additional arbitrary data
extra: {
deployment: "us-east-1",
instance: "i-12345"
},
// Custom trace ID for distributed tracing
trace_id: "custom-trace-uuid",
// Reference to evaluation example
reference_example_id: "example-uuid"
}
);

Access and manipulate the current run tree from within traced functions.
/**
 * Get the current run tree from async context.
 * @returns Current RunTree instance
 * @throws Error if no run tree is active
 */
function getCurrentRunTree(): RunTree;
/**
 * Get the current run tree from async context (optional form).
 * @param permitAbsentRunTree - If true, returns undefined instead of throwing
 *   when no run tree is active
 * @returns Current RunTree instance or undefined
 */
function getCurrentRunTree(permitAbsentRunTree: true): RunTree | undefined;import { traceable, getCurrentRunTree } from "langsmith/traceable";
const myFunction = traceable(async (input: string) => {
// Access current run tree
const runTree = getCurrentRunTree();
// Add metadata dynamically
runTree.metadata = { ...runTree.metadata, processed: true };
// Create child run manually
const childRun = runTree.createChild({
name: "sub-operation",
run_type: "tool"
});
return result;
});
// Optional access
function helperFunction() {
const runTree = getCurrentRunTree(true);
if (runTree) {
runTree.metadata = { helper: true };
}
}

Add metadata dynamically during execution:
import { traceable, getCurrentRunTree } from "langsmith/traceable";
const processWithMetadata = traceable(async (input: string) => {
const runTree = getCurrentRunTree();
// Add metadata as you go
runTree.metadata = {
...runTree.metadata,
startedAt: new Date().toISOString()
};
const result = await performProcessing(input);
// Add more metadata
runTree.metadata = {
...runTree.metadata,
processedItems: result.count,
completedAt: new Date().toISOString()
};
return result;
}, { name: "process-with-metadata" });

Errors are automatically captured, but you can add custom error handling:
import { traceable } from "langsmith/traceable";
const robustFunction = traceable(
async (input: string) => {
try {
return await riskyOperation(input);
} catch (error) {
// Error is automatically logged in trace
console.error("Operation failed:", error.message);
// Can add custom error context
throw new Error(`Failed to process "${input}": ${error.message}`);
}
},
{ name: "robust-function", run_type: "tool" }
);

Post runs as you complete them to avoid memory buildup:
import { RunTree } from "langsmith";
const parent = new RunTree({ name: "parent", run_type: "chain" });
for (let i = 0; i < 1000; i++) {
const child = parent.createChild({ name: `child-${i}`, run_type: "tool" });
await child.end({ result: i });
await child.postRun(); // Post immediately to free memory
}
await parent.end();
await parent.postRun();

Enable or disable tracing based on conditions:
import { traceable } from "langsmith/traceable";
const conditionalTrace = traceable(
async (input: string) => {
// Your logic
return processInput(input);
},
{
name: "conditional-trace",
// Only trace in production
client: process.env.NODE_ENV === "production" ? client : undefined
}
);

Redact sensitive data from traces:
import { Client } from "langsmith";
const client = new Client({
hideInputs: true, // Hide all inputs
hideOutputs: false, // Show outputs
});
// Or use functions for selective hiding
const client = new Client({
hideInputs: (inputs) => {
// Remove sensitive fields
const { apiKey, password, ...safe } = inputs;
return safe;
},
hideOutputs: (outputs) => {
// Redact PII
return {
...outputs,
email: "[REDACTED]"
};
}
});

Use an anonymizer for pattern-based redaction:
import { traceable } from "langsmith/traceable";
import { createAnonymizer } from "langsmith/anonymizer";
const anonymizer = createAnonymizer([
{ pattern: /\b[\w\.-]+@[\w\.-]+\.\w+\b/g, replace: "[EMAIL]" },
{ pattern: /\bsk-[a-zA-Z0-9]{32,}\b/g, replace: "[API_KEY]" },
]);
const privateFunction = traceable(
async (input: { email: string; query: string }) => {
return await processQuery(input);
},
{
name: "private-function",
processInputs: anonymizer,
processOutputs: anonymizer
}
);

LangSmith automatically batches trace uploads for performance:
import { Client } from "langsmith";
const client = new Client({
autoBatchTracing: true,
batchSizeBytesLimit: 20_000_000, // 20 MB
traceBatchConcurrency: 5, // Concurrent uploads
});

For serverless or short-lived processes:
import { Client } from "langsmith";
const client = new Client();
// Your traced operations
await myTracedFunction();
// Ensure all traces are uploaded before exit
await client.awaitPendingTraceBatches();

Block until root runs are finalized (useful for testing):
import { Client } from "langsmith";
const client = new Client({
blockOnRootRunFinalization: true
});

Trace only a percentage of requests:
import { Client } from "langsmith";
const client = new Client({
// Trace 10% of requests
tracingSamplingRate: 0.1
});
const sampledFunction = traceable(
async (input: string) => {
return process(input);
},
{ name: "sampled-function", client }
);

Propagate trace context across services using headers.
import { RunTree } from "langsmith";
// Service A: Create run and export headers
const runA = new RunTree({
name: "service-a",
run_type: "chain"
});
const headers = runA.toHeaders();
// Send headers to Service B
// Service B: Receive and continue trace
const runB = RunTree.fromHeaders(headers, {
name: "service-b",
run_type: "chain"
});
// Runs are now linked in the same trace
await runB.end({ result: "done" });
await runB.postRun();

LangSmith uses AsyncLocalStorage to maintain run tree context across async operations automatically.
/**
 * Singleton accessor for the AsyncLocalStorage instance that carries the
 * active RunTree (or the ROOT sentinel) across async boundaries.
 */
const AsyncLocalStorageProviderSingleton: {
getInstance(): AsyncLocalStorage<RunTree | typeof ROOT>;
};The async context is managed automatically when using traceable(). Manual context management is rarely needed but available through withRunTree() and getCurrentRunTree().
import { traceable } from "langsmith/traceable";
const retrieveDocuments = traceable(
async (query: string) => {
const docs = await vectorDB.similaritySearch(query, 5);
return docs;
},
{ name: "retrieve-docs", run_type: "retriever" }
);
const generateAnswer = traceable(
async (query: string, docs: string[]) => {
const context = docs.join("\n");
const answer = await llm.generate({ query, context });
return answer;
},
{ name: "generate-answer", run_type: "llm" }
);
const ragPipeline = traceable(
async (query: string) => {
const docs = await retrieveDocuments(query);
const answer = await generateAnswer(query, docs);
return answer;
},
{ name: "rag-pipeline", run_type: "chain" }
);
await ragPipeline("What is LangSmith?");

import { traceable, getCurrentRunTree } from "langsmith/traceable";
const planSteps = traceable(
async (goal: string) => ["Step 1", "Step 2", "Step 3"],
{ name: "plan-steps", run_type: "chain" }
);
const executeStep = traceable(
async (step: string) => `Executed: ${step}`,
{ name: "execute-step", run_type: "tool" }
);
const agent = traceable(
async (goal: string) => {
const runTree = getCurrentRunTree();
// Plan
const steps = await planSteps(goal);
runTree.addEvent({
name: "planning_complete",
time: Date.now(),
kwargs: { num_steps: steps.length }
});
// Execute
const results = [];
for (const step of steps) {
const result = await executeStep(step);
results.push(result);
}
return { goal, steps, results };
},
{ name: "agent", run_type: "chain" }
);
await agent("Analyze data");

import { traceable } from "langsmith/traceable";
import express from "express";
const app = express();
const processRequest = traceable(
async (body: any) => {
return await handleRequest(body);
},
{ name: "api-request", run_type: "chain" }
);
app.post("/api/process", async (req, res) => {
try {
const result = await processRequest(req.body);
res.json(result);
} catch (error) {
// Error is captured in trace
res.status(500).json({ error: error.message });
}
});
app.listen(3000);

// Good
const summarizeDocument = traceable(fn, { name: "summarize-document" });
const extractEntities = traceable(fn, { name: "extract-entities" });
// Bad
const func1 = traceable(fn, { name: "func1" });
const process = traceable(fn, { name: "process" });

// LLM calls
traceable(fn, { run_type: "llm" });
// Chains of operations
traceable(fn, { run_type: "chain" });
// Tool/function calls
traceable(fn, { run_type: "tool" });
// Document retrieval
traceable(fn, { run_type: "retriever" });
// Embedding generation
traceable(fn, { run_type: "embedding" });

const myFunction = traceable(
async (input: string) => process(input),
{
name: "my-function",
metadata: {
version: "2.1.0",
environment: process.env.NODE_ENV,
model: "gpt-4",
temperature: 0.7
},
tags: ["production", "critical"]
}
);

// Lambda/serverless function
export const handler = async (event) => {
const result = await myTracedFunction(event);
// Ensure traces are uploaded before function ends
await client.awaitPendingTraceBatches();
return result;
};

import { RunTree } from "langsmith";
const run = new RunTree({ name: "pipeline", run_type: "chain" });
run.addEvent({ name: "started", time: Date.now() });
const step1 = await processStep1();
run.addEvent({ name: "step1_complete", time: Date.now(), kwargs: { items: step1.length } });
const step2 = await processStep2();
run.addEvent({ name: "step2_complete", time: Date.now(), kwargs: { items: step2.length } });
await run.end({ result: step2 });
await run.postRun();