tessl install tessl/npm-langsmith@0.4.3
TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Manual trace creation with hierarchical run trees for fine-grained control over tracing.
RunTree provides a programmatic API for creating hierarchical traces manually. While traceable() is recommended for most use cases, RunTree is essential when you need explicit control over trace creation.
When to use RunTree:
- You need explicit control over when runs are created, ended, and posted.
- You are continuing a trace across service boundaries (via headers or dotted order).
- Your run hierarchy does not map cleanly onto nested function calls.
A minimal end-to-end sketch follows this list; each piece is covered in detail below.
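A minimal sketch of the manual tracing lifecycle (create a run, end it with outputs, post it to LangSmith); the run name and inputs here are placeholders.
import { RunTree } from "langsmith";
// Create a root run, record its outputs, and send it to LangSmith
const run = new RunTree({
  name: "quickstart",
  run_type: "chain",
  inputs: { question: "What is LangSmith?" }
});
await run.end({ answer: "A tracing and evaluation platform." });
await run.postRun();
The full import surface and type declarations follow.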
import { RunTree } from "langsmith";
import { isRunTree, convertToDottedOrderFormat, ROOT, withRunTree, isTraceableFunction } from "langsmith";
/**
* ROOT constant for parent_run configuration
* Use this to indicate a run should be a root run
*/
const ROOT: unique symbol;
/**
* Async local storage provider singleton for context management
* Internal utility for managing trace context across async operations
*/
const AsyncLocalStorageProviderSingleton: {
getInstance(): AsyncLocalStorage<RunTree | typeof ROOT>;
};
/**
* Hierarchical run tree for manual tracing
*/
class RunTree {
/** Unique run ID */
id: string;
/** Run name */
name: string;
/** Run type */
run_type?: string;
/** Input data */
inputs: KVMap;
/** Output data */
outputs?: KVMap;
/** Child runs */
child_runs: RunTree[];
/**
* Create new run tree
* @param config - Run tree configuration
*/
constructor(config: RunTreeConfig);
/**
* Create child run
* @param config - Partial configuration
* @returns Child RunTree instance
*/
createChild(config: Partial<RunTreeConfig>): RunTree;
/**
* End the run
* @param outputs - Output data
* @param error - Error message if failed
* @param endTime - End timestamp (defaults to Date.now())
* @param metadata - Additional metadata to merge
*/
end(
outputs?: KVMap,
error?: string,
endTime?: number,
metadata?: KVMap
): Promise<void>;
/**
* Post run to LangSmith API
* @param excludeChildRuns - If true, only post this run (not children)
*/
postRun(excludeChildRuns?: boolean): Promise<void>;
/**
* Update run via PATCH request
*/
patchRun(): Promise<void>;
/**
* Convert to JSON representation
*/
toJSON(): object;
/**
* Create from LangChain runnable config
* @param parentConfig - Parent runnable config
* @param props - Additional run tree properties
* @returns RunTree instance
*/
static fromRunnableConfig(
parentConfig: RunnableConfigLike,
props: Partial<RunTreeConfig>
): RunTree;
/**
* Create from dotted order string
* @param dottedOrder - Dotted order format string
* @returns RunTree instance
*/
static fromDottedOrder(dottedOrder: string): RunTree;
/**
* Add event to run
* @param event - Run event
*/
addEvent(event: RunEvent): void;
/**
* Convert to headers for distributed tracing
* @param headers - Optional HeadersLike object to set headers on (must have get/set methods)
* @returns Object with langsmith-trace and baggage headers
*/
toHeaders(headers?: HeadersLike): {
"langsmith-trace": string;
baggage: string;
};
/**
* Create from trace headers
* Note: Pass headers directly as the first argument, NOT wrapped in an object.
* @param headers - HTTP headers with trace context (Record or HeadersLike) - pass directly
* @param inheritArgs - Additional configuration (optional second parameter)
* @returns RunTree instance or undefined if headers don't contain trace context
*
* Example usage:
* const run = RunTree.fromHeaders(request.headers, { name: "my-run", run_type: "chain" });
* // NOT: RunTree.fromHeaders({ headers: request.headers, config: {...} })
*/
static fromHeaders(
headers: Record<string, string | string[]> | HeadersLike,
inheritArgs?: Partial<RunTreeConfig>
): RunTree | undefined;
}
/**
* Interface for objects with get/set header methods
* Compatible with Fetch API Headers and similar implementations
*/
interface HeadersLike {
get(name: string): string | null;
set(name: string, value: string): void;
}
interface RunTreeConfig {
/** Run name (required) */
name: string;
/** Run type */
run_type?: string;
/** Run ID (auto-generated if not provided) */
id?: string;
/** Project name */
project_name?: string;
/** Parent run */
parent_run?: RunTree | typeof ROOT;
/** Parent run ID */
parent_run_id?: string;
/** Child runs */
child_runs?: RunTree[];
/** Client instance */
client?: Client;
/** Start timestamp in milliseconds or ISO string */
start_time?: number | string;
/** End timestamp in milliseconds or ISO string */
end_time?: number | string;
/** Extra metadata */
extra?: KVMap;
/** Metadata */
metadata?: KVMap;
/** Tags */
tags?: string[];
/** Error message */
error?: string;
/** Input data */
inputs?: KVMap;
/** Output data */
outputs?: KVMap;
/** Reference example ID for evaluation */
reference_example_id?: string;
/** Serialized representation */
serialized?: object;
/** Whether tracing is enabled for this run */
tracingEnabled?: boolean;
/** End callback */
on_end?: (runTree: RunTree) => void;
/** Execution order */
execution_order?: number;
/** Child execution order */
child_execution_order?: number;
/** Dotted order for distributed tracing */
dotted_order?: string;
/** Trace ID */
trace_id?: string;
/** Attachments */
attachments?: Attachments;
/** Replicas for distributed runs */
replicas?: Replica[];
/** Distributed parent ID */
distributedParentId?: string;
/** LLM invocation parameters */
invocation_params?: InvocationParamsSchema;
}
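A sketch combining several RunTreeConfig fields; per the declarations above, ROOT marks the run as a root and client supplies an explicitly configured Client. The project name, tags, and API key handling are illustrative assumptions.
import { Client, ROOT, RunTree } from "langsmith";

// Explicit client; if apiKey is omitted, the Client also reads LANGSMITH_API_KEY from the environment
const client = new Client({ apiKey: process.env.LANGSMITH_API_KEY });

const run = new RunTree({
  name: "configured-run",
  run_type: "chain",
  inputs: { query: "hello" },
  project_name: "my-project",   // illustrative project name
  parent_run: ROOT,             // per RunTreeConfig above: explicitly a root run
  client,
  tags: ["docs", "example"],
  metadata: { environment: "dev" }
});
await run.end({ result: "ok" });
await run.postRun();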
type KVMap = Record<string, any>;
type Attachments = Record<string, AttachmentData>;
interface AttachmentData {
mime_type: string;
data: string | Uint8Array;
}
type Replica = ProjectReplica | WriteReplica;
type ProjectReplica = [string, KVMap | undefined];
interface WriteReplica {
apiUrl?: string;
apiKey?: string;
workspaceId?: string;
projectName?: string;
updates?: KVMap | undefined;
fromEnv?: boolean;
reroot?: boolean;
}
/**
* LLM invocation parameters - captures model configuration used for a run
* Helps track and reproduce LLM calls with specific settings
* Provider-specific fields can be added as additional properties
*/
interface InvocationParamsSchema {
/** Model identifier (e.g., "gpt-4", "claude-3-opus-20240229") */
model?: string;
/** Temperature for randomness (typically 0.0-2.0, where 0 is deterministic) */
temperature?: number;
/** Maximum tokens to generate in response */
max_tokens?: number;
/** Top-p nucleus sampling parameter (0.0-1.0) */
top_p?: number;
/** Top-k sampling parameter (number of top tokens to consider) */
top_k?: number;
/** Stop sequences that halt generation when encountered */
stop?: string[];
/** Presence penalty to reduce repetition (-2.0 to 2.0) */
presence_penalty?: number;
/** Frequency penalty to reduce repetition (-2.0 to 2.0) */
frequency_penalty?: number;
/** Token-level bias adjustments (token ID -> bias value) */
logit_bias?: Record<string, number>;
/** Additional provider-specific parameters */
[key: string]: any;
}
Create a parent run, add child runs, then end and post each run.
import { RunTree } from "langsmith";
// Create root run
const parentRun = new RunTree({
name: "parent-operation",
run_type: "chain",
inputs: { query: "What is AI?" }
});
// Create child run
const llmRun = parentRun.createChild({
name: "llm-call",
run_type: "llm",
inputs: { prompt: "What is AI?" }
});
await llmRun.end({ response: "AI is..." });
await llmRun.postRun();
// End parent run
await parentRun.end({ result: "Complete" });
await parentRun.postRun();
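postRun() and patchRun() can also be split across a run's lifetime: post the run up front so it is visible in LangSmith while still in progress, then patch it with outputs once it finishes. A sketch, with doLongRunningWork() standing in as a hypothetical helper for your own logic.
import { RunTree } from "langsmith";

const run = new RunTree({
  name: "long-operation",
  run_type: "chain",
  inputs: { task: "summarize" }
});

// Post immediately so the run appears while it is still in progress
await run.postRun();

const summary = await doLongRunningWork(); // hypothetical helper

// Record outputs, then PATCH the already-posted run with its final state
await run.end({ summary });
await run.patchRun();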
Add timestamped events to runs.
interface RunEvent {
/** Event name */
name?: string;
/** Event timestamp (ISO 8601 string) */
time?: string;
/** Event message */
message?: string;
/** Event metadata */
kwargs?: Record<string, unknown>;
/** Additional fields */
[key: string]: unknown;
}
import { RunTree } from "langsmith";
const run = new RunTree({
name: "complex-operation",
run_type: "chain"
});
run.addEvent({
name: "retrieval_started",
time: new Date().toISOString(),
kwargs: { query: "search term" }
});
const docs = await retrieveDocuments();
run.addEvent({
name: "retrieval_completed",
time: new Date().toISOString(),
kwargs: { num_docs: docs.length }
});
await run.end({ result: docs });
await run.postRun();
Propagate trace context across services using headers.
import { RunTree } from "langsmith";
// Service A: Create run and export headers
const runA = new RunTree({
name: "service-a",
run_type: "chain"
});
// Get trace headers as object
const traceHeaders = runA.toHeaders();
// Returns: { "langsmith-trace": "...", "baggage": "..." }
// Send headers to Service B
// Service B: Receive and continue trace
const runB = RunTree.fromHeaders(traceHeaders, {
name: "service-b",
run_type: "chain"
});
// Check if trace context was found
if (runB) {
await runB.end({ result: "done" });
await runB.postRun();
}
Detailed example of distributed tracing across multiple services.
import { RunTree } from "langsmith";
import express from "express";
import axios from "axios";
// Service A: API Gateway
const app = express();
app.post("/process", async (req, res) => {
const run = new RunTree({
name: "api-gateway",
run_type: "chain",
inputs: req.body
});
try {
// Call Service B with trace context
const response = await axios.post(
"http://service-b/process",
req.body,
{
headers: run.toHeaders()
}
);
await run.end({ result: response.data });
await run.postRun();
res.json(response.data);
} catch (error) {
await run.end(undefined, error.message);
await run.postRun();
res.status(500).json({ error: error.message });
}
});
// Service B: Processing Service
const serviceB = express();
serviceB.post("/process", async (req, res) => {
// Continue trace from headers
const parentRun = RunTree.fromHeaders(req.headers);
const run = parentRun
? parentRun.createChild({
name: "processing-service",
run_type: "chain",
inputs: req.body
})
: new RunTree({
name: "processing-service",
run_type: "chain",
inputs: req.body
});
try {
const result = await processData(req.body);
await run.end({ result });
await run.postRun();
res.json(result);
} catch (error) {
await run.end(undefined, error.message);
await run.postRun();
res.status(500).json({ error: error.message });
}
});
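toHeaders() can also write the trace headers directly onto an existing HeadersLike object (anything with get/set, such as the Fetch API Headers class), which is convenient with fetch(). A sketch; the downstream URL is a placeholder.
import { RunTree } from "langsmith";

const run = new RunTree({
  name: "caller",
  run_type: "chain",
  inputs: { query: "hello" }
});

// Sets "langsmith-trace" and "baggage" on the Headers instance in place
const headers = new Headers({ "content-type": "application/json" });
run.toHeaders(headers);

await fetch("http://service-b/process", {
  method: "POST",
  headers,
  body: JSON.stringify({ query: "hello" })
});

await run.end({ status: "sent" });
await run.postRun();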
Using dotted order for distributed tracing.
/**
* Convert to dotted order format for distributed tracing
* @param epoch - Timestamp in milliseconds
* @param runId - Run ID
* @param executionOrder - Execution order number
* @returns Dotted order string
*/
function convertToDottedOrderFormat(
epoch: number,
runId: string,
executionOrder: number
): string;
import { RunTree, convertToDottedOrderFormat } from "langsmith";
// Create run with dotted order
const epoch = Date.now();
const runId = "run-123";
const executionOrder = 1;
const dottedOrder = convertToDottedOrderFormat(epoch, runId, executionOrder);
console.log("Dotted order:", dottedOrder);
// Create RunTree from dotted order
const run = RunTree.fromDottedOrder(dottedOrder);
/**
* Execute function with explicit run tree context
* @param runTree - Run tree to use as context
* @param fn - Function to execute
* @returns Function result
*/
function withRunTree<T>(runTree: RunTree, fn: () => T | Promise<T>): Promise<T>;
import { withRunTree, RunTree } from "langsmith";
const parentRun = new RunTree({
name: "parent",
run_type: "chain"
});
// Execute function with specific run tree context
await withRunTree(parentRun, async () => {
// This code runs with parentRun as the current context
const result = await someOperation();
return result;
});
Creating complex hierarchical trace structures.
import { RunTree } from "langsmith";
// Create a complex hierarchical trace
const pipeline = new RunTree({
name: "rag-pipeline",
run_type: "chain",
inputs: { question: "What is quantum computing?" }
});
// Query understanding
const understanding = pipeline.createChild({
name: "query-understanding",
run_type: "chain"
});
const expansion = understanding.createChild({
name: "query-expansion",
run_type: "llm"
});
await expansion.end({ expanded: ["quantum computing", "qubits", "superposition"] });
await expansion.postRun();
await understanding.end({ processed: true });
await understanding.postRun();
// Retrieval
const retrieval = pipeline.createChild({
name: "document-retrieval",
run_type: "retriever"
});
const search = retrieval.createChild({
name: "vector-search",
run_type: "tool"
});
await search.end({ documents: ["doc1", "doc2", "doc3"] });
await search.postRun();
const reranking = retrieval.createChild({
name: "reranking",
run_type: "tool"
});
await reranking.end({ rankedDocs: ["doc2", "doc1"] });
await reranking.postRun();
await retrieval.end({ finalDocs: ["doc2", "doc1"] });
await retrieval.postRun();
// Generation
const generation = pipeline.createChild({
name: "answer-generation",
run_type: "llm"
});
generation.addEvent({
name: "context_prepared",
time: new Date().toISOString(),
kwargs: { docCount: 2 }
});
await generation.end({ answer: "Quantum computing is..." });
await generation.postRun();
// Complete pipeline
await pipeline.end({ answer: "Quantum computing is..." });
await pipeline.postRun();
Using metadata and attachments for rich traces.
import { RunTree } from "langsmith";
const run = new RunTree({
name: "analysis",
run_type: "chain",
inputs: { data: "..." },
metadata: {
userId: "user-123",
environment: "production",
version: "1.2.0"
},
tags: ["analysis", "production"],
attachments: {
"input.pdf": {
mime_type: "application/pdf",
data: pdfBuffer // e.g. a Uint8Array read from disk
}
}
});
// Add more metadata during execution
await run.end(
{ result: "..." },
undefined,
undefined,
{
duration_ms: 1234,
tokens_used: 500
}
);
await run.postRun();
Send runs to multiple LangSmith projects.
import { RunTree } from "langsmith";
const run = new RunTree({
name: "multi-project-run",
run_type: "chain",
inputs: { query: "test" },
project_name: "main-project",
replicas: [
// Send to another project
["backup-project", { environment: "staging" }],
// Send to different workspace
{
apiUrl: "https://api.smith.langchain.com",
apiKey: "backup-key",
projectName: "analytics-project",
updates: { team: "analytics" }
}
]
});
await run.end({ result: "done" });
await run.postRun();
// Run is logged to main-project, backup-project, and analytics-project
Tracking model parameters in traces.
import { RunTree } from "langsmith";
const llmRun = new RunTree({
name: "llm-call",
run_type: "llm",
inputs: { prompt: "Generate text" },
invocation_params: {
model: "gpt-4",
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
top_k: 50,
stop: ["\n\n", "END"],
frequency_penalty: 0,
presence_penalty: 0,
logit_bias: { "50256": -100 } // Bias against specific token
}
});
await llmRun.end({ completion: "Generated text..." });
await llmRun.postRun();
Execute custom logic when runs complete.
import { RunTree } from "langsmith";
const run = new RunTree({
name: "monitored-operation",
run_type: "chain",
inputs: { data: "..." },
on_end: (runTree) => {
const duration = runTree.end_time - runTree.start_time;
console.log(`Run ${runTree.name} completed in ${duration}ms`);
// Send to monitoring system
sendMetric({
name: runTree.name,
duration,
status: runTree.error ? "error" : "success"
});
}
});
await run.end({ result: "done" });
await run.postRun();
Link runs to dataset examples for evaluation.
import { RunTree } from "langsmith";
const evaluationRun = new RunTree({
name: "evaluation-run",
run_type: "chain",
inputs: { query: "What is AI?" },
reference_example_id: "example-uuid-123",
metadata: {
evaluation: true,
dataset: "qa-dataset"
}
});
await evaluationRun.end({ answer: "AI is..." });
await evaluationRun.postRun();
/**
* Type guard for RunTree
* @param x - Value to check
* @returns True if value is a RunTree
*/
function isRunTree(x: any): x is RunTree;
/**
* Type guard for traceable function
* @param x - Value to check
* @returns True if value is a traceable function
*/
function isTraceableFunction(x: any): x is TraceableFunction<any>;
import { isRunTree, RunTree } from "langsmith";
function logRun(run: any) {
if (isRunTree(run)) {
console.log(`Run: ${run.name} (${run.id})`);
}
}
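isTraceableFunction() distinguishes functions wrapped with traceable() from plain functions. A small sketch, assuming the traceable() wrapper from "langsmith/traceable".
import { traceable } from "langsmith/traceable";
import { isTraceableFunction } from "langsmith";

const plain = async (text: string) => text.toUpperCase();
const wrapped = traceable(plain, { name: "uppercase", run_type: "tool" });

console.log(isTraceableFunction(wrapped)); // true
console.log(isTraceableFunction(plain)); // false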
/**
 * Type guard for LangChain runnable config
* @param x - Value to check
* @returns True if value is a runnable config
*/
function isRunnableConfigLike(x: any): x is RunnableConfigLike;
interface RunnableConfigLike {
callbacks?: any;
tags?: string[];
metadata?: KVMap;
run_name?: string;
[key: string]: any;
}
import { RunTree, isRunnableConfigLike } from "langsmith";
// From LangChain config
const langchainConfig = {
callbacks: callbackManager,
tags: ["langchain"],
metadata: { version: "1.0" }
};
if (isRunnableConfigLike(langchainConfig)) {
const runTree = RunTree.fromRunnableConfig(langchainConfig, {
name: "my-chain",
run_type: "chain"
});
}
End runs with an error message when an operation fails, and always post the run.
import { RunTree } from "langsmith";
const run = new RunTree({
name: "operation",
run_type: "chain"
});
try {
const result = await riskyOperation();
await run.end({ result });
} catch (error) {
await run.end(undefined, error.message);
throw error;
} finally {
await run.postRun();
}
Prefer traceable() for automatic tracing in most application code; reach for RunTree when you need the explicit control shown above, such as continuing traces across services, posting runs at custom times, or building run hierarchies by hand.
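For contrast, here is the same kind of work wrapped with traceable(), which creates, ends, and posts the run around each call automatically (a sketch assuming the "langsmith/traceable" entry point).
import { traceable } from "langsmith/traceable";

const answerQuestion = traceable(
  async (question: string) => {
    // Runs inside an automatically created "chain" run
    return `You asked: ${question}`;
  },
  { name: "answer-question", run_type: "chain" }
);

await answerQuestion("What is AI?");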
// Post runs immediately to avoid memory buildup
const parent = new RunTree({ name: "parent", run_type: "chain" });
for (let i = 0; i < 1000; i++) {
const child = parent.createChild({ name: `child-${i}` });
await child.end({ result: i });
await child.postRun(); // Post immediately
}
await parent.end();
await parent.postRun();
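When you post children individually like this, postRun() on the parent can take excludeChildRuns (documented above) so that only the parent is posted, and patchRun() can record its final outputs later. A sketch under those assumptions.
import { RunTree } from "langsmith";

const parent = new RunTree({ name: "parent-only", run_type: "chain" });

// Post only the parent; children are posted as they finish
await parent.postRun(true);

const child = parent.createChild({ name: "child", run_type: "tool" });
await child.end({ ok: true });
await child.postRun();

// Update the already-posted parent with its outputs
await parent.end({ done: true });
await parent.patchRun();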
A nested pipeline with per-step error handling.
import { RunTree } from "langsmith";
const pipeline = new RunTree({
name: "data-pipeline",
run_type: "chain"
});
try {
const step1 = pipeline.createChild({
name: "step-1",
run_type: "tool"
});
try {
const result1 = await executeStep1();
await step1.end({ result: result1 });
} catch (error) {
await step1.end(undefined, error.message);
throw error;
} finally {
await step1.postRun();
}
// Continue with step 2...
await pipeline.end({ success: true });
} catch (error) {
await pipeline.end(undefined, error.message);
} finally {
await pipeline.postRun();
}