LangSmith provides comprehensive OpenTelemetry (OTEL) integration for automatic instrumentation and tracing of applications. The OpenTelemetry integration allows you to capture traces using standard OTEL instrumentation and export them to LangSmith for observability and analysis.
The integration is exposed through the `langsmith/experimental/otel/setup` and `langsmith/experimental/otel/exporter` entry points. Install the package with:

npm install langsmith

Then import the setup helper:

import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { LangSmithOTLPTraceExporter } from "langsmith/experimental/otel/exporter";
import {
GEN_AI_OPERATION_NAME,
GEN_AI_REQUEST_MODEL,
GEN_AI_REQUEST_TEMPERATURE,
GEN_AI_REQUEST_TOP_P,
GEN_AI_REQUEST_MAX_TOKENS,
GEN_AI_RESPONSE_ID,
GEN_AI_RESPONSE_MODEL,
GEN_AI_RESPONSE_FINISH_REASONS,
GEN_AI_USAGE_INPUT_TOKENS,
GEN_AI_USAGE_OUTPUT_TOKENS,
} from "langsmith/experimental/otel/exporter";

For CommonJS:
const { initializeOTEL } = require("langsmith/experimental/otel/setup");
const { LangSmithOTLPTraceExporter } = require("langsmith/experimental/otel/exporter");

import { initializeOTEL } from "langsmith/experimental/otel/setup";
// Initialize OpenTelemetry with LangSmith exporter
initializeOTEL({
projectName: "my-otel-project",
});
// Your application code with automatic tracing
// All instrumented operations will be captured and sent to LangSmith

import { LangSmithOTLPTraceExporter } from "langsmith/experimental/otel/exporter";
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { SimpleSpanProcessor } from "@opentelemetry/sdk-trace-base";
// Create LangSmith OTLP exporter
const exporter = new LangSmithOTLPTraceExporter();
// Set up trace provider
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(exporter));
provider.register();

The LangSmith OpenTelemetry integration consists of several key components:
- initializeOTEL() — function that provides one-line setup for OpenTelemetry with LangSmith
- LangSmithOTLPTraceExporter — extends the standard OTLP exporter with LangSmith-specific configuration
- GEN_AI_* constants — provide standard attribute names for LLM instrumentation

initializeOTEL() initializes OpenTelemetry instrumentation with the LangSmith exporter in a single function call. It handles trace provider setup, exporter configuration, and instrumentation registration automatically.
/**
* Initialize OpenTelemetry instrumentation with LangSmith exporter
* @param config - Optional configuration for OTEL initialization
*/
function initializeOTEL(config?: InitializeOTELConfig): void;
interface InitializeOTELConfig {
/** Client instance for LangSmith API */
client?: Client;
/** Project name for traces */
projectName?: string;
/** Additional OpenTelemetry instrumentations to register */
instrumentations?: Instrumentation[];
/** Text map propagator for distributed tracing */
propagator?: TextMapPropagator;
/** Context manager for managing trace context */
contextManager?: ContextManager;
}

Usage Examples:
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { Client } from "langsmith";
// Basic initialization with project name
initializeOTEL({
projectName: "my-application",
});
// Advanced initialization with custom client
const client = new Client({
apiUrl: "https://api.smith.langchain.com",
apiKey: process.env.LANGSMITH_API_KEY,
});
initializeOTEL({
client: client,
projectName: "production-traces",
});
// With additional instrumentations
import { HttpInstrumentation } from "@opentelemetry/instrumentation-http";
import { ExpressInstrumentation } from "@opentelemetry/instrumentation-express";
initializeOTEL({
projectName: "web-service",
instrumentations: [
new HttpInstrumentation(),
new ExpressInstrumentation(),
],
});
// With custom propagator for distributed tracing
import { W3CTraceContextPropagator } from "@opentelemetry/core";
initializeOTEL({
projectName: "distributed-app",
propagator: new W3CTraceContextPropagator(),
});
// With custom context manager
import { AsyncLocalStorageContextManager } from "@opentelemetry/context-async-hooks";
initializeOTEL({
projectName: "my-app",
contextManager: new AsyncLocalStorageContextManager(),
});

LangSmithOTLPTraceExporter is an OpenTelemetry OTLP trace exporter configured for LangSmith. It extends the standard OTLP exporter with LangSmith-specific endpoint configuration and authentication.
/**
* LangSmith OTLP trace exporter for OpenTelemetry
* Extends OTLPTraceExporter with LangSmith configuration
*/
class LangSmithOTLPTraceExporter extends OTLPTraceExporter {
/**
* Create a new LangSmith OTLP trace exporter
* @param config - Optional exporter configuration
*/
constructor(config?: LangSmithOTLPTraceExporterConfig);
}
type LangSmithOTLPTraceExporterConfig = OTLPExporterConfigBase & {
// Inherits all standard OTLP exporter configuration options
// Automatically configured with LangSmith endpoints and authentication
};

Usage Examples:
import { LangSmithOTLPTraceExporter } from "langsmith/experimental/otel/exporter";
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
// Create exporter with default configuration
const exporter = new LangSmithOTLPTraceExporter();
// Set up trace provider with batch processing
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new BatchSpanProcessor(exporter));
provider.register();
// Manual tracing example
import { trace } from "@opentelemetry/api";
const tracer = trace.getTracer("my-app");
const span = tracer.startSpan("my-operation");
span.setAttribute("operation.type", "data-processing");
span.end();
// Using with different span processors
import { SimpleSpanProcessor } from "@opentelemetry/sdk-trace-base";
// Simple processor for immediate export (development)
const devProvider = new NodeTracerProvider();
devProvider.addSpanProcessor(new SimpleSpanProcessor(exporter));
devProvider.register();
// Batch processor for production
const prodProvider = new NodeTracerProvider();
prodProvider.addSpanProcessor(
new BatchSpanProcessor(exporter, {
maxQueueSize: 2048,
maxExportBatchSize: 512,
scheduledDelayMillis: 5000,
})
);
prodProvider.register();

The GEN_AI_* exports are standard OpenTelemetry semantic convention constants for LLM instrumentation. These constants provide consistent attribute naming for generative AI operations.
/**
* Semantic convention constants for LLM/GenAI instrumentation
* Use these constants as span attribute keys for consistent instrumentation
*/
/** Operation name attribute - e.g., "chat", "completion", "embedding" */
const GEN_AI_OPERATION_NAME: string;
/** Model name used in the request - e.g., "gpt-4", "claude-3-opus" */
const GEN_AI_REQUEST_MODEL: string;
/** Temperature parameter for generation randomness (0.0 to 2.0) */
const GEN_AI_REQUEST_TEMPERATURE: string;
/** Top-p parameter for nucleus sampling (0.0 to 1.0) */
const GEN_AI_REQUEST_TOP_P: string;
/** Maximum tokens to generate */
const GEN_AI_REQUEST_MAX_TOKENS: string;
/** Unique identifier for the response */
const GEN_AI_RESPONSE_ID: string;
/** Model name returned in the response */
const GEN_AI_RESPONSE_MODEL: string;
/** Finish reasons for the generation - e.g., ["stop", "length"] */
const GEN_AI_RESPONSE_FINISH_REASONS: string;
/** Number of tokens in the input/prompt */
const GEN_AI_USAGE_INPUT_TOKENS: string;
/** Number of tokens in the output/completion */
const GEN_AI_USAGE_OUTPUT_TOKENS: string;

Usage Examples:
import { trace } from "@opentelemetry/api";
import {
GEN_AI_OPERATION_NAME,
GEN_AI_REQUEST_MODEL,
GEN_AI_REQUEST_TEMPERATURE,
GEN_AI_REQUEST_MAX_TOKENS,
GEN_AI_RESPONSE_ID,
GEN_AI_RESPONSE_FINISH_REASONS,
GEN_AI_USAGE_INPUT_TOKENS,
GEN_AI_USAGE_OUTPUT_TOKENS,
} from "langsmith/experimental/otel/exporter";
const tracer = trace.getTracer("llm-app");
// Instrumenting an LLM call
async function callLLM(prompt: string) {
const span = tracer.startSpan("llm.chat");
// Set request attributes
span.setAttribute(GEN_AI_OPERATION_NAME, "chat");
span.setAttribute(GEN_AI_REQUEST_MODEL, "gpt-4");
span.setAttribute(GEN_AI_REQUEST_TEMPERATURE, 0.7);
span.setAttribute(GEN_AI_REQUEST_MAX_TOKENS, 1000);
try {
// Make your LLM call
const response = await someLLMAPI.chat({
model: "gpt-4",
prompt: prompt,
temperature: 0.7,
max_tokens: 1000,
});
// Set response attributes
span.setAttribute(GEN_AI_RESPONSE_ID, response.id);
span.setAttribute(GEN_AI_RESPONSE_FINISH_REASONS, JSON.stringify([response.finish_reason]));
span.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, response.usage.prompt_tokens);
span.setAttribute(GEN_AI_USAGE_OUTPUT_TOKENS, response.usage.completion_tokens);
span.setStatus({ code: 1 }); // OK
return response;
} catch (error) {
span.recordException(error);
span.setStatus({ code: 2, message: error.message }); // ERROR
throw error;
} finally {
span.end();
}
}
// Instrumenting an embedding operation
async function createEmbedding(text: string) {
const span = tracer.startSpan("llm.embedding");
span.setAttribute(GEN_AI_OPERATION_NAME, "embedding");
span.setAttribute(GEN_AI_REQUEST_MODEL, "text-embedding-ada-002");
try {
const response = await embeddingAPI.create({
input: text,
model: "text-embedding-ada-002",
});
span.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, response.usage.prompt_tokens);
span.setStatus({ code: 1 });
return response;
} catch (error) {
span.recordException(error);
span.setStatus({ code: 2, message: error.message });
throw error;
} finally {
span.end();
}
}
// Instrumenting a complex LLM workflow
async function chatWithRetrieval(query: string) {
const span = tracer.startSpan("chat.with_retrieval");
try {
// Retrieval step
const retrievalSpan = tracer.startSpan("retrieval", {
parent: span.spanContext(),
});
const docs = await retrieveDocuments(query);
retrievalSpan.setAttribute("retrieval.document_count", docs.length);
retrievalSpan.end();
// LLM call step
const llmSpan = tracer.startSpan("llm.chat", {
parent: span.spanContext(),
});
llmSpan.setAttribute(GEN_AI_OPERATION_NAME, "chat");
llmSpan.setAttribute(GEN_AI_REQUEST_MODEL, "claude-3-opus-20240229");
llmSpan.setAttribute(GEN_AI_REQUEST_TEMPERATURE, 0.3);
const response = await llmAPI.chat({
model: "claude-3-opus-20240229",
messages: [
{ role: "user", content: query },
{ role: "assistant", content: docs.join("\n") },
],
});
llmSpan.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, response.usage.input_tokens);
llmSpan.setAttribute(GEN_AI_USAGE_OUTPUT_TOKENS, response.usage.output_tokens);
llmSpan.setAttribute(GEN_AI_RESPONSE_ID, response.id);
llmSpan.end();
span.setStatus({ code: 1 });
return response;
} catch (error) {
span.recordException(error);
span.setStatus({ code: 2, message: error.message });
throw error;
} finally {
span.end();
}
}

import express from "express";
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { HttpInstrumentation } from "@opentelemetry/instrumentation-http";
import { ExpressInstrumentation } from "@opentelemetry/instrumentation-express";
// Initialize OpenTelemetry before importing instrumented modules
initializeOTEL({
projectName: "express-app",
instrumentations: [
new HttpInstrumentation(),
new ExpressInstrumentation(),
],
});
const app = express();
app.use(express.json());
app.get("/api/chat", async (req, res) => {
// Automatically traced by OpenTelemetry
const result = await processChat(req.query.message);
res.json(result);
});
app.post("/api/analyze", async (req, res) => {
// HTTP and Express instrumentations capture request/response
const analysis = await analyzeText(req.body.text);
res.json(analysis);
});
app.listen(3000, () => {
console.log("Server running with OpenTelemetry tracing");
});
async function processChat(message: any) {
// Implementation
return { response: "Processed" };
}
async function analyzeText(text: string) {
// Implementation
return { sentiment: "positive" };
}

import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { trace, context } from "@opentelemetry/api";
import {
GEN_AI_OPERATION_NAME,
GEN_AI_REQUEST_MODEL,
GEN_AI_USAGE_INPUT_TOKENS,
GEN_AI_USAGE_OUTPUT_TOKENS,
} from "langsmith/experimental/otel/exporter";
// Initialize OTEL
initializeOTEL({ projectName: "llm-pipeline" });
const tracer = trace.getTracer("my-app");
async function processUserQuery(query: string) {
return await tracer.startActiveSpan("process_query", async (span) => {
try {
// Step 1: Embed query
const embedding = await tracer.startActiveSpan(
"embed_query",
async (embedSpan) => {
embedSpan.setAttribute(GEN_AI_OPERATION_NAME, "embedding");
embedSpan.setAttribute(GEN_AI_REQUEST_MODEL, "text-embedding-ada-002");
const result = await createEmbedding(query);
embedSpan.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens);
embedSpan.end();
return result;
}
);
// Step 2: Search documents
const docs = await tracer.startActiveSpan(
"search_documents",
async (searchSpan) => {
const results = await vectorDB.search(embedding.vector);
searchSpan.setAttribute("search.results_count", results.length);
searchSpan.setAttribute("search.query_type", "similarity");
searchSpan.end();
return results;
}
);
// Step 3: Generate response
const response = await tracer.startActiveSpan(
"generate_response",
async (genSpan) => {
genSpan.setAttribute(GEN_AI_OPERATION_NAME, "chat");
genSpan.setAttribute(GEN_AI_REQUEST_MODEL, "gpt-4");
genSpan.setAttribute(GEN_AI_REQUEST_TEMPERATURE, 0.7);
genSpan.setAttribute(GEN_AI_REQUEST_MAX_TOKENS, 500);
const answer = await llm.chat({
prompt: query,
context: docs,
});
genSpan.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, answer.usage.input);
genSpan.setAttribute(GEN_AI_USAGE_OUTPUT_TOKENS, answer.usage.output);
genSpan.setAttribute(GEN_AI_RESPONSE_ID, answer.id);
genSpan.end();
return answer;
}
);
span.setAttribute("pipeline.total_docs_retrieved", docs.length);
span.setStatus({ code: 1 });
return response;
} catch (error) {
span.recordException(error);
span.setStatus({ code: 2, message: error.message });
throw error;
} finally {
span.end();
}
});
}
// Mock implementations for example
const vectorDB = {
async search(vector: any) {
return ["doc1", "doc2"];
}
};
const llm = {
async chat(params: any) {
return {
id: "response-123",
text: "Generated response",
usage: { input: 100, output: 50 },
};
}
};
async function createEmbedding(text: string) {
return {
vector: [0.1, 0.2, 0.3],
usage: { prompt_tokens: 10 },
};
}

// Service A (API Gateway)
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { W3CTraceContextPropagator } from "@opentelemetry/core";
import { trace, propagation, context } from "@opentelemetry/api";
initializeOTEL({
projectName: "api-gateway",
propagator: new W3CTraceContextPropagator(),
});
const tracer = trace.getTracer("api-gateway");
async function handleRequest(req: any, res: any) {
await tracer.startActiveSpan("handle_request", async (span) => {
// Extract context from incoming headers
const ctx = propagation.extract(context.active(), req.headers);
// Inject context into outgoing request
const headers: Record<string, string> = {};
propagation.inject(ctx, headers);
// Add request metadata
span.setAttribute("http.method", req.method);
span.setAttribute("http.route", req.path);
span.setAttribute("http.user_agent", req.headers["user-agent"]);
// Call downstream service with trace context
const response = await fetch("http://llm-service/process", {
method: "POST",
headers: {
...headers,
"Content-Type": "application/json",
},
body: JSON.stringify(req.body),
});
const data = await response.json();
span.setAttribute("http.status_code", response.status);
span.end();
return data;
});
}
// Service B (LLM Service)
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { W3CTraceContextPropagator } from "@opentelemetry/core";
import { trace, propagation, context } from "@opentelemetry/api";
import {
GEN_AI_OPERATION_NAME,
GEN_AI_REQUEST_MODEL,
GEN_AI_USAGE_INPUT_TOKENS,
GEN_AI_USAGE_OUTPUT_TOKENS,
} from "langsmith/experimental/otel/exporter";
initializeOTEL({
projectName: "llm-service",
propagator: new W3CTraceContextPropagator(),
});
const tracer = trace.getTracer("llm-service");
async function processLLMRequest(req: any, res: any) {
// Extract parent trace context from headers
const parentCtx = propagation.extract(context.active(), req.headers);
// Create span as child of parent context
await context.with(parentCtx, async () => {
await tracer.startActiveSpan("llm_processing", async (span) => {
span.setAttribute(GEN_AI_OPERATION_NAME, "chat");
span.setAttribute(GEN_AI_REQUEST_MODEL, "gpt-4");
span.setAttribute("service.name", "llm-service");
const result = await callLLM(req.body.prompt);
span.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.input_tokens);
span.setAttribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.output_tokens);
span.end();
res.json(result);
});
});
}
async function callLLM(prompt: string) {
return {
text: "Response",
usage: { input_tokens: 50, output_tokens: 100 },
};
}

import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { Client } from "langsmith";
import { HttpInstrumentation } from "@opentelemetry/instrumentation-http";
import { ExpressInstrumentation } from "@opentelemetry/instrumentation-express";
import { MongoDBInstrumentation } from "@opentelemetry/instrumentation-mongodb";
import { RedisInstrumentation } from "@opentelemetry/instrumentation-redis-4";
// Create custom client with configuration
const client = new Client({
apiKey: process.env.LANGSMITH_API_KEY,
apiUrl: process.env.LANGSMITH_API_URL,
});
// Initialize with multiple instrumentations
initializeOTEL({
client: client,
projectName: "full-stack-app",
instrumentations: [
// HTTP instrumentation for outgoing requests
new HttpInstrumentation({
requestHook: (span, request) => {
span.setAttribute("http.request.method", request.method);
span.setAttribute("http.request.url", request.url);
},
responseHook: (span, response) => {
span.setAttribute("http.response.status_code", response.statusCode);
},
}),
// Express instrumentation for API routes
new ExpressInstrumentation({
requestHook: (span, requestInfo) => {
span.setAttribute("express.route", requestInfo.route);
span.setAttribute("express.type", requestInfo.request.method);
},
}),
// Database instrumentation
new MongoDBInstrumentation({
enhancedDatabaseReporting: true,
}),
// Cache instrumentation
new RedisInstrumentation({
requireParentSpan: true,
}),
],
});
// Your application will now automatically trace:
// - HTTP requests and responses
// - Express routes and middleware
// - MongoDB queries
// - Redis operations

The OpenTelemetry integration works seamlessly with other LangSmith features:
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { traceable } from "langsmith/traceable";
import { trace } from "@opentelemetry/api";
// Initialize OTEL
initializeOTEL({ projectName: "hybrid-tracing" });
// Use both OTEL spans and traceable functions
const tracer = trace.getTracer("my-app");
const processData = traceable(
async (data: string) => {
// This function is traced by LangSmith traceable
return await tracer.startActiveSpan("internal_processing", async (span) => {
// This span is captured by OpenTelemetry
span.setAttribute("processing.data_length", data.length);
const result = await expensiveOperation(data);
span.setAttribute("processing.result_size", result.length);
span.end();
return result;
});
},
{ name: "process_data", run_type: "chain" }
);
// Call the function - creates both traceable and OTEL traces
await processData("sample data");
async function expensiveOperation(data: string) {
return data.toUpperCase();
}

import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { Client } from "langsmith";
import { trace } from "@opentelemetry/api";
// Create client
const client = new Client();
// Initialize OTEL with the same client
initializeOTEL({
client: client,
projectName: "my-project",
});
const tracer = trace.getTracer("my-app");
// Use client API alongside OTEL tracing
async function processWithFeedback(input: string) {
await tracer.startActiveSpan("process_with_feedback", async (span) => {
// Get span context to link feedback
const spanContext = span.spanContext();
const traceId = spanContext.traceId;
// Process input
const result = await processInput(input);
// Create feedback linked to OTEL trace
await client.createFeedback({
run_id: traceId, // Link to OTEL trace ID
key: "user-feedback",
score: 1.0,
comment: "Processing successful",
});
span.end();
return result;
});
}
async function processInput(input: string) {
return { processed: true };
}

Combine OTEL spans with RunTree for hybrid tracing.
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { RunTree } from "langsmith";
import { trace } from "@opentelemetry/api";
initializeOTEL({ projectName: "hybrid-traces" });
const tracer = trace.getTracer("my-app");
async function hybridTracing(input: string) {
// Start RunTree
const runTree = new RunTree({
name: "parent-operation",
run_type: "chain",
inputs: { input },
});
// Start OTEL span
await tracer.startActiveSpan("otel-operation", async (span) => {
// Both tracing systems capture this operation
const result = await performOperation(input);
span.setAttribute("result.length", result.length);
span.end();
await runTree.end({ outputs: { result } });
await runTree.postRun();
return result;
});
}
async function performOperation(input: string) {
return input.toUpperCase();
}

The OpenTelemetry integration respects standard LangSmith environment variables:
- LANGSMITH_API_KEY — API key for authentication (required)
- LANGSMITH_API_URL — API endpoint URL (defaults to https://api.smith.langchain.com)
- LANGSMITH_PROJECT — default project name for traces
- LANGCHAIN_API_KEY — alternative to LANGSMITH_API_KEY for backwards compatibility
- LANGCHAIN_ENDPOINT — alternative to LANGSMITH_API_URL for backwards compatibility
- LANGCHAIN_PROJECT — alternative to LANGSMITH_PROJECT for backwards compatibility

Standard OpenTelemetry environment variables are also supported:
- OTEL_EXPORTER_OTLP_ENDPOINT — override the OTLP endpoint (default: LangSmith endpoint)
- OTEL_EXPORTER_OTLP_HEADERS — additional headers for OTLP requests
- OTEL_SERVICE_NAME — service name for traces (appears in trace metadata)
- OTEL_RESOURCE_ATTRIBUTES — additional resource attributes (comma-separated key=value pairs)
- OTEL_TRACES_SAMPLER — sampling strategy ("always_on", "always_off", "traceidratio", "parentbased_always_on")
- OTEL_TRACES_SAMPLER_ARG — argument for the sampler (e.g., sampling ratio for "traceidratio")

Environment Setup Example:
# LangSmith configuration
export LANGSMITH_API_KEY="lsv2_pt_..."
export LANGSMITH_PROJECT="my-production-app"
# OpenTelemetry configuration
export OTEL_SERVICE_NAME="llm-backend"
export OTEL_RESOURCE_ATTRIBUTES="environment=production,version=2.1.0,region=us-west-2"
export OTEL_TRACES_SAMPLER="traceidratio"
export OTEL_TRACES_SAMPLER_ARG="0.1" # Sample 10% of traces

Initialize OpenTelemetry before importing instrumented modules to ensure proper auto-instrumentation:
// CORRECT: Initialize first
import { initializeOTEL } from "langsmith/experimental/otel/setup";
initializeOTEL({ projectName: "my-app" });
// Then import application code
import app from "./app";
import { router } from "./routes";
// INCORRECT: Import before initialization
import app from "./app"; // Already imported, won't be instrumented
import { initializeOTEL } from "langsmith/experimental/otel/setup";
initializeOTEL({ projectName: "my-app" }); // Too late!

Always use the provided constants for LLM attributes to ensure consistency:
import {
GEN_AI_REQUEST_MODEL,
GEN_AI_USAGE_INPUT_TOKENS,
} from "langsmith/experimental/otel/exporter";
// CORRECT: Use constants
span.setAttribute(GEN_AI_REQUEST_MODEL, "gpt-4");
span.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, 150);
// INCORRECT: Use string literals (typos, inconsistency)
span.setAttribute("gen_ai.request.model", "gpt-4"); // Typo risk
span.setAttribute("ai_model", "gpt-4"); // Non-standard
span.setAttribute("input_tokens", 150); // Wrong convention

Use BatchSpanProcessor for production deployments to optimize performance:
import { BatchSpanProcessor, SimpleSpanProcessor } from "@opentelemetry/sdk-trace-base";
import { LangSmithOTLPTraceExporter } from "langsmith/experimental/otel/exporter";
const exporter = new LangSmithOTLPTraceExporter();
// Production: Batch processor for efficiency
const prodProcessor = new BatchSpanProcessor(exporter, {
maxQueueSize: 2048, // Buffer up to 2048 spans
maxExportBatchSize: 512, // Export in batches of 512
scheduledDelayMillis: 5000, // Export every 5 seconds
exportTimeoutMillis: 30000, // 30s export timeout
});
// Development: Simple processor for immediate visibility
const devProcessor = new SimpleSpanProcessor(exporter);
// Use appropriate processor based on environment
const processor = process.env.NODE_ENV === "production" ? prodProcessor : devProcessor;
provider.addSpanProcessor(processor);

Always record exceptions and set proper span status:
import { trace } from "@opentelemetry/api";
const tracer = trace.getTracer("my-app");
async function operationWithErrorHandling(data: any) {
const span = tracer.startSpan("risky-operation");
try {
const result = await riskyOperation(data);
// Success: set OK status
span.setStatus({ code: 1 }); // SpanStatusCode.OK
span.setAttribute("operation.success", true);
return result;
} catch (error) {
// Record the exception with stack trace
span.recordException(error);
// Set error status with message
span.setStatus({
code: 2, // SpanStatusCode.ERROR
message: error.message,
});
span.setAttribute("operation.success", false);
span.setAttribute("error.type", error.constructor.name);
// Re-throw after recording
throw error;
} finally {
// Always end the span
span.end();
}
}
// Using startActiveSpan (auto-ends on async completion)
async function saferOperation(data: any) {
return await tracer.startActiveSpan("operation", async (span) => {
try {
const result = await processData(data);
span.setStatus({ code: 1 });
return result;
} catch (error) {
span.recordException(error);
span.setStatus({ code: 2, message: error.message });
throw error;
}
// Span automatically ends when async function completes
});
}
async function riskyOperation(data: any) {
return data;
}
async function processData(data: any) {
return data;
}

Use proper context propagation for distributed systems:
import { propagation, context } from "@opentelemetry/api";
// Extract context from incoming requests
async function handleIncomingRequest(req: any) {
// Extract trace context from request headers
const ctx = propagation.extract(context.active(), req.headers);
// Run operation within extracted context
return await context.with(ctx, async () => {
// All spans created here will be children of the extracted context
return await processRequest(req);
});
}
// Inject context into outgoing requests
async function makeOutgoingRequest(url: string, data: any) {
// Create headers object
const headers: Record<string, string> = {
"Content-Type": "application/json",
};
// Inject current trace context into headers
propagation.inject(context.active(), headers);
// Make request with trace headers
const response = await fetch(url, {
method: "POST",
headers,
body: JSON.stringify(data),
});
return response.json();
}
// Complete distributed tracing example
async function distributedWorkflow(input: string) {
const tracer = trace.getTracer("orchestrator");
await tracer.startActiveSpan("orchestrate", async (span) => {
// Call service 1
const result1 = await makeOutgoingRequest("http://service1/process", {
input,
});
// Call service 2 with result from service 1
const result2 = await makeOutgoingRequest("http://service2/enhance", {
data: result1,
});
span.setAttribute("workflow.steps_completed", 2);
span.end();
return result2;
});
}
async function processRequest(req: any) {
return { processed: true };
}

Add custom attributes for domain-specific metadata:
import { trace } from "@opentelemetry/api";
import { GEN_AI_OPERATION_NAME, GEN_AI_REQUEST_MODEL } from "langsmith/experimental/otel/exporter";
const tracer = trace.getTracer("my-app");
async function processWithCustomAttributes(userId: string, query: string) {
await tracer.startActiveSpan("user_query", async (span) => {
// Standard GenAI attributes
span.setAttribute(GEN_AI_OPERATION_NAME, "chat");
span.setAttribute(GEN_AI_REQUEST_MODEL, "gpt-4");
// Custom business attributes
span.setAttribute("user.id", userId);
span.setAttribute("query.length", query.length);
span.setAttribute("query.language", detectLanguage(query));
span.setAttribute("app.feature", "customer_support");
span.setAttribute("app.version", "2.1.0");
// Array attributes (stored as JSON)
span.setAttribute("query.tags", JSON.stringify(["urgent", "technical"]));
const result = await processQuery(query);
// Result attributes
span.setAttribute("result.confidence", result.confidence);
span.setAttribute("result.sources_count", result.sources.length);
span.end();
return result;
});
}
function detectLanguage(text: string): string {
return "en"; // Simplified
}
async function processQuery(query: string) {
return {
answer: "Response",
confidence: 0.95,
sources: ["source1", "source2"],
};
};

Add timestamped events to spans for detailed logging:
import { trace } from "@opentelemetry/api";
const tracer = trace.getTracer("my-app");
async function processWithEvents(input: string) {
await tracer.startActiveSpan("complex_process", async (span) => {
// Add event at start
span.addEvent("processing_started", {
"input.length": input.length,
});
// Step 1
const result1 = await step1(input);
span.addEvent("step1_completed", {
"step1.output_length": result1.length,
"step1.duration_ms": 150,
});
// Step 2
const result2 = await step2(result1);
span.addEvent("step2_completed", {
"step2.transformations": 5,
});
// Final event
span.addEvent("processing_completed", {
"total.steps": 2,
"final.size": result2.length,
});
span.end();
return result2;
});
}
async function step1(input: string) {
return input.toUpperCase();
}
async function step2(input: string) {
return input + "!";
}

Configure trace sampling for high-volume applications:
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { TraceIdRatioBasedSampler, ParentBasedSampler } from "@opentelemetry/sdk-trace-base";
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
// Option 1: Configure via environment variable
process.env.OTEL_TRACES_SAMPLER = "traceidratio";
process.env.OTEL_TRACES_SAMPLER_ARG = "0.1"; // Sample 10%
initializeOTEL({ projectName: "sampled-app" });
// Option 2: Configure programmatically
const sampler = new TraceIdRatioBasedSampler(0.1); // 10% sampling
const provider = new NodeTracerProvider({
sampler: sampler,
});
// Option 3: Parent-based sampling (respect upstream sampling decisions)
const parentBasedSampler = new ParentBasedSampler({
root: new TraceIdRatioBasedSampler(0.1), // Sample 10% of root spans
});
const provider2 = new NodeTracerProvider({
sampler: parentBasedSampler,
});
// Custom sampling based on attributes
import { Sampler, SamplingResult } from "@opentelemetry/sdk-trace-base";
class CustomSampler implements Sampler {
shouldSample(context: any, traceId: string, spanName: string, spanKind: number, attributes: any): SamplingResult {
// Always sample errors
if (attributes["error"]) {
return { decision: 1 }; // RECORD_AND_SAMPLE
}
// Always sample high-value operations
if (spanName.includes("critical")) {
return { decision: 1 };
}
// Sample 10% of everything else
const randomValue = parseInt(traceId.slice(0, 8), 16) / 0xffffffff;
return {
decision: randomValue < 0.1 ? 1 : 0, // RECORD_AND_SAMPLE : DROP
};
}
toString(): string {
return "CustomSampler";
}
}

The OpenTelemetry integration provides full TypeScript support with comprehensive type definitions:
import type {
InitializeOTELConfig,
LangSmithOTLPTraceExporterConfig,
} from "langsmith/experimental/otel";
// All functions and classes are fully typed
const config: InitializeOTELConfig = {
projectName: "my-project",
instrumentations: [],
};
initializeOTEL(config);

For high-volume applications, configure appropriate buffer sizes:
import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
const processor = new BatchSpanProcessor(exporter, {
maxQueueSize: 2048, // Max spans in queue
maxExportBatchSize: 512, // Spans per export
scheduledDelayMillis: 5000, // Export frequency
exportTimeoutMillis: 30000, // Timeout for export
});

Add resource attributes to identify service instances:
import { Resource } from "@opentelemetry/resources";
import { SEMRESATTRS_SERVICE_NAME, SEMRESATTRS_SERVICE_VERSION } from "@opentelemetry/semantic-conventions";
const resource = Resource.default().merge(
new Resource({
[SEMRESATTRS_SERVICE_NAME]: "llm-service",
[SEMRESATTRS_SERVICE_VERSION]: "2.1.0",
"deployment.environment": "production",
"cloud.provider": "aws",
"cloud.region": "us-west-2",
})
);
const provider = new NodeTracerProvider({
resource: resource,
});

// 1. Verify initialization happened before imports
// 2. Check environment variables
console.log("API Key set:", !!process.env.LANGSMITH_API_KEY);
console.log("Project:", process.env.LANGSMITH_PROJECT);
// 3. Enable debug logging
const client = new Client({ debug: true });
initializeOTEL({ client, projectName: "debug-app" });
// 4. Force flush before shutdown
import { trace } from "@opentelemetry/api";
const provider = trace.getTracerProvider();
await provider.shutdown(); // Ensures all spans are exported

// Use batch processor instead of simple processor
import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
// Increase batch size and delay for high throughput
const processor = new BatchSpanProcessor(exporter, {
maxQueueSize: 4096,
maxExportBatchSize: 1024,
scheduledDelayMillis: 10000, // Export every 10s
});
// Enable sampling to reduce volume
process.env.OTEL_TRACES_SAMPLER = "traceidratio";
process.env.OTEL_TRACES_SAMPLER_ARG = "0.05"; // 5% sampling

// app.ts
import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { HttpInstrumentation } from "@opentelemetry/instrumentation-http";
import { ExpressInstrumentation } from "@opentelemetry/instrumentation-express";
import express from "express";
import { trace } from "@opentelemetry/api";
import {
GEN_AI_OPERATION_NAME,
GEN_AI_REQUEST_MODEL,
GEN_AI_USAGE_INPUT_TOKENS,
GEN_AI_USAGE_OUTPUT_TOKENS,
} from "langsmith/experimental/otel/exporter";
// Initialize OTEL first
initializeOTEL({
projectName: "example-app",
instrumentations: [
new HttpInstrumentation(),
new ExpressInstrumentation(),
],
});
const app = express();
const tracer = trace.getTracer("example-app");
app.use(express.json());
app.post("/api/chat", async (req, res) => {
// HTTP and Express spans are automatic
// Add custom span for LLM call
await tracer.startActiveSpan("llm_chat", async (span) => {
span.setAttribute(GEN_AI_OPERATION_NAME, "chat");
span.setAttribute(GEN_AI_REQUEST_MODEL, "gpt-4");
try {
const response = await callChatAPI(req.body.message);
span.setAttribute(GEN_AI_USAGE_INPUT_TOKENS, response.usage.input);
span.setAttribute(GEN_AI_USAGE_OUTPUT_TOKENS, response.usage.output);
span.setStatus({ code: 1 });
res.json({ response: response.text });
} catch (error) {
span.recordException(error);
span.setStatus({ code: 2, message: error.message });
res.status(500).json({ error: "Processing failed" });
} finally {
span.end();
}
});
});
async function callChatAPI(message: string) {
// Simulated API call
return {
text: "Response to: " + message,
usage: { input: 20, output: 50 },
};
}
app.listen(3000, () => {
console.log("Server with OpenTelemetry tracing on port 3000");
});
// Graceful shutdown
process.on("SIGTERM", async () => {
console.log("Shutting down...");
// Flush remaining traces
const provider = trace.getTracerProvider();
await provider.shutdown();
process.exit(0);
});