SDK wrappers provide automatic tracing for popular AI/ML SDKs and libraries by instrumenting their method calls. The LangSmith SDK includes generic wrapper functionality along with specialized wrappers for OpenAI and Vercel AI SDK.
For ESM, import from the langsmith/wrappers modules:
import { wrapSDK } from "langsmith/wrappers";
import { wrapOpenAI } from "langsmith/wrappers/openai";
import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import { wrapAISDK } from "langsmith/experimental/vercel";

For CommonJS:
const { wrapSDK } = require("langsmith/wrappers");
const { wrapOpenAI } = require("langsmith/wrappers/openai");
const { wrapAnthropic } = require("langsmith/wrappers/anthropic");
const { wrapAISDK } = require("langsmith/experimental/vercel");

import { wrapSDK } from "langsmith/wrappers";
import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";
// Wrap OpenAI SDK
const openai = wrapOpenAI(new OpenAI(), {
projectName: "my-openai-project",
runName: "openai-call",
});
// Use as normal - calls are automatically traced
const completion = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Hello!" }],
});
// Wrap any SDK generically
const wrappedSDK = wrapSDK(someSdk, {
name: "my-sdk-wrapper",
projectName: "my-project",
});

The wrappers system provides multiple levels of SDK integration:

- wrapSDK() provides basic instrumentation for any object/SDK by proxying method calls
- wrapOpenAI() provides specialized tracing for the OpenAI SDK with proper input/output handling
- wrapAnthropic() provides specialized tracing for the Anthropic SDK with proper input/output handling
- wrapAISDK() provides specialized tracing for the Vercel AI SDK (from experimental/vercel)
- WrapSDKOptions and related interfaces allow customization of tracing behavior

Wrap arbitrary SDK objects for automatic tracing of method calls. Useful for any SDK or library that doesn't have a specialized wrapper.
/**
* Wrap arbitrary SDK for automatic tracing
* @param sdk - The SDK object to wrap
* @param options - Wrapper configuration options
* @returns Wrapped SDK with same interface
*/
function wrapSDK<T extends object>(sdk: T, options?: WrapSDKOptions): T;
interface WrapSDKOptions {
/** Name for the wrapper (used in traces) */
name?: string;
/** LangSmith client instance */
client?: Client;
/** Project name for traces */
projectName?: string;
/** Run name for traces */
runName?: string;
/** Additional metadata */
metadata?: KVMap;
/** Tags for runs */
tags?: string[];
}

Usage Examples:
import { wrapSDK } from "langsmith/wrappers";
// Wrap a custom SDK
const mySDK = {
async processData(input: string) {
// ... SDK logic
return { result: "processed" };
},
async analyzeText(text: string) {
// ... SDK logic
return { sentiment: "positive" };
},
};
const wrapped = wrapSDK(mySDK, {
name: "custom-sdk",
projectName: "my-analytics",
metadata: { version: "1.0" },
tags: ["production"],
});
// All method calls are automatically traced
const result = await wrapped.processData("Hello world");
const analysis = await wrapped.analyzeText("Great work!");

Specialized wrapper for OpenAI SDK that provides automatic tracing of completions, chat completions, and other OpenAI API calls with proper input/output formatting.
/**
* Wrap OpenAI SDK for automatic tracing
* @param openai - OpenAI client instance
* @param options - Wrapper configuration options
* @returns Wrapped OpenAI client with same interface
*/
function wrapOpenAI(openai: OpenAI, options?: WrapOpenAIOptions): OpenAI;
/**
* Note: wrapOpenAI() adds tracing capabilities and returns the wrapped SDK.
* The wrapper is designed to be a transparent proxy that traces method calls,
* but the exact API surface preserved may depend on the OpenAI SDK version.
*/
interface WrapOpenAIOptions {
/** Name for the wrapper */
name?: string;
/** LangSmith client instance */
client?: Client;
/** Project name for traces */
projectName?: string;
/** Run name for traces */
runName?: string;
/** Additional metadata */
metadata?: KVMap;
/** Tags for runs */
tags?: string[];
}

Usage Examples:
import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";
// Basic OpenAI wrapping
const openai = wrapOpenAI(
new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
}),
{
projectName: "openai-integration",
}
);
// Chat completion - automatically traced
const chatResponse = await openai.chat.completions.create({
model: "gpt-4",
messages: [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "What is the capital of France?" },
],
temperature: 0.7,
});
// Streaming completion - also traced
const stream = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Tell me a story" }],
stream: true,
});
for await (const chunk of stream) {
process.stdout.write(chunk.choices[0]?.delta?.content || "");
}
// With additional configuration
const configuredOpenAI = wrapOpenAI(new OpenAI(), {
projectName: "production-openai",
runName: "customer-support",
metadata: { environment: "prod", team: "support" },
tags: ["openai", "gpt-4", "customer-facing"],
});

Specialized wrapper for Anthropic SDK that provides automatic tracing of message completions and streaming with proper input/output formatting.
/**
* Wrap Anthropic SDK for automatic tracing
* @param anthropic - Anthropic client instance
* @param options - Wrapper configuration options
* @returns Wrapped Anthropic client with same interface
*/
function wrapAnthropic<T>(anthropic: T, options?: Partial<RunTreeConfig>): T;
/**
* Note: wrapAnthropic() adds tracing capabilities and returns the wrapped SDK.
* The wrapper is designed to be a transparent proxy that traces method calls,
* but the exact API surface preserved may depend on the Anthropic SDK version.
*/

Usage Examples:
import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import Anthropic from "@anthropic-ai/sdk";
// Basic Anthropic wrapping
const anthropic = wrapAnthropic(
new Anthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
}),
{
project_name: "anthropic-integration",
}
);
// Non-streaming completion - automatically traced
const message = await anthropic.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello!" }],
});
// Streaming completion - also traced
const messageStream = anthropic.messages.stream({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Tell me a story" }],
});
for await (const event of messageStream) {
if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
process.stdout.write(event.delta.text);
}
}
// Get final message
const finalMessage = await messageStream.finalMessage();
// With additional configuration
const configuredAnthropic = wrapAnthropic(new Anthropic(), {
project_name: "production-anthropic",
name: "customer-support",
metadata: { environment: "prod", team: "support" },
tags: ["anthropic", "claude", "customer-facing"],
});

Specialized wrapper for Vercel AI SDK that provides automatic tracing of generation functions with proper telemetry integration.
/**
* Wrap Vercel AI SDK model for automatic tracing
* @param model - Vercel AI SDK model instance
* @param options - Wrapper configuration options
* @returns Wrapped model with same interface
*/
function wrapAISDK<T>(model: T, options?: WrapAISDKOptions): T;
interface WrapAISDKOptions {
/** Name for the wrapper */
name?: string;
/** LangSmith client instance */
client?: Client;
/** Project name for traces */
projectName?: string;
/** Run name for traces */
runName?: string;
/** Additional metadata */
metadata?: KVMap;
/** Tags for runs */
tags?: string[];
}

Usage Examples:
import { wrapAISDK } from "langsmith/experimental/vercel";
import { openai } from "@ai-sdk/openai";
import { generateText, streamText } from "ai";
// Wrap the model
const model = wrapAISDK(openai("gpt-4"), {
projectName: "vercel-ai-integration",
runName: "text-generation",
});
// Use with generateText - automatically traced
const { text } = await generateText({
model,
prompt: "Write a haiku about programming",
});
// With streaming
const wrappedStreamModel = wrapAISDK(openai("gpt-4-turbo"), {
projectName: "streaming-app",
metadata: { feature: "chat" },
tags: ["vercel", "streaming"],
});
const { textStream } = await streamText({
model: wrappedStreamModel,
prompt: "Explain quantum computing",
});
for await (const chunk of textStream) {
console.log(chunk);
}

Creates provider options for runtime configuration of LangSmith tracing.
/**
* Wraps LangSmith config in a way that matches AI SDK provider types
* @param lsConfig - Optional LangSmith configuration
* @returns Provider options object that can be passed to AI SDK functions
*/
function createLangSmithProviderOptions<T>(
lsConfig?: WrapAISDKConfig<T>
): Record<string, JSONValue>;

This function creates a configuration object that can be passed via the providerOptions.langsmith parameter to override or extend the base configuration for specific calls.
Parameters:
lsConfig?: WrapAISDKConfig<T> - Optional LangSmith-specific configuration
Returns:
Record<string, JSONValue> - A provider options object to pass as providerOptions.langsmith

Usage Examples:
import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import * as ai from "ai";
import { openai } from "@ai-sdk/openai";
const { generateText } = wrapAISDK(ai);
// Create runtime configuration
const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
name: "summarization",
metadata: {
userId: "user-123",
feature: "summarize",
},
processInputs: (inputs) => ({
...inputs,
prompt: "REDACTED", // Hide sensitive input
}),
});
// Use with provider options
const result = await generateText({
model: openai("gpt-4"),
prompt: "Sensitive data here",
providerOptions: {
langsmith: lsConfig,
},
});Utility function to convert Vercel AI SDK messages to LangSmith trace format.
/**
* Convert Vercel AI SDK message format to LangSmith trace format
* @param message - Message to convert
* @param metadata - Optional additional metadata to include
* @returns Converted message in LangSmith format
*/
function convertMessageToTracedFormat(
message: any,
metadata?: Record<string, unknown>
): Record<string, unknown>;This utility function is used internally by wrapAISDK but is also exported for advanced use cases where you need to manually format messages for tracing.
Usage Examples:
import { convertMessageToTracedFormat } from "langsmith/experimental/vercel";
const message = {
role: "assistant",
content: "Hello!",
};
const traced = convertMessageToTracedFormat(message, {
model: "gpt-4",
tokens: 50,
});Type representing aggregated streaming output from the AI SDK.
type AggregatedDoStreamOutput = any; // Aggregated result from streaming operations

This type is used internally to represent the aggregated output from streaming AI SDK operations.
import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText, streamText, generateObject, streamObject } from "ai";
import { openai } from "@ai-sdk/openai";
// Base configuration for all calls
const { generateText } = wrapAISDK(ai, {
project_name: "my-app",
tags: ["base-config"],
});
// Override for specific call
const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
name: "special-operation",
metadata: {
userId: "user-456",
priority: "high",
},
tags: ["special", "high-priority"],
});
const result = await generateText({
model: openai("gpt-4"),
prompt: "Important query",
providerOptions: {
langsmith: lsConfig,
},
});

import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import * as ai from "ai";
import { openai } from "@ai-sdk/openai";
const { generateText } = wrapAISDK(ai);
// Redact sensitive information from traces
const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
processInputs: (inputs) => ({
prompt: "[REDACTED]",
model: inputs.model,
// Keep non-sensitive fields
}),
processOutputs: (outputs) => ({
text: outputs.text.substring(0, 100) + "...",
// Truncate output for privacy
}),
});
const result = await generateText({
model: openai("gpt-4"),
prompt: "Sensitive PII data here",
providerOptions: {
langsmith: lsConfig,
},
});import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText, streamText, generateObject, streamObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { NextRequest, NextResponse } from "next/server";
const { generateText } = wrapAISDK(ai, {
project_name: "nextjs-api",
tags: ["api-route"],
});
export async function POST(req: NextRequest) {
const { prompt, userId } = await req.json();
const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
metadata: {
userId,
endpoint: "/api/generate",
timestamp: new Date().toISOString(),
},
});
try {
const result = await generateText({
model: openai("gpt-4"),
prompt,
providerOptions: {
langsmith: lsConfig,
},
});
return NextResponse.json({ text: result.text });
} catch (error) {
console.error("Generation failed:", error);
return NextResponse.json(
{ error: "Failed to generate text" },
{ status: 500 }
);
}
}import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText, streamText, generateObject, streamObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
const { generateText } = wrapAISDK(ai, {
project_name: "tool-calling",
});
const result = await generateText({
model: openai("gpt-4"),
prompt: "What's the weather in San Francisco?",
tools: {
getWeather: {
description: "Get the weather for a location",
parameters: z.object({
location: z.string(),
}),
execute: async ({ location }) => {
// Tool execution is also traced
return { temperature: 72, condition: "sunny", location };
},
},
},
});
console.log(result.text);
// Both the main call and tool executions are tracedSet common configuration (project name, tags, client) at the wrapper level, and override only when needed:
const wrappedAI = wrapAISDK(ai, {
project_name: "my-app",
tags: ["production"],
client: customClient,
});Use processInputs and processOutputs to redact sensitive information:
const config = {
processInputs: (inputs) => ({
...inputs,
prompt: inputs.prompt?.includes("password") ? "[REDACTED]" : inputs.prompt,
}),
};Include relevant context in metadata for easier debugging and analysis:
const lsConfig = createLangSmithProviderOptions({
metadata: {
userId: user.id,
feature: "chat",
sessionId: session.id,
timestamp: new Date().toISOString(),
},
});Provide meaningful names for operations to improve trace organization:
const lsConfig = createLangSmithProviderOptions({
name: "user-onboarding-welcome-message",
tags: ["onboarding", "automated"],
});When debugging complex flows, enable traceResponseMetadata to capture additional details:
const { generateText } = wrapAISDK(ai, {
traceResponseMetadata: true, // Includes steps and intermediate data
});The generic SDK wrapper provides automatic tracing for any SDK or object by creating a proxy that intercepts method calls.
Creates a traced wrapper around any SDK object.
/**
* Wrap arbitrary SDK for automatic tracing of method calls
* @param sdk - The SDK object to wrap (can be any object with methods)
* @param options - Configuration options for the wrapper
* @returns Wrapped SDK with identical interface but traced method calls
* @template T - The type of the SDK object being wrapped
*/
function wrapSDK<T extends object>(sdk: T, options?: WrapSDKOptions): T;Options for configuring the generic SDK wrapper.
interface WrapSDKOptions {
/**
* Name for the wrapper (appears in trace names)
* @default "wrapped-sdk"
*/
name?: string;
/**
* LangSmith client instance to use for tracing
* @default Uses default client from environment
*/
client?: Client;
/**
* Project name where traces will be logged
* @default Uses LANGCHAIN_PROJECT or auto-generated name
*/
projectName?: string;
/**
* Run name for traced operations
* @default Uses method name
*/
runName?: string;
/**
* Additional metadata to attach to all traces
*/
metadata?: KVMap;
/**
* Tags to attach to all traces
*/
tags?: string[];
}import { wrapSDK } from "langsmith/wrappers";
import { Client } from "langsmith";
// Basic wrapping
const sdk = wrapSDK(mySDK);
// With custom client
const client = new Client({
apiKey: process.env.LANGSMITH_API_KEY,
});
const tracedSDK = wrapSDK(mySDK, {
client,
projectName: "my-sdk-traces",
});
// With full configuration
const fullyConfiguredSDK = wrapSDK(mySDK, {
name: "analytics-sdk",
projectName: "production-analytics",
runName: "data-processing",
metadata: {
version: "2.0",
environment: "production",
region: "us-west-2",
},
tags: ["analytics", "production", "critical"],
});
// Nested SDK wrapping
const wrappedClient = wrapSDK(client, {
name: "api-client",
projectName: "api-traces",
});
// Wrap multiple SDKs with different configs
const openaiSDK = wrapSDK(openaiClient, {
name: "openai",
projectName: "llm-calls",
tags: ["llm", "openai"],
});
const anthropicSDK = wrapSDK(anthropicClient, {
name: "anthropic",
projectName: "llm-calls",
tags: ["llm", "anthropic"],
});The generic SDK wrapper:
The OpenAI SDK wrapper provides specialized tracing for the official OpenAI SDK with proper handling of completions, chat, embeddings, and streaming.
Creates a traced wrapper around an OpenAI client instance.
/**
* Wrap OpenAI SDK client for automatic tracing
* @param openai - OpenAI client instance to wrap
* @param options - Configuration options for the wrapper
* @returns Wrapped OpenAI client with identical interface
*/
function wrapOpenAI(openai: OpenAI, options?: WrapOpenAIOptions): OpenAI;Options specific to OpenAI wrapper (extends base wrapper options).
interface WrapOpenAIOptions {
/**
* Name for the wrapper
* @default "openai"
*/
name?: string;
/**
* LangSmith client instance
*/
client?: Client;
/**
* Project name for traces
*/
projectName?: string;
/**
* Run name for traces
* @default Uses operation name (e.g., "chat.completions.create")
*/
runName?: string;
/**
* Additional metadata
*/
metadata?: KVMap;
/**
* Tags for runs
*/
tags?: string[];
}import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";
const openai = wrapOpenAI(new OpenAI(), {
projectName: "chat-app",
});
// Standard completion
const response = await openai.chat.completions.create({
model: "gpt-4",
messages: [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "Hello!" },
],
});
// Streaming completion
const stream = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Tell me a joke" }],
stream: true,
});
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content;
if (content) console.log(content);
}
// With function calling
const functionResponse = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "What's the weather?" }],
functions: [
{
name: "get_weather",
description: "Get current weather",
parameters: {
type: "object",
properties: {
location: { type: "string" },
},
},
},
],
});import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";
const openai = wrapOpenAI(new OpenAI(), {
projectName: "completions",
});
// Legacy completion
const completion = await openai.completions.create({
model: "gpt-3.5-turbo-instruct",
prompt: "Once upon a time",
max_tokens: 100,
});
// Streaming legacy completion
const stream = await openai.completions.create({
model: "gpt-3.5-turbo-instruct",
prompt: "Write a story",
stream: true,
});
for await (const chunk of stream) {
console.log(chunk.choices[0]?.text);
}import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";
const openai = wrapOpenAI(new OpenAI(), {
projectName: "embeddings",
tags: ["embeddings"],
});
// Create embeddings
const response = await openai.embeddings.create({
model: "text-embedding-ada-002",
input: "The quick brown fox jumps over the lazy dog",
});
const embedding = response.data[0].embedding;
// Batch embeddings
const batchResponse = await openai.embeddings.create({
model: "text-embedding-ada-002",
input: ["First document", "Second document", "Third document"],
});
const embeddings = batchResponse.data.map((d) => d.embedding);import { wrapOpenAI } from "langsmith/wrappers/openai";
import { Client } from "langsmith";
import OpenAI from "openai";
// Custom client
const lsClient = new Client({
apiKey: process.env.LANGSMITH_API_KEY,
apiUrl: "https://api.smith.langchain.com",
});
// Production configuration
const openai = wrapOpenAI(
new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
organization: process.env.OPENAI_ORG_ID,
}),
{
client: lsClient,
projectName: "production-chat",
runName: "customer-support-bot",
metadata: {
version: "2.1.0",
deployment: "us-east-1",
environment: "production",
},
tags: ["openai", "gpt-4", "production", "customer-support"],
}
);
// Multiple wrappers for different use cases
const fastOpenAI = wrapOpenAI(new OpenAI(), {
projectName: "fast-responses",
runName: "gpt-3.5-turbo",
tags: ["fast", "gpt-3.5"],
});
const smartOpenAI = wrapOpenAI(new OpenAI(), {
projectName: "complex-tasks",
runName: "gpt-4",
tags: ["smart", "gpt-4"],
});The OpenAI wrapper automatically captures:
The Anthropic SDK wrapper provides specialized tracing for the official Anthropic SDK with proper handling of message completions and streaming.
Creates a traced wrapper around an Anthropic client instance.
/**
* Wrap Anthropic SDK client for automatic tracing
* @param anthropic - Anthropic client instance to wrap
* @param options - Configuration options for the wrapper
* @returns Wrapped Anthropic client with identical interface
*/
function wrapAnthropic<T>(anthropic: T, options?: Partial<RunTreeConfig>): T;Options for Anthropic wrapper (accepts Partial<RunTreeConfig>).
interface RunTreeConfig {
/** Run name override */
name?: string;
/** Project name for traces */
project_name?: string;
/** Additional metadata */
metadata?: KVMap;
/** Tags for runs */
tags?: string[];
/** LangSmith client instance */
client?: Client;
/** Run type */
run_type?: string;
}import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import Anthropic from "@anthropic-ai/sdk";
const anthropic = wrapAnthropic(
new Anthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
}),
{
project_name: "chat-app",
}
);
// Standard message completion
const message = await anthropic.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Hello! How are you?" }],
});
console.log(message.content[0].text);
// With system message
const response = await anthropic.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
system: "You are a helpful coding assistant.",
messages: [{ role: "user", content: "Explain async/await" }],
});
// With tool use
const toolResponse = await anthropic.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
tools: [
{
name: "get_weather",
description: "Get current weather for a location",
input_schema: {
type: "object",
properties: {
location: { type: "string" },
},
},
},
],
messages: [{ role: "user", content: "What's the weather in San Francisco?" }],
});import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import Anthropic from "@anthropic-ai/sdk";
const anthropic = wrapAnthropic(new Anthropic(), {
project_name: "streaming-chat",
});
// Stream message
const messageStream = anthropic.messages.stream({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
messages: [{ role: "user", content: "Tell me a story about a robot" }],
});
// Process stream events
for await (const event of messageStream) {
if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
process.stdout.write(event.delta.text);
}
}
// Get final aggregated message
const finalMessage = await messageStream.finalMessage();
console.log("\n\nFinal message:", finalMessage);import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import { Client } from "langsmith";
import Anthropic from "@anthropic-ai/sdk";
// Custom LangSmith client
const lsClient = new Client({
apiKey: process.env.LANGSMITH_API_KEY,
apiUrl: "https://api.smith.langchain.com",
});
// Production configuration
const anthropic = wrapAnthropic(
new Anthropic({
apiKey: process.env.ANTHROPIC_API_KEY,
}),
{
client: lsClient,
project_name: "production-chat",
name: "customer-support-bot",
metadata: {
version: "2.1.0",
deployment: "us-east-1",
environment: "production",
},
tags: ["anthropic", "claude", "production", "customer-support"],
}
);
// Multiple wrappers for different use cases
const fastAnthropic = wrapAnthropic(new Anthropic(), {
project_name: "fast-responses",
name: "claude-haiku",
tags: ["fast", "haiku"],
});
const smartAnthropic = wrapAnthropic(new Anthropic(), {
project_name: "complex-tasks",
name: "claude-opus",
tags: ["smart", "opus"],
});The Anthropic wrapper automatically captures:
The Vercel AI SDK wrapper provides automatic tracing for Vercel's AI SDK functions with proper telemetry integration.
Creates a traced wrapper around a Vercel AI SDK model.
/**
* Wrap Vercel AI SDK model for automatic tracing
* @param model - Vercel AI SDK model instance
* @param options - Configuration options for the wrapper
* @returns Wrapped model with identical interface
* @template T - The type of the model being wrapped
*/
function wrapAISDK<T>(model: T, options?: WrapAISDKOptions): T;Options specific to Vercel AI SDK wrapper.
interface WrapAISDKOptions {
/**
* Name for the wrapper
* @default "vercel-ai-sdk"
*/
name?: string;
/**
* LangSmith client instance
*/
client?: Client;
/**
* Project name for traces
*/
projectName?: string;
/**
* Run name for traces
* @default Uses model ID
*/
runName?: string;
/**
* Additional metadata
*/
metadata?: KVMap;
/**
* Tags for runs
*/
tags?: string[];
}import { wrapAISDK } from "langsmith/experimental/vercel";
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";
// Wrap the model
const model = wrapAISDK(openai("gpt-4"), {
projectName: "text-generation",
tags: ["vercel-ai", "generation"],
});
// Generate text
const { text, finishReason, usage } = await generateText({
model,
prompt: "Write a professional email declining a meeting.",
});
console.log(text);
console.log(`Tokens: ${usage.totalTokens}`);
// With system message
const { text: assistantText } = await generateText({
model,
system: "You are a helpful coding assistant.",
prompt: "Explain async/await in JavaScript",
});
// With structured output
const { text: jsonText } = await generateText({
model,
prompt: "List 3 colors in JSON format",
});import { wrapAISDK } from "langsmith/experimental/vercel";
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";
const model = wrapAISDK(openai("gpt-4-turbo"), {
projectName: "streaming-chat",
tags: ["streaming"],
});
// Stream text
const { textStream, usage } = await streamText({
model,
prompt: "Explain quantum computing in simple terms",
});
// Process stream
for await (const chunk of textStream) {
process.stdout.write(chunk);
}
// Wait for completion and get usage
const finalUsage = await usage;
console.log(`\nTotal tokens: ${finalUsage.totalTokens}`);import { wrapAISDK } from "langsmith/experimental/vercel";
import { openai } from "@ai-sdk/openai";
import { generateObject } from "ai";
import { z } from "zod";
const model = wrapAISDK(openai("gpt-4"), {
projectName: "structured-output",
tags: ["object-generation"],
});
// Define schema
const recipeSchema = z.object({
name: z.string(),
ingredients: z.array(z.string()),
steps: z.array(z.string()),
prepTime: z.number(),
});
// Generate structured object
const { object } = await generateObject({
model,
schema: recipeSchema,
prompt: "Create a recipe for chocolate chip cookies",
});
console.log(object.name);
console.log(`Prep time: ${object.prepTime} minutes`);
console.log("Ingredients:", object.ingredients.join(", "));import { wrapAISDK } from "langsmith/experimental/vercel";
import { openai } from "@ai-sdk/openai";
import { anthropic } from "@ai-sdk/anthropic";
import { generateText } from "ai";
// Wrap different providers
const gpt4 = wrapAISDK(openai("gpt-4"), {
projectName: "multi-provider",
runName: "openai-gpt4",
tags: ["openai", "gpt-4"],
});
const claude = wrapAISDK(anthropic("claude-3-opus-20240229"), {
projectName: "multi-provider",
runName: "anthropic-claude",
tags: ["anthropic", "claude"],
});
// Use interchangeably
const openaiResponse = await generateText({
model: gpt4,
prompt: "Explain recursion",
});
const claudeResponse = await generateText({
model: claude,
prompt: "Explain recursion",
});
// Compare responses
console.log("OpenAI:", openaiResponse.text);
console.log("Anthropic:", claudeResponse.text);import { wrapAISDK } from "langsmith/experimental/vercel";
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";
import { createStreamableUI } from "ai/rsc";
// Production configuration
const model = wrapAISDK(openai("gpt-4-turbo"), {
projectName: "production-chat",
runName: "user-assistant",
metadata: {
version: "3.0",
feature: "chat",
deployment: "vercel",
},
tags: ["production", "chat", "vercel-ai"],
});
// Use with React Server Components
async function Chat({ prompt }: { prompt: string }) {
const stream = createStreamableUI();
(async () => {
const { textStream } = await streamText({
model,
prompt,
temperature: 0.7,
maxTokens: 500,
});
for await (const text of textStream) {
stream.update(<div>{text}</div>);
}
stream.done();
})();
return stream.value;
}
// Error handling
async function safeGenerate(prompt: string) {
try {
const { text } = await generateText({
model,
prompt,
maxRetries: 3,
});
return { success: true, text };
} catch (error) {
console.error("Generation failed:", error);
return { success: false, error: error instanceof Error ? error.message : String(error) };
}
}The Vercel AI SDK wrapper automatically captures:
traceable() decorator

// Use consistent project names
const openai = wrapOpenAI(client, {
projectName: "my-app-production", // Group related traces
});
// Add meaningful metadata
const wrapped = wrapSDK(sdk, {
metadata: {
version: "1.2.3",
environment: process.env.NODE_ENV,
userId: currentUserId,
},
});
// Use descriptive tags
const tracedModel = wrapAISDK(baseModel, {
tags: ["production", "customer-facing", "high-priority"],
});

// Create separate wrappers for different contexts
const developmentOpenAI = wrapOpenAI(new OpenAI(), {
projectName: "development",
tags: ["dev"],
});
const productionOpenAI = wrapOpenAI(new OpenAI(), {
projectName: "production",
tags: ["prod"],
});
// Use conditionally
const openai = process.env.NODE_ENV === "production" ? productionOpenAI : developmentOpenAI;import { wrapOpenAI } from "langsmith/wrappers/openai";
const openai = wrapOpenAI(new OpenAI());
try {
const response = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Hello" }],
});
} catch (error) {
// Error is automatically logged to trace
console.error("OpenAI call failed:", error);
// Handle error appropriately
}import { traceable } from "langsmith/traceable";
import { wrapOpenAI } from "langsmith/wrappers/openai";
const openai = wrapOpenAI(new OpenAI());
// Wrap business logic with traceable
const processUserQuery = traceable(
async (query: string) => {
// OpenAI call is traced as child
const response = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: query }],
});
return response.choices[0].message.content;
},
{ name: "processUserQuery", run_type: "chain" }
);
// Creates nested trace: processUserQuery > openai.chat.completions.create
await processUserQuery("What is AI?");// Reuse wrapped clients (don't recreate on every call)
const openai = wrapOpenAI(new OpenAI(), {
projectName: "my-app",
});
// Good: Reuse wrapper
async function chat(message: string) {
return openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: message }],
});
}
// Bad: Creates new wrapper each time
async function chatBad(message: string) {
const openai = wrapOpenAI(new OpenAI()); // Don't do this
return openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: message }],
});
}