tessl install tessl/npm-langsmith@0.4.3

TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Native integration with the Vercel AI SDK for automatic tracing of AI model invocations.
langsmith (npm)

npm install langsmith ai @ai-sdk/openai

langsmith/experimental/vercel

LangSmith provides native integration with the Vercel AI SDK through wrapper functions that enable automatic tracing and monitoring of generateText, streamText, generateObject, and streamObject operations.
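Tracing credentials are read from the environment; as with other LangSmith SDK usage, you will typically set LANGSMITH_TRACING=true and LANGSMITH_API_KEY before running the examples below.

For ESM: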
import {
wrapAISDK,
createLangSmithProviderOptions,
LangSmithMiddleware
} from "langsmith/experimental/vercel";For CommonJS:
const {
wrapAISDK,
createLangSmithProviderOptions,
LangSmithMiddleware
} = require("langsmith/experimental/vercel");import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
// Wrap AI SDK - wrapLanguageModel is REQUIRED
const wrappedAI = wrapAISDK({ wrapLanguageModel, generateText });
// Use wrapped functions
const { text } = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "What is the capital of France?"
});

/**
* Wrap Vercel AI SDK functions with LangSmith tracing
* @param ai - AI SDK module (MUST include wrapLanguageModel)
* @param baseLsConfig - Optional base configuration
* @returns Wrapped AI SDK with automatic tracing
*/
function wrapAISDK<T>(ai: T, baseLsConfig?: WrapAISDKConfig<T>): T;
interface WrapAISDKConfig<T = any> {
/** Custom run name shown in LangSmith */
name?: string;
/** LangSmith client instance */
client?: Client;
/** LangSmith project to log traces to */
project_name?: string;
/** Metadata attached to traces */
metadata?: KVMap;
/** Tags attached to traces */
tags?: string[];
/** Transform inputs before logging */
processInputs?: (inputs: any) => Record<string, unknown>;
/** Transform outputs before logging */
processOutputs?: (outputs: any) => Record<string, unknown>;
/** Transform child LLM run inputs */
processChildLLMRunInputs?: (inputs: any) => Record<string, unknown>;
/** Transform child LLM run outputs */
processChildLLMRunOutputs?: (outputs: any) => Record<string, unknown>;
/** Include response metadata in traces */
traceResponseMetadata?: boolean;
/** Include raw HTTP request/response in traces */
traceRawHttp?: boolean;
}
interface WrapAISDKOptions {
/** Configuration for specific invocation */
config?: Partial<WrapAISDKConfig>;
/** Run tree for context */
runTree?: RunTree;
}
/**
* Aggregated stream output type
*/
type AggregatedDoStreamOutput = {
/** Full text output */
text?: string;
/** Tool calls made */
toolCalls?: Array<{
toolCallId: string;
toolName: string;
args: Record<string, any>;
}>;
/** Tool results */
toolResults?: Array<{
toolCallId: string;
result: any;
}>;
/** Usage statistics */
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
/** Finish reason */
finishReason?: "stop" | "length" | "tool-calls" | "content-filter" | "error";
};
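As a sketch of where this type can appear (an assumption based on the type's name, not confirmed by the package docs), a processChildLLMRunOutputs hook for a streaming call might receive an aggregated stream output and reduce it to the fields worth logging:

import type { AggregatedDoStreamOutput } from "langsmith/experimental/vercel"; // hypothetical export path
// Keep only the text, token usage, and finish reason from an aggregated streamed child run.
const processChildLLMRunOutputs = (outputs: AggregatedDoStreamOutput) => ({
text: outputs.text,
totalTokens: outputs.usage?.totalTokens,
finishReason: outputs.finishReason
});

Generate text with automatic tracing.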
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "text-generation" }
);
const { text } = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "Write a professional email"
});

Stream text responses with automatic tracing.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, streamText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, streamText },
{ project_name: "streaming-app" }
);
const { textStream } = await wrappedAI.streamText({
model: openai("gpt-4-turbo"),
prompt: "Explain quantum computing"
});
for await (const chunk of textStream) {
process.stdout.write(chunk);
}

Apply configuration during streaming operations.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, streamText } from "ai";
import { anthropic } from "@ai-sdk/anthropic";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, streamText },
{
project_name: "streaming-demo",
tags: ["claude", "streaming"]
}
);
const { textStream } = await wrappedAI.streamText({
model: anthropic("claude-3-opus-20240229"),
prompt: "Write a short story"
});
for await (const chunk of textStream) {
process.stdout.write(chunk);
}

Stream structured objects with validation.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, streamObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, streamObject },
{ project_name: "stream-objects" }
);
const { partialObjectStream } = await wrappedAI.streamObject({
model: openai("gpt-4"),
schema: z.object({
characters: z.array(z.object({
name: z.string(),
role: z.string()
}))
}),
prompt: "Generate 3 fantasy characters"
});
for await (const partialObject of partialObjectStream) {
console.log("Partial:", partialObject);
}

Generate complete structured objects with schema validation.

import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateObject } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateObject },
{ project_name: "structured-output" }
);
const { object } = await wrappedAI.generateObject({
model: openai("gpt-4"),
schema: z.object({
name: z.string(),
age: z.number()
}),
prompt: "Generate a user profile"
});

/**
* Create LangSmith provider options for Vercel AI SDK
* @param config - Configuration for this specific invocation
* @returns Provider options object
*/
function createLangSmithProviderOptions<T = any>(
config?: Partial<WrapAISDKConfig>
): T;
/**
* Convert Vercel AI SDK message to traced format
* @param message - AI SDK message
* @returns Traced message format
*/
function convertMessageToTracedFormat(message: any): Record<string, any>;
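A minimal usage sketch (the exact shape of the returned record is an assumption; consult the traced output in LangSmith for the authoritative format):

import { convertMessageToTracedFormat } from "langsmith/experimental/vercel";
// Convert an AI SDK chat message into the format LangSmith logs.
const traced = convertMessageToTracedFormat({
role: "user",
content: "What is the capital of France?"
});
console.log(traced); // Record<string, any> suitable for trace logging

Override LangSmith configuration for a specific call.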
import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "my-app" }
);
// Override for specific call
const lsConfig = createLangSmithProviderOptions({
name: "special-operation",
metadata: { userId: "user-456" },
tags: ["high-priority"]
});
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "Important query",
providerOptions: {
langsmith: lsConfig
}
});

Redact sensitive data before it is logged.

import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
const wrappedAI = wrapAISDK({ wrapLanguageModel, generateText });
// Redact sensitive data
const lsConfig = createLangSmithProviderOptions({
processInputs: (inputs) => ({
prompt: "[REDACTED]",
model: inputs.model
}),
processOutputs: (outputs) => ({
text: outputs.text.substring(0, 100) + "..."
})
});
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "Sensitive data",
providerOptions: {
langsmith: lsConfig
}
});

Use the wrapped SDK inside a Next.js API route.

import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { NextRequest, NextResponse } from "next/server";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "nextjs-api" }
);
export async function POST(req: NextRequest) {
const { prompt, userId } = await req.json();
const lsConfig = createLangSmithProviderOptions({
metadata: {
userId,
endpoint: "/api/generate"
}
});
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt,
providerOptions: {
langsmith: lsConfig
}
});
return NextResponse.json({ text: result.text });
}
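A hypothetical client-side call to this route; the /api/generate path and request shape mirror the handler above:

// Call the traced endpoint from the browser or a test script.
const res = await fetch("/api/generate", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ prompt: "Hello", userId: "user-123" })
});
const { text } = await res.json();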
Compare responses from multiple models.

import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { anthropic } from "@ai-sdk/anthropic";
const wrappedAI = wrapAISDK({ wrapLanguageModel, generateText });
const prompt = "What are the key benefits of TypeScript?";
// Compare GPT-4 and Claude
const [gpt4Result, claudeResult] = await Promise.all([
wrappedAI.generateText({
model: openai("gpt-4"),
prompt,
providerOptions: {
langsmith: createLangSmithProviderOptions({
name: "gpt4-comparison",
metadata: { model: "gpt-4", experiment: "model-comparison" }
})
}
}),
wrappedAI.generateText({
model: anthropic("claude-3-opus-20240229"),
prompt,
providerOptions: {
langsmith: createLangSmithProviderOptions({
name: "claude-comparison",
metadata: { model: "claude-3-opus", experiment: "model-comparison" }
})
}
})
]);
console.log("GPT-4:", gpt4Result.text);
console.log("Claude:", claudeResult.text);
// Both calls are traced separately in LangSmith with comparison metadata.

Trace tool calls and execution.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { z } from "zod";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "tool-calling" }
);
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "What's the weather in San Francisco?",
tools: {
getWeather: {
description: "Get the weather for a location",
parameters: z.object({
location: z.string()
}),
execute: async ({ location }) => {
// Tool execution is also traced
return { temperature: 72, condition: "sunny", location };
}
}
}
});
console.log(result.text);
// Both the main call and tool executions are traced.

Enable detailed response metadata in traces.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{
project_name: "detailed-traces",
traceResponseMetadata: true // Include steps and intermediate data
}
);
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "Explain the concept of recursion"
});

Trace raw HTTP request/response details.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{
project_name: "http-debugging",
traceRawHttp: true // Include raw HTTP details
}
);
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt: "Debug this request"
});

Transform child LLM run inputs and outputs for tracing.
import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{
project_name: "child-processing",
processChildLLMRunInputs: (inputs) => ({
...inputs,
// Transform child inputs
messages: inputs.messages?.map(m => ({
...m,
// Truncate string content to keep traces small
content: typeof m.content === "string" ? m.content.substring(0, 100) : m.content
}))
}),
processChildLLMRunOutputs: (outputs) => ({
...outputs,
// Transform child outputs
response: outputs.response?.substring(0, 200) // Truncate
})
}
);

Maintain trace context across Next.js server actions.
import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "server-actions" }
);
export async function generateResponse(prompt: string, sessionId: string) {
"use server";
const lsConfig = createLangSmithProviderOptions({
metadata: {
sessionId,
action: "generateResponse",
timestamp: new Date().toISOString()
},
tags: ["server-action", "next.js"]
});
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt,
providerOptions: {
langsmith: lsConfig
}
});
return result.text;
}

Implement retry patterns while maintaining trace continuity.
import { wrapAISDK, createLangSmithProviderOptions } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{ project_name: "retry-pattern" }
);
async function generateWithRetry(prompt: string, maxRetries = 3) {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const lsConfig = createLangSmithProviderOptions({
metadata: {
attempt,
maxRetries
},
tags: [`attempt-${attempt}`]
});
const result = await wrappedAI.generateText({
model: openai("gpt-4"),
prompt,
providerOptions: {
langsmith: lsConfig
}
});
return result.text;
} catch (error) {
if (attempt === maxRetries) throw error;
console.log(`Attempt ${attempt} failed, retrying...`);
await new Promise(resolve => setTimeout(resolve, 1000 * attempt));
}
}
}

// Set common config at wrapper level
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{
project_name: "my-app",
tags: ["production"],
client: customClient // a preconfigured LangSmith Client instance
}
);

Sanitize inputs before logging:

const config = {
processInputs: (inputs) => ({
...inputs,
prompt: inputs.prompt?.includes("password") ? "[REDACTED]" : inputs.prompt
})
};

Attach rich metadata so runs can be filtered in LangSmith:

const lsConfig = createLangSmithProviderOptions({
metadata: {
userId: user.id,
feature: "chat",
sessionId: session.id,
timestamp: new Date().toISOString()
}
});

Give important runs descriptive names and tags:

const lsConfig = createLangSmithProviderOptions({
name: "user-onboarding-welcome-message",
tags: ["onboarding", "automated"]
});

Vary trace verbosity by environment:

// In development
const devConfig = {
traceResponseMetadata: true,
traceRawHttp: true
};
// In production
const prodConfig = {
traceResponseMetadata: false,
traceRawHttp: false,
processInputs: (inputs) => ({
// Redact sensitive data
...inputs,
prompt: "[REDACTED]"
})
};
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
process.env.NODE_ENV === "development" ? devConfig : prodConfig
);

Handle streaming errors; failures are captured in the trace automatically.

import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, streamText } from "ai";
import { openai } from "@ai-sdk/openai";
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, streamText },
{ project_name: "streaming-errors" }
);
try {
const { textStream } = await wrappedAI.streamText({
model: openai("gpt-4"),
prompt: "Generate content"
});
for await (const chunk of textStream) {
process.stdout.write(chunk);
}
} catch (error) {
console.error("Streaming failed:", error);
// Error is automatically captured in trace
}

Keep logged payloads small to reduce tracing overhead:

// Batch requests when possible
const wrappedAI = wrapAISDK(
{ wrapLanguageModel, generateText },
{
project_name: "batch-optimized",
processInputs: (inputs) => {
// Only log essential information
return {
model: inputs.model,
promptLength: inputs.prompt?.length
};
}
}
);
// Use appropriate trace levels
const lsConfig = createLangSmithProviderOptions({
metadata: {
traceLevel: "summary" // vs "detailed"
}
});

For advanced use cases where you need fine-grained control, you can use LangSmithMiddleware directly with the Vercel AI SDK's wrapLanguageModel.
/**
* Create LangSmith middleware for Vercel AI SDK language models
* @param config - Middleware configuration
* @returns AI SDK middleware instance
*/
function LangSmithMiddleware(config?: {
/** Model display name */
name: string;
/** Model ID */
modelId?: string;
/** LangSmith configuration */
lsConfig?: {
client?: Client;
project_name?: string;
metadata?: KVMap;
tags?: string[];
processInputs?: (inputs: Record<string, unknown>) => Record<string, unknown>;
processOutputs?: (outputs: Record<string, unknown>) => Record<string, unknown> | Promise<Record<string, unknown>>;
traceRawHttp?: boolean;
[key: string]: any;
};
}): LanguageModelV2Middleware;

Use LangSmithMiddleware directly when you need to wrap specific models without using wrapAISDK.
import { LangSmithMiddleware } from "langsmith/experimental/vercel";
import { wrapLanguageModel } from "ai";
import { openai } from "@ai-sdk/openai";
// Create middleware
const middleware = LangSmithMiddleware({
name: "gpt-4-custom",
modelId: "gpt-4",
lsConfig: {
project_name: "custom-project",
metadata: { environment: "production" },
tags: ["critical"],
processInputs: (inputs) => ({
...inputs,
apiKey: "[REDACTED]"
})
}
});
// Wrap model directly
const wrappedModel = wrapLanguageModel({
model: openai("gpt-4"),
middleware
});
// Use the wrapped model with any AI SDK function
const { text } = await generateText({
model: wrappedModel,
prompt: "What is LangSmith?"
});

Use wrapAISDK (recommended): when you want every generateText, streamText, generateObject, and streamObject call traced with minimal setup.
Use LangSmithMiddleware directly: when you need to wrap specific models, customize tracing per model, or compose tracing with other AI SDK middleware.
You can compose LangSmithMiddleware with other AI SDK middleware:
import { LangSmithMiddleware } from "langsmith/experimental/vercel";
import { wrapLanguageModel } from "ai";
import { openai } from "@ai-sdk/openai";
// Custom retry middleware
const retryMiddleware = {
wrapGenerate: async ({ doGenerate }) => {
let attempts = 0;
// Retry up to 3 times, rethrowing the final error
while (true) {
try {
return await doGenerate();
} catch (error) {
attempts++;
if (attempts === 3) throw error;
}
}
}
};
// Combine middleware
const model = wrapLanguageModel({
model: openai("gpt-4"),
middleware: [
LangSmithMiddleware({
name: "gpt-4-with-retry",
lsConfig: { metadata: { retries: true } }
}),
retryMiddleware
]
});
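A short usage sketch for the composed model (assuming the same generateText import shown in the previous example):

// Calls through `model` are retried up to 3 times and traced in LangSmith.
const { text } = await generateText({
model,
prompt: "Summarize the benefits of middleware composition."
});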