tessl/npm-langsmith

tessl install tessl/npm-langsmith@0.4.3

TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.

docs/guides/quick-reference.md (describes npm package langsmith@0.4.x)

Quick Reference

Common patterns, code snippets, and essential APIs for quick lookup.

Essential Imports

// Core
import { Client } from "langsmith";
import { traceable } from "langsmith/traceable";
import { RunTree } from "langsmith";

// Evaluation
import { evaluate } from "langsmith/evaluation";

// Wrappers
import { wrapOpenAI } from "langsmith/wrappers/openai";
import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import { wrapAISDK } from "langsmith/experimental/vercel";

// LangChain
import { getLangchainCallbacks, RunnableTraceable } from "langsmith/langchain";

// Testing (import from the module matching your test runner; don't mix both in one file)
import { test, expect, wrapEvaluator } from "langsmith/jest";
import { test, expect, wrapEvaluator } from "langsmith/vitest";

// Utilities
import { createAnonymizer } from "langsmith/anonymizer";
import { uuid7, uuid7FromTime, getDefaultProjectName } from "langsmith";

Environment Variables

# Required
LANGCHAIN_API_KEY=lsv2_pt_...        # Your API key

# Optional
LANGCHAIN_PROJECT=my-project         # Default project name
LANGCHAIN_ENDPOINT=https://...       # API endpoint
LANGCHAIN_TRACING=true               # Enable/disable tracing

Client Setup

import { Client } from "langsmith";

// Use environment variables
const client = new Client();

// Explicit configuration
const client = new Client({
  apiUrl: "https://api.smith.langchain.com",
  apiKey: process.env.LANGCHAIN_API_KEY,
  timeout_ms: 10000,
});

// Production configuration
const client = new Client({
  autoBatchTracing: true,
  tracingSamplingRate: 0.1,  // 10% sampling
  hideInputs: (inputs) => redactPII(inputs),
});

Tracing Patterns

Basic Traceable

import { traceable } from "langsmith/traceable";

const myFunction = traceable(
  async (input: string) => {
    return `Processed: ${input}`;
  },
  { name: "my-function", run_type: "chain" }
);

await myFunction("test");

Nested Tracing

const retrieve = traceable(
  async (query: string) => await vectorDB.search(query),
  { name: "retrieve", run_type: "retriever" }
);

const generate = traceable(
  async (query: string, docs: string[]) => await llm.generate({ query, context: docs.join("\n") }),
  { name: "generate", run_type: "llm" }
);

const ragPipeline = traceable(
  async (query: string) => {
    const docs = await retrieve(query);
    const answer = await generate(query, docs);
    return answer;
  },
  { name: "rag-pipeline", run_type: "chain" }
);

Access Current Run

import { traceable, getCurrentRunTree } from "langsmith/traceable";

const myFunction = traceable(async (input: string) => {
  const runTree = getCurrentRunTree();

  // Add metadata dynamically
  runTree.metadata = { ...runTree.metadata, processed: true };

  return `Processed: ${input}`;
}, { name: "my-function" });

Manual Run Trees

import { RunTree } from "langsmith";

const parentRun = new RunTree({
  name: "parent-operation",
  run_type: "chain",
  inputs: { query: "What is AI?" },
});

const llmRun = parentRun.createChild({
  name: "llm-call",
  run_type: "llm",
});

await llmRun.end({ response: "AI is..." });
await llmRun.postRun();

await parentRun.end({ result: "Complete" });
await parentRun.postRun();

Evaluation Patterns

Basic Evaluation

import { evaluate } from "langsmith/evaluation";
import { Client } from "langsmith";

const client = new Client();

// Create dataset
const dataset = await client.createDataset({
  datasetName: "qa-eval",
  description: "QA evaluation dataset"
});

await client.createExamples({
  datasetId: dataset.id,
  inputs: [{ question: "What is 2+2?" }],
  outputs: [{ answer: "4" }]
});

// Define target function
async function myBot(input: { question: string }) {
  return { answer: await generateAnswer(input.question) };
}

// Run evaluation
const results = await evaluate(myBot, {
  data: "qa-eval",
  evaluators: [
    ({ run, example }) => ({
      key: "correctness",
      score: run.outputs?.answer === example?.outputs?.answer ? 1 : 0
    })
  ]
});

Custom Evaluators

// Simple correctness
const correctnessEvaluator = ({ run, example }) => ({
  key: "correctness",
  score: run.outputs?.answer === example?.outputs?.answer ? 1 : 0
});

// With LLM judge
const qualityEvaluator = async ({ run, example }) => {
  const judgment = await llmJudge(run.outputs, example.outputs);
  return {
    key: "quality",
    score: judgment.score,
    comment: judgment.reasoning
  };
};

// Latency check
const latencyEvaluator = ({ run }) => {
  const latency = (run.end_time || 0) - (run.start_time || 0);
  return {
    key: "latency",
    score: latency < 1000 ? 1 : 0,
    value: latency,
    comment: `${latency}ms`
  };
};

SDK Wrappers

OpenAI

import { wrapOpenAI } from "langsmith/wrappers/openai";
import OpenAI from "openai";

const openai = wrapOpenAI(new OpenAI(), {
  projectName: "openai-project"
});

const response = await openai.chat.completions.create({
  model: "gpt-4",
  messages: [{ role: "user", content: "Hello!" }]
});

Anthropic

import { wrapAnthropic } from "langsmith/wrappers/anthropic";
import Anthropic from "@anthropic-ai/sdk";

const anthropic = wrapAnthropic(new Anthropic(), {
  project_name: "anthropic-project"
});

const message = await anthropic.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Hello!" }]
});

Vercel AI SDK

import { wrapAISDK } from "langsmith/experimental/vercel";
import { wrapLanguageModel, generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const wrappedAI = wrapAISDK(
  { wrapLanguageModel, generateText },
  { project_name: "vercel-app" }
);

const { text } = await wrappedAI.generateText({
  model: openai("gpt-4"),
  prompt: "Hello!"
});

Dataset Management

Create Dataset

import { Client } from "langsmith";

const client = new Client();

const dataset = await client.createDataset({
  datasetName: "my-dataset",
  description: "Test dataset",
  dataType: "kv"
});

Add Examples

// Single example
await client.createExample({
  dataset_id: dataset.id,
  inputs: { question: "What is 2+2?" },
  outputs: { answer: "4" }
});

// Bulk examples
await client.createExamples({
  datasetName: "my-dataset",
  inputs: [
    { question: "What is 2+2?" },
    { question: "What is 3+3?" }
  ],
  outputs: [
    { answer: "4" },
    { answer: "6" }
  ]
});

List Examples

for await (const example of client.listExamples({
  datasetName: "my-dataset",
  limit: 100
})) {
  console.log(example.inputs, example.outputs);
}

Feedback Collection

Create Feedback

import { Client } from "langsmith";

const client = new Client();

// Thumbs up/down
await client.createFeedback(runId, "user_rating", {
  score: 1,  // 1 = thumbs up, 0 = thumbs down
  comment: "Great response!",
});

// Numeric score
await client.createFeedback(runId, "accuracy", {
  score: 0.95,
  comment: "Highly accurate",
});

// With correction
await client.createFeedback(runId, "correctness", {
  score: 0,
  correction: { answer: "Correct answer" },
});

Presigned Feedback Tokens

const token = await client.createPresignedFeedbackToken({
  run_id: runId,
  feedback_key: "user_rating",
  expires_in: 86400  // 24 hours
});

// Share token.url with users
// They can POST feedback without API key
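
For example, an end-user client could submit feedback directly to the presigned URL. A minimal sketch, assuming the URL accepts a JSON body with the same fields as createFeedback (verify the exact payload shape against your LangSmith version):

// Payload fields assumed to mirror createFeedback options (score, comment)
await fetch(token.url, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ score: 1, comment: "Helpful answer" })
});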

Run Queries

List Runs

import { Client } from "langsmith";

const client = new Client();

// Basic listing
for await (const run of client.listRuns({
  projectName: "my-project",
  limit: 100
})) {
  console.log(run.name, run.status);
}

// With filters
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'and(eq(error, null), gte(latency, 1000))',
  isRoot: true
})) {
  console.log(`Slow run: ${run.name}`);
}

// Root runs only
for await (const run of client.listRuns({
  projectName: "my-project",
  isRoot: true
})) {
  console.log(`Root: ${run.name}`);
}

Read Run

// Read run with children
const run = await client.readRun(runId, { loadChildRuns: true });
console.log(run.child_runs);

// Get run URL
const url = client.getRunUrl({
  runId: runId,
  projectName: "my-project"
});

Prompt Management

Create and Push

import { Client } from "langsmith";

const client = new Client();

// Create prompt
await client.createPrompt("my-prompt", {
  description: "Customer greeting prompt",
  tags: ["customer-service"]
});

// Push version
await client.pushPrompt("my-prompt", {
  object: {
    type: "chat",
    messages: [
      { role: "system", content: "You are helpful." },
      { role: "user", content: "{query}" }
    ]
  },
  description: "Initial version"
});

Pull and Use

const prompt = await client.pullPrompt({
  promptName: "my-prompt"
});

// Use prompt content
const messages = prompt.content.messages.map(msg => ({
  role: msg.role,
  content: msg.content.replace("{query}", userQuery)
}));

Privacy and Security

Hide Inputs/Outputs

import { Client } from "langsmith";

const client = new Client({
  hideInputs: true,
  hideOutputs: true
});

// Or selective
const client = new Client({
  hideInputs: (inputs) => {
    const { apiKey, password, ...safe } = inputs;
    return safe;
  }
});

Data Anonymization

import { traceable } from "langsmith/traceable";
import { createAnonymizer } from "langsmith/anonymizer";

const anonymizer = createAnonymizer([
  { pattern: /\b[\w\.-]+@[\w\.-]+\.\w+\b/g, replace: "[EMAIL]" },
  { pattern: /\bsk-[a-zA-Z0-9]{32,}\b/g, replace: "[API_KEY]" },
  { pattern: /\b\d{3}-\d{2}-\d{4}\b/g, replace: "[SSN]" }
]);

const privateFunction = traceable(
  async (input: string) => processData(input),
  {
    name: "private-function",
    processInputs: anonymizer,
    processOutputs: anonymizer
  }
);

Testing Integration

Jest

import { test, expect, wrapEvaluator } from "langsmith/jest";

test(
  "greeting generation",
  {
    input: { name: "Alice" },
    expected: { greeting: "Hello, Alice!" }
  },
  async (input) => {
    return { greeting: `Hello, ${input.name}!` };
  }
);

// Custom evaluator
const lengthEvaluator = wrapEvaluator((input, output, expected) => ({
  key: "length",
  score: output.length >= 10 ? 1 : 0
}));

test(
  "with evaluator",
  {
    input: "test",
    evaluators: [lengthEvaluator]
  },
  async (input) => {
    const result = await process(input);
    expect(result).evaluatedBy(lengthEvaluator);
    return result;
  }
);

Vitest

import { test, expect } from "langsmith/vitest";

// Identical API to Jest
test(
  "translation test",
  {
    input: { text: "Hello", lang: "es" },
    expected: { translation: "Hola" }
  },
  async (input) => {
    return await translate(input.text, input.lang);
  }
);

// Custom matchers
expect(output).toBeSemanticCloseTo("Expected meaning", {
  threshold: 0.85
});

expect(output).toBeRelativeCloseTo("Expected text", {
  threshold: 0.8
});

Vitest configuration:

// vitest.config.ts
import { defineConfig } from "vitest/config";

export default defineConfig({
  test: {
    reporters: ["default", "langsmith/vitest/reporter"]
  }
});

LangChain Integration

import { traceable } from "langsmith/traceable";
import { getLangchainCallbacks } from "langsmith/langchain";
import { ChatOpenAI } from "@langchain/openai";

const analyzeText = traceable(async (text: string) => {
  const callbacks = getLangchainCallbacks();

  const model = new ChatOpenAI();
  const result = await model.invoke(text, { callbacks });

  return result;
}, { name: "analyze-text" });

Utility Functions

UUID Generation

import { uuid7, uuid7FromTime } from "langsmith";

// Generate UUID v7
const runId = uuid7();

// From timestamp
const timestampId = uuid7FromTime(Date.now());
const dateId = uuid7FromTime("2024-01-01T00:00:00Z");

Get Default Project

import { getDefaultProjectName } from "langsmith";

const projectName = getDefaultProjectName();
console.log("Using project:", projectName);

Override Fetch

import { overrideFetchImplementation } from "langsmith";

const customFetch = (url: string, init?: RequestInit) => {
  console.log("Fetching:", url);
  return fetch(url, init);
};

overrideFetchImplementation(customFetch);

Best Practices

For Development

  1. Use traceable() decorator for automatic tracing
  2. Add descriptive name and appropriate run_type
  3. Include relevant metadata and tags (see the sketch after this list)
  4. Test with real data to ensure traces capture expected information
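
A minimal sketch combining these points in one traceable call (the metadata and tag values are illustrative; field names follow the TraceableConfig interface shown under Key Configuration Interfaces):

import { traceable } from "langsmith/traceable";

const summarize = traceable(
  async (text: string) => await llm.summarize(text),  // llm.summarize is a placeholder for your model call
  {
    name: "summarize",
    run_type: "chain",
    metadata: { version: "2024-06" },   // illustrative metadata
    tags: ["summarization", "dev"]      // illustrative tags
  }
);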

For Production

  1. Set tracingSamplingRate to control trace volume
  2. Use hideInputs/hideOutputs for sensitive data
  3. Call await client.awaitPendingTraceBatches() before shutdown (see the sketch after this list)
  4. Monitor feedback and error rates
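
A minimal production sketch combining these points, assuming a long-running Node.js service (redactPII stands in for your own redaction helper):

import { Client } from "langsmith";

const client = new Client({
  autoBatchTracing: true,
  tracingSamplingRate: 0.1,                   // keep ~10% of traces
  hideInputs: (inputs) => redactPII(inputs)   // redact sensitive fields before upload
});

process.on("SIGTERM", async () => {
  await client.awaitPendingTraceBatches();    // flush buffered traces before shutdown
  process.exit(0);
});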

For Evaluation

  1. Create versioned datasets for reproducible testing
  2. Use multiple evaluators to measure different aspects (sketched after this list)
  3. Run comparative evaluations when comparing models
  4. Store evaluation results for historical comparison
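
A sketch of such a run, reusing myBot and the evaluators defined earlier (the experiment name and metadata values are illustrative; option names follow the EvaluateOptions interface shown under Key Configuration Interfaces):

import { evaluate } from "langsmith/evaluation";

await evaluate(myBot, {
  data: "qa-eval",                                        // dataset name, versioned in LangSmith
  evaluators: [correctnessEvaluator, latencyEvaluator],   // measure multiple aspects per run
  experiment_name: "qa-bot-gpt4-v2",                      // label for historical comparison
  max_concurrency: 4,
  metadata: { model: "gpt-4" }
});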

For Privacy

  1. Use processInputs/processOutputs to redact sensitive data
  2. Configure hideInputs: true for client-level hiding
  3. Use createAnonymizer() for pattern-based PII removal
  4. Review traces before sharing publicly

Troubleshooting

Traces Not Appearing

  • Verify LANGCHAIN_API_KEY is set correctly (a quick diagnostic sketch follows this list)
  • Check project name matches in environment and code
  • Call await client.awaitPendingTraceBatches() before app shutdown
  • Ensure network connectivity to api.smith.langchain.com
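
A quick diagnostic sketch for the first three points, using only APIs shown earlier in this guide:

import { Client, getDefaultProjectName } from "langsmith";

const client = new Client();
console.log("API key set:", Boolean(process.env.LANGCHAIN_API_KEY));  // should print true
console.log("Traces will go to project:", getDefaultProjectName());   // should match the project in the UI
await client.awaitPendingTraceBatches();                               // flush anything still buffered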

Import Errors

// Correct - use subpath exports
import { traceable } from "langsmith/traceable";
import { evaluate } from "langsmith/evaluation";

// Incorrect - won't work
import { traceable } from "langsmith";

Missing Types

// Import types from langsmith/schemas
import type { Run, Example, Feedback } from "langsmith/schemas";

Run Types Reference

type RunType =
  | "llm"        // Direct language model API call
  | "chain"      // Sequence of operations
  | "tool"       // Tool/function execution
  | "retriever"  // Document retrieval
  | "embedding"  // Embedding generation
  | "prompt"     // Prompt formatting
  | "parser";    // Output parsing

Key Configuration Interfaces

Client Configuration

interface ClientConfig {
  apiUrl?: string;
  apiKey?: string;
  timeout_ms?: number;
  autoBatchTracing?: boolean;
  hideInputs?: boolean | ((inputs: KVMap) => KVMap);
  hideOutputs?: boolean | ((outputs: KVMap) => KVMap);
  tracingSamplingRate?: number;
}

Traceable Configuration

interface TraceableConfig {
  name?: string;
  run_type?: string;
  metadata?: Record<string, any>;
  tags?: string[];
  client?: Client;
  project_name?: string;
  processInputs?: (inputs: any) => KVMap;
  processOutputs?: (outputs: any) => KVMap;
}

Evaluation Options

interface EvaluateOptions {
  data: string | Example[];
  evaluators: EvaluatorT[];
  summary_evaluators?: SummaryEvaluatorT[];
  experiment_name?: string;
  max_concurrency?: number;
  metadata?: Record<string, any>;
}

Related Documentation

  • Setup Guide - Installation and configuration
  • Tracing Guide - Comprehensive tracing documentation
  • Evaluation Guide - Evaluation framework
  • Core Concepts - Understanding key concepts
  • Client API - Complete API reference