Client API Reference

Complete API reference for the LangSmith Client class, providing programmatic access to projects, runs, datasets, feedback, and other LangSmith platform features.

Overview

The Client class is the primary interface for interacting with the LangSmith API. It provides methods for managing projects, creating and querying runs, handling datasets and examples, collecting feedback, and much more.

When to use the Client API:

  • Create and manage projects programmatically
  • Query runs and build custom analytics
  • Programmatically create datasets and examples
  • Collect and analyze feedback
  • Manage prompts and annotation queues
  • Build custom workflows and integrations

Core Import

// Core client import
import { Client } from "langsmith";

// Type imports for configuration
import type {
  ClientConfig,
  TracerSession,
  Run,
  RunCreate,
  Dataset,
  Example,
  Feedback
} from "langsmith";

// Schema type imports
import type {
  RunType,
  KVMap,
  Attachments,
  InvocationParamsSchema
} from "langsmith/schemas";

Client Construction

Constructor

Creates a new LangSmith client instance with optional configuration.

/**
 * Create a new LangSmith client instance
 * @param config - Optional client configuration
 */
class Client {
  constructor(config?: ClientConfig);
}

interface ClientConfig {
  /** API URL for LangSmith (defaults to LANGCHAIN_ENDPOINT env var or https://api.smith.langchain.com) */
  apiUrl?: string;
  /** API key for authentication (defaults to LANGCHAIN_API_KEY env var) */
  apiKey?: string;
  /** Default timeout for requests in milliseconds (default: 120000) */
  timeout_ms?: number;
  /** Web URL for LangSmith UI (optional, used for generating links) */
  webUrl?: string;
  /** Custom fetch implementation for custom HTTP handling */
  fetchImplementation?: typeof fetch;
  /** Enable auto-batching of trace uploads for better performance (default: true) */
  autoBatchTracing?: boolean;
  /** Batch size limit in bytes for trace batching (default: 20MB) */
  batchSizeBytesLimit?: number;
  /** Maximum number of operations to batch in a single request */
  batchSizeLimit?: number;
  /** Maximum total memory (in bytes) for batch queues - defaults to 1GB */
  maxIngestMemoryBytes?: number;
  /** Number of concurrent batch uploads (default: 5) */
  traceBatchConcurrency?: number;
  /** Maximum number of pending runs allowed in the batch queue before blocking (default: 100) */
  pendingAutoBatchedRunLimit?: number;
  /** Block on root run finalization, ensuring trace upload before continuing (default: false) */
  blockOnRootRunFinalization?: boolean;
  /** Hide inputs from traces - boolean for all, or function to transform/filter specific inputs */
  hideInputs?: boolean | ((inputs: KVMap) => KVMap | Promise<KVMap>);
  /** Hide outputs from traces - boolean for all, or function to transform/filter specific outputs */
  hideOutputs?: boolean | ((outputs: KVMap) => KVMap | Promise<KVMap>);
  /** Custom anonymizer function to transform traced data before sending */
  anonymizer?: (values: KVMap) => KVMap | Promise<KVMap>;
  /** Whether to omit runtime information from traced runs (SDK version, platform, etc.) */
  omitTracedRuntimeInfo?: boolean;
  /** Workspace ID - required for org-scoped API keys */
  workspaceId?: string;
  /** Custom fetch options passed to all HTTP requests */
  fetchOptions?: RequestInit;
  /** Require manual .flush() calls before sending traces (useful for rate limit management) */
  manualFlushMode?: boolean;
  /** Sampling rate for tracing (0-1, where 1.0 = 100% of traces sent) */
  tracingSamplingRate?: number;
  /** Enable debug mode - logs all HTTP requests */
  debug?: boolean;
  /** Caller function options for advanced async request handling */
  callerOptions?: object;
  /** Caching configuration - true for defaults, Cache instance for custom, or false/undefined to disable */
  cache?: Cache | boolean;
}

Usage Examples:

import { Client } from "langsmith";

// Use environment variables (LANGCHAIN_API_KEY, LANGCHAIN_ENDPOINT)
const client = new Client();

// Explicit configuration
const configuredClient = new Client({
  apiUrl: "https://api.smith.langchain.com",
  apiKey: "your-api-key",
  timeout_ms: 10000,
});

// Custom configuration with batching
const batchingClient = new Client({
  apiKey: "your-api-key",
  autoBatchTracing: true,
  batchSizeBytesLimit: 20_000_000,
  hideInputs: false,
  hideOutputs: false,
});

Utility Methods

/**
 * Get default client configuration from environment variables
 * @returns Object containing default apiUrl, apiKey, webUrl, and privacy settings
 */
static getDefaultClientConfig(): {
  apiUrl: string;
  apiKey?: string;
  webUrl?: string;
  hideInputs?: boolean;
  hideOutputs?: boolean;
};

/**
 * Get the host URL for the LangSmith web UI
 * @returns The web UI host URL (e.g., https://smith.langchain.com)
 */
getHostUrl(): string;

Usage Examples:

import { Client } from "langsmith";

// Get default configuration
const defaultConfig = Client.getDefaultClientConfig();
console.log("API URL:", defaultConfig.apiUrl);
console.log("API Key configured:", !!defaultConfig.apiKey);

// Create client and get host URL
const client = new Client();
const webUrl = client.getHostUrl();
console.log("Web UI URL:", webUrl);

// Use for constructing custom URLs
const projectUrl = `${client.getHostUrl()}/projects/my-project`;

Project Management

Methods for managing projects (also known as sessions or TracerSessions), which organize your traces and runs.

Create Project

/**
 * Create a new project
 * @param params - Project creation parameters
 * @returns Promise resolving to the created project
 */
createProject(params: CreateProjectParams): Promise<TracerSession>;

interface CreateProjectParams {
  /** Project name (required) */
  projectName: string;
  /** Project description */
  description?: string;
  /** Project metadata */
  metadata?: Record<string, any>;
  /** If true, update the project when it already exists instead of failing */
  upsert?: boolean;
  /** Reference dataset ID */
  referenceDatasetId?: string;
}

interface TracerSession {
  /** Project ID */
  id: string;
  /** Project name */
  name: string;
  /** Project description */
  description?: string;
  /** Project metadata */
  metadata?: Record<string, any>;
  /** Creation timestamp */
  created_at: string;
  /** Last update timestamp */
  updated_at?: string;
  /** Tenant ID */
  tenant_id: string;
}

Read Project

/**
 * Read project details
 * @param params - Project identifier (ID or name)
 * @returns Promise resolving to the project details
 */
readProject(params: {
  projectId?: string;
  projectName?: string;
}): Promise<TracerSession>;

List Projects

/**
 * List projects with optional filtering
 * @param params - List parameters
 * @returns Promise resolving to array of projects
 */
listProjects(params?: ListProjectsParams): Promise<TracerSession[]>;

interface ListProjectsParams {
  /** Reference dataset ID filter */
  referenceDatasetId?: string;
  /** Reference dataset name filter */
  referenceDatasetName?: string;
  /** Whether to include reference-free projects */
  referenceFree?: boolean;
  /** Limit number of results */
  limit?: number;
  /** Offset for pagination */
  offset?: number;
}

Update Project

/**
 * Update project details
 * @param projectId - Project ID to update
 * @param params - Update parameters
 * @returns Promise resolving to the updated project
 */
updateProject(
  projectId: string,
  params: UpdateProjectParams
): Promise<TracerSession>;

interface UpdateProjectParams {
  /** New project name */
  name?: string;
  /** New description */
  description?: string;
  /** New metadata */
  metadata?: Record<string, any>;
  /** End time timestamp */
  endTime?: Date | string;
}

Delete Project

/**
 * Delete a project
 * @param params - Project identifier (ID or name)
 * @returns Promise resolving when deletion completes
 */
deleteProject(params: {
  projectId?: string;
  projectName?: string;
}): Promise<void>;

Check Project Existence

/**
 * Check if a project exists
 * @param params - Project identifier (ID or name)
 * @returns Promise resolving to boolean indicating existence
 */
hasProject(params: {
  projectId?: string;
  projectName?: string;
}): Promise<boolean>;

Get Project URL

/**
 * Get the URL for viewing a project in the LangSmith UI
 * @param params - Project identifier
 * @returns Project URL string
 */
getProjectUrl(params: {
  projectId?: string;
  projectName?: string;
}): string;

Project Management Examples:

import { Client } from "langsmith";

const client = new Client();

// Create a new project
const project = await client.createProject({
  projectName: "my-chatbot-v1",
  description: "Production chatbot deployment",
  metadata: { version: "1.0.0", env: "production" },
});

// Read project by name
const existingProject = await client.readProject({
  projectName: "my-chatbot-v1",
});

// List all projects
const projects = await client.listProjects({ limit: 100 });

// Update project
await client.updateProject(project.id, {
  description: "Updated description",
  metadata: { version: "1.0.1" },
});

// Delete project
await client.deleteProject({ projectName: "my-chatbot-v1" });
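
The existence check and URL helper can be combined with the methods above. A short sketch, following the signatures documented in this section and reusing the project name from the earlier example:

import { Client } from "langsmith";

const client = new Client();

// Check whether a project exists before creating it
if (!(await client.hasProject({ projectName: "my-chatbot-v1" }))) {
  await client.createProject({ projectName: "my-chatbot-v1" });
}

// Build a link to the project in the LangSmith UI
const projectUrl = client.getProjectUrl({ projectName: "my-chatbot-v1" });
console.log("View project at:", projectUrl);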

Run Management

Methods for creating, updating, and managing runs (traces) in LangSmith.

Create Run

/**
 * Create a new run/trace
 * @param run - Run creation data
 * @returns Promise resolving when run is created
 */
createRun(run: RunCreate): Promise<void>;

interface RunCreate {
  /** Run ID (auto-generated if not provided) */
  id?: string;
  /** Run name (required) */
  name: string;
  /** Run type: "llm" | "chain" | "tool" | "retriever" | "embedding" | "prompt" | "parser" */
  run_type: string;
  /** Input data */
  inputs?: Record<string, any>;
  /** Output data */
  outputs?: Record<string, any>;
  /** Start time (auto-set if not provided) */
  start_time?: number;
  /** End time */
  end_time?: number;
  /** Error message if run failed */
  error?: string;
  /** Parent run ID for hierarchical traces */
  parent_run_id?: string;
  /** Project name */
  project_name?: string;
  /** Reference example ID for evaluation */
  reference_example_id?: string;
  /** Serialized representation */
  serialized?: object;
  /** Additional metadata */
  extra?: Record<string, any>;
  /** Tags for categorization */
  tags?: string[];
  /** Trace ID for distributed tracing */
  trace_id?: string;
  /** Dotted order for distributed tracing */
  dotted_order?: string;
  /** Session ID (deprecated, use project_name) */
  session_id?: string;
  /** Session name (deprecated, use project_name) */
  session_name?: string;
  /** Attachments map */
  attachments?: Attachments;
  /** LLM invocation parameters */
  invocation_params?: InvocationParamsSchema;
}

type Attachments = Record<string, {
  /** MIME type */
  mime_type: string;
  /** Base64 encoded data */
  data: string;
}>;

interface InvocationParamsSchema {
  /** Model name */
  model?: string;
  /** Temperature parameter */
  temperature?: number;
  /** Max tokens */
  max_tokens?: number;
  /** Top-p parameter */
  top_p?: number;
  /** Stop sequences */
  stop?: string[];
  /** Additional parameters */
  [key: string]: any;
}

Update Run

/**
 * Update an existing run
 * @param runId - Run ID to update
 * @param update - Update data
 * @returns Promise resolving when update completes
 */
updateRun(runId: string, update: RunUpdate): Promise<void>;

interface RunUpdate {
  /** End time */
  end_time?: number;
  /** Error message */
  error?: string;
  /** Input data */
  inputs?: Record<string, any>;
  /** Output data */
  outputs?: Record<string, any>;
  /** Additional metadata */
  extra?: Record<string, any>;
  /** Tags */
  tags?: string[];
  /** Events list */
  events?: Array<{
    name: string;
    time: number;
    kwargs?: Record<string, any>;
  }>;
  /** Attachments */
  attachments?: Attachments;
  /** Session ID (deprecated) */
  session_id?: string;
  /** Dotted order */
  dotted_order?: string;
  /** Trace ID */
  trace_id?: string;
}

Read Run

/**
 * Read run details
 * @param runId - Run ID to read
 * @param options - Read options
 * @returns Promise resolving to the run details
 */
readRun(runId: string, options?: ReadRunOptions): Promise<Run>;

interface ReadRunOptions {
  /** Load child runs recursively */
  loadChildRuns?: boolean;
}

interface Run extends RunCreate {
  /** Run ID */
  id: string;
  /** Status */
  status?: string;
  /** Child runs (if loadChildRuns is true) */
  child_runs?: Run[];
  /** Feedback summary */
  feedback_stats?: Record<string, any>;
  /** Application path */
  app_path?: string;
  /** Manifest ID */
  manifest_id?: string;
  /** Total tokens */
  total_tokens?: number;
  /** Prompt tokens */
  prompt_tokens?: number;
  /** Completion tokens */
  completion_tokens?: number;
  /** Total cost */
  total_cost?: number;
  /** Prompt cost */
  prompt_cost?: number;
  /** Completion cost */
  completion_cost?: number;
  /** First token time */
  first_token_time?: number;
}

List Runs

/**
 * List runs with filtering and pagination
 * @param params - List parameters
 * @returns Async iterable of runs
 */
listRuns(params?: ListRunsParams): AsyncIterable<Run>;

interface ListRunsParams {
  /** Filter by project ID (can be single ID or array of IDs) */
  projectId?: string | string[];
  /** Filter by project name (can be single name or array of names) */
  projectName?: string | string[];
  /** Filter by run type */
  runType?: string;
  /** Filter by reference example ID */
  referenceExampleId?: string;
  /** Filter by trace ID */
  traceId?: string;
  /** Filter by parent run ID (null for root runs only) */
  parentRunId?: string | null;
  /** Filter by execution order */
  executionOrder?: number;
  /** Filter by start time range */
  startTime?: Date;
  /** Filter by end time range */
  endTime?: Date;
  /** Filter by error presence */
  error?: boolean;
  /** Filter by run IDs */
  id?: string[];
  /** Limit number of results */
  limit?: number;
  /** Sort order by run start date */
  order?: "asc" | "desc";
  /** Full-text search query across run names and metadata */
  query?: string;
  /** Select specific fields to include in response (e.g., ["id", "name", "start_time"]) */
  select?: string[];
  /**
   * Filter query string using field comparators for complex filtering
   * Supported comparators: gte (>=), gt (>), lte (<=), lt (<), eq (=), neq (!=), has (contains), search (text search)
   * Examples:
   *   - 'eq(status, "success")' - exact match
   *   - 'gte(start_time, "2024-01-01")' - greater than or equal
   *   - 'and(eq(error, null), gte(latency, 1000))' - combine conditions
   *   - 'has(tags, "production")' - array contains
   *   - 'search(name, "chatbot")' - text search in field
   */
  filter?: string;
  /**
   * Filter to apply to the ROOT run in a trace tree
   * Uses same syntax as filter parameter
   * Example: 'eq(name, "main-chain")' to find traces where the root run is named "main-chain"
   */
  traceFilter?: string;
  /**
   * Filter to apply to OTHER runs in trace tree (non-root runs: siblings and children)
   * Uses same syntax as filter parameter
   * Example: 'eq(run_type, "llm")' to find traces containing at least one LLM run
   */
  treeFilter?: string;
  /** Only include root runs (runs without a parent_run_id) */
  isRoot?: boolean;
}

Advanced Filtering Examples:

import { Client } from "langsmith";

const client = new Client();

// Example 1: Find successful runs with high latency
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'and(eq(error, null), gte(latency, 1000))',
})) {
  console.log(`Slow run: ${run.name}, latency: ${run.latency}ms`);
}

// Example 2: Find traces where the root run is a specific chain
// and it contains at least one LLM call
for await (const run of client.listRuns({
  projectName: "my-project",
  traceFilter: 'eq(name, "rag-pipeline")',
  treeFilter: 'eq(run_type, "llm")',
})) {
  console.log(`RAG pipeline trace: ${run.id}`);
}

// Example 3: Find runs with specific tags
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'has(tags, "production")',
  startTime: new Date("2024-01-01"),
})) {
  console.log(`Production run: ${run.name}`);
}

// Example 4: Complex filtering with multiple conditions
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'and(eq(run_type, "chain"), or(gte(total_tokens, 10000), gte(latency, 5000)))',
  order: "desc",
  limit: 100,
})) {
  console.log(`High-cost chain: ${run.name}`);
}

// Example 5: Text search in run names
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'search(name, "customer-support")',
})) {
  console.log(`Customer support run: ${run.name}`);
}

// Example 6: Find failed runs in a date range
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'neq(error, null)',
  startTime: new Date("2024-01-01"),
  endTime: new Date("2024-01-31"),
  order: "desc",
})) {
  console.log(`Failed run: ${run.name}, error: ${run.error}`);
}

Share Run

/**
 * Share a run publicly
 * @param runId - Run ID to share
 * @returns Promise resolving to share URL
 */
shareRun(runId: string): Promise<string>;

/**
 * Unshare a previously shared run
 * @param runId - Run ID to unshare
 * @returns Promise resolving when unshare completes
 */
unshareRun(runId: string): Promise<void>;

/**
 * Read the shared link information for a run
 * @param runId - Run ID
 * @returns Promise resolving to shared link details
 */
readRunSharedLink(runId: string): Promise<string>;

/**
 * List all publicly shared runs
 * @param params - Optional filtering parameters
 * @returns Async iterable of shared runs
 */
listSharedRuns(params?: { shareToken?: string; limit?: number }): AsyncIterable<Run>;

Get Run URL

/**
 * Get URL for viewing a run in the LangSmith UI
 * @param params - Run identifier and optional project context
 * @returns Run URL string
 */
getRunUrl(params: {
  runId: string;
  run?: Run;
  projectId?: string;
  projectName?: string;
}): string;
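
A sketch of sharing a run and building UI links, following the signatures above; the run ID is a placeholder for an existing run:

import { Client } from "langsmith";

const client = new Client();
const runId = "550e8400-e29b-41d4-a716-446655440000"; // assumed existing run

// Share the run publicly and capture the share link
const shareUrl = await client.shareRun(runId);
console.log("Public share URL:", shareUrl);

// Read back the shared link later
const sharedLink = await client.readRunSharedLink(runId);

// Build an internal UI link for the run
const runUrl = client.getRunUrl({ runId, projectName: "my-chatbot-v1" });

// Revoke public access when no longer needed
await client.unshareRun(runId);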

Batch Operations

/**
 * Batch ingest multiple runs
 * @param runs - Array of run creation/update operations
 * @returns Promise resolving when batch ingestion completes
 */
batchIngestRuns(runs: {
  post?: RunCreate[];
  patch?: RunUpdate[];
}): Promise<void>;

/**
 * Multipart ingest for large run batches
 * @param runs - Array of run creation/update operations
 * @returns Promise resolving when multipart ingestion completes
 */
multipartIngestRuns(runs: {
  post?: RunCreate[];
  patch?: RunUpdate[];
}): Promise<void>;
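
A sketch of the batch ingestion API above; the run ID is a placeholder, and updates to previously created runs would go in the patch array:

import { Client } from "langsmith";

const client = new Client();

await client.batchIngestRuns({
  post: [
    {
      id: "770e8400-e29b-41d4-a716-446655440000", // placeholder run ID
      name: "BulkImportedRun",
      run_type: "chain",
      inputs: { question: "What is LangSmith?" },
      outputs: { answer: "LangSmith is a platform..." },
      start_time: Date.now(),
      end_time: Date.now(),
      project_name: "my-chatbot-v1",
    },
  ],
});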

Group Runs

/**
 * List runs grouped by a specific field (e.g., conversation_id)
 * @param params - Group parameters
 * @returns Async iterable of grouped run summaries
 */
listGroupRuns(params: GroupRunsParams): AsyncIterable<Thread>;

interface GroupRunsParams {
  /** Project ID filter */
  projectId?: string;
  /** Project name filter */
  projectName?: string;
  /** Field to group by */
  groupBy: string;
  /** Filter query */
  filter?: string;
  /** Start time filter */
  startTime?: Date;
  /** End time filter */
  endTime?: Date;
  /** Limit results */
  limit?: number;
  /** Offset for pagination */
  offset?: number;
}

interface Thread {
  /** Group key */
  group_key: string;
  /** Number of runs in group */
  count: number;
  /** Total tokens */
  total_tokens: number;
  /** Total cost */
  total_cost: number | null;
  /** Min start time */
  min_start_time: string;
  /** Max start time */
  max_start_time: string;
  /** P50 latency */
  latency_p50: number;
  /** P99 latency */
  latency_p99: number;
  /** Feedback stats */
  feedback_stats: any | null;
  /** Filter string */
  filter: string;
  /** First inputs */
  first_inputs: string;
  /** Last outputs */
  last_outputs: string;
  /** Last error */
  last_error: string | null;
}
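
A sketch of grouping runs into threads; the groupBy key below is an illustrative metadata field such as a conversation or session identifier:

import { Client } from "langsmith";

const client = new Client();

// Group runs by a shared metadata key and summarize each group
for await (const thread of client.listGroupRuns({
  projectName: "my-chatbot-v1",
  groupBy: "conversation_id", // illustrative metadata key
  limit: 20,
})) {
  console.log(
    `Thread ${thread.group_key}: ${thread.count} runs, p50 latency ${thread.latency_p50}ms`
  );
}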

Get Run Stats

/**
 * Get statistics for runs in a project
 * @param params - Stats parameters
 * @returns Promise resolving to run statistics
 */
getRunStats(params: {
  projectId?: string;
  projectName?: string;
  filter?: string;
  startTime?: Date;
  endTime?: Date;
}): Promise<Record<string, any>>;
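
And a brief sketch of pulling aggregate statistics for a project; the exact shape of the returned object depends on the API response:

import { Client } from "langsmith";

const client = new Client();

const stats = await client.getRunStats({
  projectName: "my-chatbot-v1",
  filter: 'eq(run_type, "llm")',
  startTime: new Date("2024-01-01"),
});

console.log("LLM run stats:", stats);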

Run Management Examples:

import { Client } from "langsmith";

const client = new Client();

// Create a root run
const runId = "550e8400-e29b-41d4-a716-446655440000";
await client.createRun({
  id: runId,
  name: "ChatBot",
  run_type: "chain",
  inputs: { question: "What is LangSmith?" },
  start_time: Date.now(),
  project_name: "my-chatbot-v1",
  tags: ["production", "chatbot"],
  extra: { userId: "user-123" },
});

// Create a child run
const childRunId = "660e8400-e29b-41d4-a716-446655440000";
await client.createRun({
  id: childRunId,
  name: "OpenAI Chat",
  run_type: "llm",
  inputs: { messages: [{ role: "user", content: "What is LangSmith?" }] },
  start_time: Date.now(),
  parent_run_id: runId,
  project_name: "my-chatbot-v1",
  invocation_params: {
    model: "gpt-4",
    temperature: 0.7,
    max_tokens: 1000,
  },
});

// Update run with outputs
await client.updateRun(childRunId, {
  end_time: Date.now(),
  outputs: { content: "LangSmith is a platform..." },
});

// Update parent run
await client.updateRun(runId, {
  end_time: Date.now(),
  outputs: { answer: "LangSmith is a platform..." },
});

// Read run with child runs
const run = await client.readRun(runId, { loadChildRuns: true });
console.log(run.child_runs);

// List runs for a project
const runs = client.listRuns({
  projectName: "my-chatbot-v1",
  limit: 100,
  isRoot: true,
  order: "desc",
});

for await (const run of runs) {
  console.log(run.name, run.status);
}

// List failed production runs (tag filtering goes through the filter string)
const errorRuns = client.listRuns({
  projectName: "my-chatbot-v1",
  error: true,
  filter: 'has(tags, "production")',
});

// Ensure all traces are uploaded before shutdown
await client.awaitPendingTraceBatches();

Dataset Management

Methods for creating and managing datasets for evaluation.

Create Dataset

/**
 * Create a new dataset
 * @param params - Dataset creation parameters
 * @returns Promise resolving to the created dataset
 */
createDataset(params: CreateDatasetParams): Promise<Dataset>;

interface CreateDatasetParams {
  /** Dataset name (required) */
  datasetName: string;
  /** Dataset description */
  description?: string;
  /** Data type: "kv" | "llm" | "chat" */
  dataType?: "kv" | "llm" | "chat";
  /** Dataset metadata */
  metadata?: Record<string, any>;
}

interface Dataset {
  /** Dataset ID */
  id: string;
  /** Dataset name */
  name: string;
  /** Description */
  description?: string;
  /** Data type */
  data_type?: "kv" | "llm" | "chat";
  /** Tenant ID */
  tenant_id: string;
  /** Creation timestamp */
  created_at: string;
  /** Metadata */
  metadata?: Record<string, any>;
  /** Example count */
  example_count?: number;
}

Read Dataset

/**
 * Read dataset details
 * @param params - Dataset identifier (ID or name)
 * @returns Promise resolving to the dataset
 */
readDataset(params: {
  datasetId?: string;
  datasetName?: string;
}): Promise<Dataset>;

List Datasets

/**
 * List datasets
 * @param params - List parameters
 * @returns Promise resolving to array of datasets
 */
listDatasets(params?: ListDatasetsParams): Promise<Dataset[]>;

interface ListDatasetsParams {
  /** Limit number of results */
  limit?: number;
  /** Offset for pagination */
  offset?: number;
  /** Filter by data type */
  dataType?: "kv" | "llm" | "chat";
}

Update Dataset

/**
 * Update dataset details
 * @param props - Dataset identifier (ID or name) and fields to update
 * @returns Promise resolving to the updated dataset
 */
updateDataset(props: {
  datasetId?: string;
  datasetName?: string;
  name?: string;
  description?: string;
}): Promise<Dataset>;

Delete Dataset

/**
 * Delete a dataset
 * @param params - Dataset identifier (ID or name)
 * @returns Promise resolving when deletion completes
 */
deleteDataset(params: {
  datasetId?: string;
  datasetName?: string;
}): Promise<void>;

/**
 * Check if a dataset exists
 * @param params - Dataset identifier (ID or name)
 * @returns Promise resolving to true if dataset exists, false otherwise
 */
hasDataset(params: {
  datasetId?: string;
  datasetName?: string;
}): Promise<boolean>;

Dataset Sharing

/**
 * Read dataset in OpenAI fine-tuning format
 * @param params - Dataset identifier (ID or name)
 * @returns Promise resolving to array of OpenAI-format examples
 */
readDatasetOpenaiFinetuning(params: {
  datasetId?: string;
  datasetName?: string;
}): Promise<unknown[]>;

/**
 * Share a dataset publicly
 * @param datasetId - Dataset ID to share
 * @param shareId - Optional custom share ID
 * @returns Promise resolving to dataset share schema with share token
 */
shareDataset(datasetId: string, shareId?: string): Promise<DatasetShareSchema>;

/**
 * Unshare a previously shared dataset
 * @param datasetId - Dataset ID to unshare
 * @returns Promise resolving when unshare completes
 */
unshareDataset(datasetId: string): Promise<void>;

/**
 * Read a publicly shared dataset using share token
 * @param shareToken - Public share token
 * @returns Promise resolving to the dataset
 */
readSharedDataset(shareToken: string): Promise<Dataset>;

/**
 * Read the dataset share schema (sharing configuration)
 * @param datasetId - Dataset ID
 * @returns Promise resolving to share schema or null if not shared
 */
readDatasetSharedSchema(datasetId?: string): Promise<DatasetShareSchema | null>;

/**
 * List examples from a publicly shared dataset
 * @param shareToken - Public share token
 * @param options - Optional filtering options
 * @returns Async iterable of examples
 */
listSharedExamples(
  shareToken: string,
  options?: { exampleIds?: string[] }
): AsyncIterable<Example>;
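
A sketch of the sharing flow above; the dataset ID is a placeholder for an existing dataset:

import { Client } from "langsmith";

const client = new Client();
const datasetId = "11111111-2222-3333-4444-555555555555"; // assumed existing dataset

// Share the dataset and capture its share configuration
const shareSchema = await client.shareDataset(datasetId);
console.log("Share schema:", shareSchema);

// Check whether the dataset is currently shared
const currentSchema = await client.readDatasetSharedSchema(datasetId);
if (currentSchema) {
  console.log("Dataset is publicly shared");
}

// Revoke public sharing
await client.unshareDataset(datasetId);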

Dataset Operations

/**
 * Index a dataset for similarity search
 * @param params - Dataset identifier and optional tag
 * @returns Promise resolving when indexing completes
 */
indexDataset(params: {
  datasetId?: string;
  datasetName?: string;
  tag?: string;
}): Promise<void>;

/**
 * Find similar examples in a dataset using vector similarity search
 * @param inputs - Input data to find similar examples for
 * @param datasetId - Dataset ID to search
 * @param limit - Maximum number of similar examples to return
 * @param filter - Optional filter criteria
 * @returns Promise resolving to array of similar examples
 */
similarExamples(
  inputs: Record<string, any>,
  datasetId: string,
  limit?: number,
  filter?: string | Record<string, any>
): Promise<Example[]>;

/**
 * Compare two versions of a dataset
 * @param params - Dataset identifier and version specifications
 * @returns Promise resolving to dataset diff information
 */
diffDatasetVersions(params: {
  datasetId?: string;
  datasetName?: string;
  fromVersion: string;
  toVersion: string;
}): Promise<DatasetDiffInfo>;

/**
 * Update a dataset tag to point to a specific version
 * @param params - Dataset identifier, tag name, and version
 * @returns Promise resolving when tag update completes
 */
updateDatasetTag(params: {
  datasetId: string;
  tag: string;
  asOf: Date | string;
}): Promise<void>;

/**
 * Read a specific version of a dataset
 * @param params - Dataset identifier and version timestamp
 * @returns Promise resolving to dataset version information
 */
readDatasetVersion(params: {
  datasetId?: string;
  datasetName?: string;
  asOf: Date | string;
}): Promise<DatasetVersion>;

/**
 * List all splits in a dataset
 * @param params - Dataset identifier and optional version
 * @returns Promise resolving to array of split names
 */
listDatasetSplits(params: {
  datasetId?: string;
  datasetName?: string;
  asOf?: Date | string;
}): Promise<string[]>;

/**
 * Update dataset splits by adding or removing examples
 * @param params - Dataset identifier, split name, example IDs, and operation
 * @returns Promise resolving when split update completes
 */
updateDatasetSplits(params: {
  datasetId?: string;
  datasetName?: string;
  splitName: string;
  exampleIds: string[];
  remove?: boolean;
}): Promise<void>;

/**
 * Upload CSV file as a dataset
 * @param params - CSV upload parameters
 * @returns Promise resolving to the created dataset
 */
uploadCsv(params: {
  csvFile: Blob | string;
  fileName: string;
  inputKeys: string[];
  outputKeys: string[];
  datasetName?: string;
  datasetId?: string;
  description?: string;
  dataType?: "kv" | "llm" | "chat";
}): Promise<Dataset>;

/**
 * Get the URL for viewing a dataset in the LangSmith UI
 * @param params - Dataset identifier
 * @returns Promise resolving to the dataset URL
 */
getDatasetUrl(params: {
  datasetId: string;
  datasetName?: string;
}): Promise<string>;

/**
 * Clone a publicly shared dataset into your workspace
 * @param tokenOrUrl - Share token or full share URL of the public dataset
 * @param options - Clone options
 * @returns Promise resolving when clone completes
 */
clonePublicDataset(
  tokenOrUrl: string,
  options?: {
    sourceApiUrl?: string;
    datasetName?: string;
  }
): Promise<void>;
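
Dataset Operations Examples (a sketch following the signatures above; the CSV contents, column names, and search input are illustrative):

import { Client } from "langsmith";

const client = new Client();

// Upload a CSV as a new dataset
const csv = new Blob(
  ["question,answer\nWhat is LangSmith?,A platform for LLM observability\n"],
  { type: "text/csv" }
);
const csvDataset = await client.uploadCsv({
  csvFile: csv,
  fileName: "qa-pairs.csv",
  inputKeys: ["question"],
  outputKeys: ["answer"],
  datasetName: "csv-imported-dataset",
});

// Index the dataset so it supports similarity search
await client.indexDataset({ datasetId: csvDataset.id });

// Find the most similar examples to a new input
const similar = await client.similarExamples(
  { question: "How does LangSmith work?" },
  csvDataset.id,
  5
);
console.log(`Found ${similar.length} similar examples`);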

Dataset Management Examples:

import { Client } from "langsmith";

const client = new Client();

// Create a dataset
const dataset = await client.createDataset({
  datasetName: "chatbot-evaluation-v1",
  description: "QA pairs for chatbot evaluation",
  dataType: "chat",
  metadata: { version: "1.0" },
});

// Read dataset by name
const existingDataset = await client.readDataset({
  datasetName: "chatbot-evaluation-v1",
});

// List all datasets
const datasets = await client.listDatasets({ limit: 50 });

// Check if dataset exists
const exists = await client.hasDataset({
  datasetName: "chatbot-evaluation-v1",
});

// Update dataset properties
const updatedDataset = await client.updateDataset({
  datasetName: "chatbot-evaluation-v1",
  description: "Updated description",
});

// Delete dataset
await client.deleteDataset({
  datasetName: "chatbot-evaluation-v1",
});

Example Management

Methods for managing examples within datasets.

Create Example

/**
 * Create a single example
 * @param example - Example creation data
 * @returns Promise resolving to the created example
 */
createExample(example: ExampleCreate): Promise<Example>;

interface ExampleCreate {
  /** Dataset ID (required) */
  dataset_id: string;
  /** Input data (required) */
  inputs: Record<string, any>;
  /** Expected output data */
  outputs?: Record<string, any>;
  /** Example metadata */
  metadata?: Record<string, any>;
  /** Example ID (auto-generated if not provided) */
  id?: string;
  /** Source run ID */
  source_run_id?: string;
}

interface Example extends ExampleCreate {
  /** Example ID */
  id: string;
  /** Creation timestamp */
  created_at: string;
  /** Modification timestamp */
  modified_at?: string;
  /** Run count */
  runs?: any[];
}

Create Examples (Bulk)

/**
 * Create multiple examples in bulk
 * @param params - Bulk creation parameters
 * @returns Promise resolving to array of created examples
 */
createExamples(params: CreateExamplesParams): Promise<Example[]>;

interface CreateExamplesParams {
  /** Dataset ID or name */
  datasetId?: string;
  datasetName?: string;
  /** Array of examples to create */
  examples: Array<{
    inputs: Record<string, any>;
    outputs?: Record<string, any>;
    metadata?: Record<string, any>;
    id?: string;
    source_run_id?: string;
  }>;
}

/**
 * Create an LLM-format example (single text completion)
 * @param input - Input string
 * @param generation - Output/completion string
 * @param options - Example creation options
 * @returns Promise resolving to the created example
 */
createLLMExample(
  input: string,
  generation: string | undefined,
  options: CreateExampleOptions
): Promise<Example>;

/**
 * Create a chat-format example (multi-turn conversations)
 * @param input - Array of input messages
 * @param generations - Output message or messages
 * @param options - Example creation options
 * @returns Promise resolving to the created example
 */
createChatExample(
  input: KVMap[],
  generations: KVMap | undefined,
  options: CreateExampleOptions
): Promise<Example>;

interface CreateExampleOptions {
  /** Dataset ID to create the example in */
  datasetId?: string;
  /** Dataset name to create the example in (if dataset ID not provided) */
  datasetName?: string;
  /** Creation date of the example */
  createdAt?: Date;
  /** Unique identifier for the example */
  exampleId?: string;
  /** Additional metadata */
  metadata?: KVMap;
  /** Split(s) to assign the example to */
  split?: string | string[];
  /** Source run ID associated with this example */
  sourceRunId?: string;
  /** Whether to use inputs and outputs from the source run */
  useSourceRunIO?: boolean;
  /** Which attachments from the source run to use */
  useSourceRunAttachments?: string[];
  /** Attachments for the example */
  attachments?: Attachments;
}
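
A sketch of the format-specific helpers above, assuming the target datasets already exist:

import { Client } from "langsmith";

const client = new Client();

// LLM-format example: plain text completion
await client.createLLMExample(
  "What is LangSmith?",
  "LangSmith is a platform for LLM observability...",
  { datasetName: "completion-dataset" }
);

// Chat-format example: multi-turn conversation
await client.createChatExample(
  [{ role: "user", content: "What is LangSmith?" }],
  { role: "assistant", content: "LangSmith is a platform..." },
  { datasetName: "chatbot-evaluation-v1", split: "train" }
);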

Update Example

/**
 * Update an example
 * @param example - Example update data
 * @returns Promise resolving to the updated example
 */
updateExample(example: ExampleUpdate): Promise<Example>;

interface ExampleUpdate {
  /** Example ID (required) */
  example_id: string;
  /** Updated inputs */
  inputs?: Record<string, any>;
  /** Updated outputs */
  outputs?: Record<string, any>;
  /** Updated metadata */
  metadata?: Record<string, any>;
  /** Dataset ID */
  dataset_id?: string;
  /** Source run ID */
  source_run_id?: string;
}

Read Example

/**
 * Read example details
 * @param exampleId - Example ID to read
 * @returns Promise resolving to the example
 */
readExample(exampleId: string): Promise<Example>;

List Examples

/**
 * List examples with filtering
 * @param params - List parameters
 * @returns Async iterable of examples
 */
listExamples(params?: ListExamplesParams): AsyncIterable<Example>;

interface ListExamplesParams {
  /** Filter by dataset ID */
  datasetId?: string;
  /** Filter by dataset name */
  datasetName?: string;
  /** Filter by example IDs */
  exampleIds?: string[];
  /** Limit number of results */
  limit?: number;
  /** Offset for pagination */
  offset?: number;
  /** Include run information */
  asOf?: Date | string;
  /** Metadata filter */
  metadata?: Record<string, any>;
}

Delete Examples

/**
 * Delete an example
 * @param exampleId - Example ID to delete
 * @returns Promise resolving when deletion completes
 */
deleteExample(exampleId: string): Promise<void>;

/**
 * Delete multiple examples at once
 * @param exampleIds - Array of example IDs to delete
 * @param options - Optional deletion settings
 * @returns Promise resolving when deletion completes
 */
deleteExamples(
  exampleIds: string[],
  options?: { hardDelete?: boolean }
): Promise<void>;

Multipart Upload/Update

/**
 * Upload multiple examples using multipart API (supports attachments)
 * @param datasetId - Dataset ID
 * @param uploads - Array of examples to upload
 * @returns Promise resolving to upload response with example IDs
 */
uploadExamplesMultipart(
  datasetId: string,
  uploads: ExampleCreate[]
): Promise<UploadExamplesResponse>;

interface UploadExamplesResponse {
  /** Array of created example IDs */
  example_ids: string[];
  /** Number of examples uploaded */
  count: number;
}

/**
 * Update multiple examples using multipart API (supports attachments)
 * @param datasetId - Dataset ID
 * @param updates - Array of example updates
 * @returns Promise resolving to update response with example IDs
 */
updateExamplesMultipart(
  datasetId: string,
  updates: ExampleUpdate[]
): Promise<UpdateExamplesResponse>;

interface UpdateExamplesResponse {
  /** Array of updated example IDs */
  example_ids: string[];
  /** Number of examples updated */
  count: number;
}
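
A sketch of the multipart APIs, following the ExampleCreate and ExampleUpdate shapes documented above; the dataset ID is a placeholder:

import { Client } from "langsmith";

const client = new Client();
const datasetId = "11111111-2222-3333-4444-555555555555"; // assumed existing dataset

// Upload examples in a single multipart request
const uploadResponse = await client.uploadExamplesMultipart(datasetId, [
  {
    dataset_id: datasetId,
    inputs: { question: "What is LangSmith?" },
    outputs: { answer: "LangSmith is a platform..." },
  },
]);
console.log(`Uploaded ${uploadResponse.count} examples`, uploadResponse.example_ids);

// Patch the uploaded examples in a second multipart call
await client.updateExamplesMultipart(datasetId, [
  {
    example_id: uploadResponse.example_ids[0],
    metadata: { reviewed: true },
  },
]);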

Example Management Examples:

import { Client } from "langsmith";

const client = new Client();

// Create a single example
const example = await client.createExample({
  dataset_id: dataset.id,
  inputs: {
    messages: [{ role: "user", content: "What is LangSmith?" }],
  },
  outputs: {
    content: "LangSmith is a platform for LLM observability...",
  },
  metadata: { category: "product-info" },
});

// Create multiple examples in bulk
const examples = await client.createExamples({
  datasetName: "chatbot-evaluation-v1",
  examples: [
    {
      inputs: { question: "What is LangSmith?" },
      outputs: { answer: "LangSmith is a platform..." },
    },
    {
      inputs: { question: "How do I get started?" },
      outputs: { answer: "To get started, install the package..." },
    },
  ],
});

// Update an example
await client.updateExample({
  example_id: example.id,
  outputs: { content: "Updated answer..." },
  metadata: { reviewed: true },
});

// Read an example
const fetchedExample = await client.readExample(example.id);

// List examples from a dataset
const listedExamples = client.listExamples({
  datasetName: "chatbot-evaluation-v1",
  limit: 100,
});

for await (const ex of listedExamples) {
  console.log(ex.inputs, ex.outputs);
}

// Delete an example
await client.deleteExample(example.id);

Feedback Management

Methods for creating and managing feedback on runs.

Create Feedback

/**
 * Create feedback for a run
 * @param params - Feedback creation parameters
 * @returns Promise resolving to the created feedback
 */
createFeedback(params: FeedbackCreate): Promise<Feedback>;

interface FeedbackCreate {
  /** Run ID to attach feedback to (required) */
  run_id: string;
  /** Feedback key/name (required) */
  key: string;
  /** Score value (number or boolean) */
  score?: number | boolean | null;
  /** Value (can be any type) */
  value?: number | boolean | string | object | null;
  /** Comment */
  comment?: string;
  /** Correction data */
  correction?: object;
  /** Feedback ID (auto-generated if not provided) */
  id?: string;
  /** Feedback source type */
  feedbackSourceType?: "api" | "model" | "app";
  /** Additional feedback source metadata */
  feedbackSource?: FeedbackSourceBase;
  /** Feedback config */
  feedbackConfig?: FeedbackConfig;
}

interface Feedback extends FeedbackCreate {
  /** Feedback ID */
  id: string;
  /** Creation timestamp */
  created_at: string;
  /** Modification timestamp */
  modified_at: string;
  /** Project ID */
  session_id?: string;
}

interface FeedbackSourceBase {
  /** Source type */
  type?: string;
  /** Source metadata */
  metadata?: Record<string, any>;
}

interface FeedbackConfig {
  /** Feedback type */
  type?: string;
  /** Min value for numeric feedback */
  min?: number;
  /** Max value for numeric feedback */
  max?: number;
  /** Categories for categorical feedback */
  categories?: FeedbackCategory[];
}

interface FeedbackCategory {
  /** Category value */
  value: number | string;
  /** Category label */
  label?: string;
}

Update Feedback

/**
 * Update feedback
 * @param feedbackId - Feedback ID to update
 * @param params - Update parameters
 * @returns Promise resolving to the updated feedback
 */
updateFeedback(
  feedbackId: string,
  params: FeedbackUpdate
): Promise<Feedback>;

interface FeedbackUpdate {
  /** Updated score */
  score?: number | boolean | null;
  /** Updated value */
  value?: number | boolean | string | object | null;
  /** Updated comment */
  comment?: string;
  /** Updated correction */
  correction?: object;
}

Read Feedback

/**
 * Read feedback details
 * @param feedbackId - Feedback ID to read
 * @returns Promise resolving to the feedback
 */
readFeedback(feedbackId: string): Promise<Feedback>;

List Feedback

/**
 * List feedback with filtering
 * @param params - List parameters
 * @returns Async iterable of feedback
 */
listFeedback(params?: ListFeedbackParams): AsyncIterable<Feedback>;

interface ListFeedbackParams {
  /** Filter by run IDs */
  runIds?: string[];
  /** Filter by feedback keys */
  feedbackKeys?: string[];
  /** Filter by feedback source types */
  feedbackSourceTypes?: string[];
  /** Limit number of results */
  limit?: number;
  /** Offset for pagination */
  offset?: number;
}

Delete Feedback

/**
 * Delete feedback
 * @param feedbackId - Feedback ID to delete
 * @returns Promise resolving when deletion completes
 */
deleteFeedback(feedbackId: string): Promise<void>;

Feedback Management Examples:

import { Client } from "langsmith";

const client = new Client();

// Create numeric feedback
const feedback = await client.createFeedback({
  run_id: runId,
  key: "accuracy",
  score: 0.95,
  comment: "Excellent response quality",
});

// Create boolean feedback
await client.createFeedback({
  run_id: runId,
  key: "correctness",
  score: true,
  feedbackSourceType: "api",
});

// Create categorical feedback
await client.createFeedback({
  run_id: runId,
  key: "sentiment",
  value: "positive",
  comment: "User seemed satisfied",
});

// Create feedback with correction
await client.createFeedback({
  run_id: runId,
  key: "correction",
  score: 0,
  correction: {
    outputs: { answer: "The correct answer is..." },
  },
});

// Update feedback
await client.updateFeedback(feedback.id, {
  score: 0.98,
  comment: "Updated after review",
});

// Read feedback
const fetchedFeedback = await client.readFeedback(feedback.id);

// List feedback for specific runs
const feedbacks = client.listFeedback({
  runIds: [runId],
  feedbackKeys: ["accuracy", "correctness"],
});

for await (const feedback of feedbacks) {
  console.log(feedback.key, feedback.score);
}

// Delete feedback
await client.deleteFeedback(feedback.id);

Presigned Feedback Tokens

Methods for creating and managing presigned tokens that allow external systems to submit feedback without API keys.

Create Presigned Token

/**
 * Create a presigned feedback token for a run
 * @param runId - Run ID to create token for
 * @param feedbackKey - Feedback key this token is valid for
 * @param options - Optional token configuration
 * @returns Promise resolving to feedback ingest token
 */
createPresignedFeedbackToken(
  runId: string,
  feedbackKey: string,
  options?: {
    expiration?: string | Date;
    feedbackConfig?: FeedbackConfig;
  }
): Promise<FeedbackIngestToken>;

List Presigned Tokens

/**
 * List all presigned feedback tokens for a run
 * @param runId - Run ID to list tokens for
 * @returns Async iterable of feedback ingest tokens
 */
listPresignedFeedbackTokens(runId: string): AsyncIterable<FeedbackIngestToken>;

interface FeedbackIngestToken {
  /** Token ID */
  id: string;
  /** Presigned URL for submitting feedback */
  url: string;
  /** Token string */
  token: string;
  /** Run ID this token is for */
  run_id: string;
  /** Feedback key */
  feedback_key: string;
  /** Token expiration timestamp */
  expires_at?: string;
  /** Feedback configuration */
  feedback_config?: FeedbackConfig;
}

Presigned Token Examples:

import { Client } from "langsmith";

const client = new Client();

// Create a presigned token for user feedback
const token = await client.createPresignedFeedbackToken(runId, "user_rating", {
  expiration: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000), // 7 days
  feedbackConfig: {
    type: "continuous",
    min: 1,
    max: 5,
  },
});

// Share the presigned URL with users
console.log("Feedback URL:", token.url);
// Users can POST feedback to this URL without auth

// List all tokens for a run
for await (const token of client.listPresignedFeedbackTokens(runId)) {
  console.log(`Token for ${token.feedback_key}: ${token.url}`);
}

Evaluation Methods

Methods for logging evaluation feedback.

/**
 * Log evaluation feedback from evaluator response
 * @param evaluatorResponse - Response from evaluator
 * @param run - Optional run information
 * @param sourceInfo - Optional source metadata
 * @returns Promise resolving to array of results and streaming evaluator
 */
logEvaluationFeedback(
  evaluatorResponse: EvaluationResult | EvaluationResults,
  run?: Run,
  sourceInfo?: Record<string, any>
): Promise<[any[], any]>;

Usage Examples:

import { Client } from "langsmith";
import { evaluate } from "langsmith/evaluation";

const client = new Client();

// Define an evaluator
const accuracyEvaluator = async ({ run, example }) => {
  const isCorrect = run.outputs?.answer === example?.outputs?.answer;
  return {
    key: "accuracy",
    score: isCorrect ? 1 : 0,
    comment: isCorrect ? "Correct" : "Incorrect",
  };
};

// Run evaluation on a dataset
const results = await evaluate(myTarget, {
  data: "my-dataset",
  evaluators: [accuracyEvaluator],
  client,
});
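
logEvaluationFeedback can also be called directly with an evaluator result. A sketch, assuming the run ID refers to an existing run and using the result shape from the evaluator above:

import { Client } from "langsmith";

const client = new Client();

// Read the run to evaluate (ID is a placeholder)
const run = await client.readRun("550e8400-e29b-41d4-a716-446655440000");

// Log a manually constructed evaluation result as feedback on that run
await client.logEvaluationFeedback(
  {
    key: "accuracy",
    score: 1,
    comment: "Answer matched the reference output",
  },
  run,
  { evaluator: "manual-review" } // optional source metadata
);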

Comparative Experiments

Methods for creating and managing comparative experiments for A/B testing different model configurations.

/**
 * Create a comparative experiment
 * @param params - Experiment parameters
 * @returns Promise resolving to comparative experiment object
 */
createComparativeExperiment(params: {
  name: string;
  experimentIds: string[];
  referenceDatasetId?: string;
  description?: string;
  metadata?: Record<string, any>;
}): Promise<ComparativeExperiment>;

interface ComparativeExperiment {
  /** Experiment ID */
  id: string;
  /** Experiment name */
  name: string;
  /** Description */
  description?: string;
  /** IDs of experiments being compared */
  experiment_ids: string[];
  /** Reference dataset ID */
  reference_dataset_id?: string;
  /** Creation timestamp */
  created_at: string;
  /** Modification timestamp */
  modified_at?: string;
  /** Metadata */
  metadata?: Record<string, any>;
}

Usage Examples:

import { Client } from "langsmith";
import { evaluate } from "langsmith/evaluation";

const client = new Client();

// Run two experiments with different configurations
const experiment1 = await evaluate(model1, {
  data: "test-dataset",
  experimentPrefix: "gpt4-config",
});

const experiment2 = await evaluate(model2, {
  data: "test-dataset",
  experimentPrefix: "claude-config",
});

// Create comparative experiment
const comparison = await client.createComparativeExperiment({
  name: "Model Comparison: GPT-4 vs Claude",
  experimentIds: [experiment1.experimentId, experiment2.experimentId],
  description: "Comparing GPT-4 and Claude on customer support dataset",
  metadata: {
    use_case: "customer_support",
    date: new Date().toISOString(),
  },
});

console.log("Comparison created:", comparison.id);

Prompt Management

The Client class provides methods for managing prompts, prompt commits, and versioning. For detailed documentation of all prompt-related methods, see Prompts:

  • createPrompt() - Create a new prompt
  • updatePrompt() - Update an existing prompt
  • deletePrompt() - Delete a prompt
  • pushPrompt() - Push a new prompt commit/version
  • pullPromptCommit() - Pull a specific prompt commit
  • listPrompts() - List all prompts
  • listCommits() - List commits for a prompt
  • getPrompt() - Get prompt details
  • promptExists() - Check if a prompt exists
  • createCommit() - Create a prompt commit
  • likePrompt() - Like a prompt
  • unlikePrompt() - Unlike a prompt

Annotation Queue Management

The Client class provides methods for managing annotation queues for human review workflows. For detailed documentation of all annotation queue methods, see Advanced Topics:

  • createAnnotationQueue() - Create a new annotation queue
  • updateAnnotationQueue() - Update an annotation queue
  • deleteAnnotationQueue() - Delete an annotation queue
  • readAnnotationQueue() - Read annotation queue details
  • listAnnotationQueues() - List all annotation queues
  • addRunsToAnnotationQueue() - Add runs to a queue for review
  • getRunFromAnnotationQueue() - Get a specific run from a queue
  • deleteRunFromAnnotationQueue() - Remove a run from a queue
  • getSizeFromAnnotationQueue() - Get the size of an annotation queue

Batch and Performance

Await Pending Batches

/**
 * Wait for all pending trace batches to flush
 * @returns Promise resolving when all batches are flushed
 */
awaitPendingTraceBatches(): Promise<void>;

Manual Flush

/**
 * Manually flush pending trace batches (when manualFlushMode is enabled)
 * @returns Promise resolving when flush completes
 */
flush(): Promise<void>;

Usage Examples:

import { Client } from "langsmith";

const client = new Client({ autoBatchTracing: true });

// ... create many runs ...

// Before shutting down or finishing critical operations,
// ensure all traces are uploaded
await client.awaitPendingTraceBatches();

console.log("All traces have been uploaded successfully");
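
When manualFlushMode is enabled, traces stay queued locally until flush() is called. A brief sketch:

import { Client } from "langsmith";

// Queue traces locally and send them only on explicit flushes
const manualClient = new Client({ manualFlushMode: true });

await manualClient.createRun({
  name: "batch-job-step",
  run_type: "chain",
  inputs: { step: 1 },
  start_time: Date.now(),
});

// Nothing has been sent yet; flush explicitly at a convenient point
await manualClient.flush();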

Resource Management

Cache Access

/**
 * Access the prompt cache instance
 * @returns The Cache instance if configured, undefined otherwise
 */
get cache(): Cache | undefined;

Cleanup

/**
 * Cleanup resources held by the client
 * Stops background cache refresh timers and other resources
 * Call this when you're done using the client
 */
cleanup(): void;

Usage Examples:

import { Client } from "langsmith";

const client = new Client();

// Access the cache
const cache = client.cache;
if (cache) {
  console.log("Cache available for prompt caching");
}

// Cleanup when done
client.cleanup();

// Application shutdown example
process.on('SIGTERM', () => {
  client.cleanup();
  process.exit(0);
});

Best Practices

Authentication

Always use environment variables for API credentials:

// Preferred: Use environment variables
// Set LANGCHAIN_API_KEY and LANGCHAIN_ENDPOINT in your environment
const client = new Client();

// Alternative: Explicit configuration (avoid hardcoding keys)
const explicitClient = new Client({
  apiKey: process.env.LANGCHAIN_API_KEY,
  apiUrl: process.env.LANGCHAIN_ENDPOINT,
});

Batching and Performance

Enable auto-batching for better performance with high-volume tracing:

const client = new Client({
  autoBatchTracing: true,
  batchSizeBytesLimit: 20_000_000,
  pendingAutoBatchedRunLimit: 100,
});

// Always await pending batches before shutdown
process.on("SIGINT", async () => {
  await client.awaitPendingTraceBatches();
  process.exit(0);
});

Error Handling

Implement proper error handling for API calls:

import { Client } from "langsmith";

const client = new Client();

// Basic error handling
try {
  const run = await client.readRun(runId);
  console.log(run);
} catch (error) {
  if (error.status === 404) {
    console.error("Run not found");
  } else if (error.status === 401) {
    console.error("Authentication failed");
  } else if (error.status === 429) {
    console.error("Rate limit exceeded");
  } else {
    console.error("API error:", error.message);
  }
}

// Advanced: Retry logic with exponential backoff
async function readRunWithRetry(
  client: Client,
  runId: string,
  maxRetries = 3
): Promise<Run> {
  for (let i = 0; i < maxRetries; i++) {
    try {
      return await client.readRun(runId);
    } catch (error) {
      // Don't retry on 404 or 401
      if (error.status === 404 || error.status === 401) {
        throw error;
      }

      // Retry on rate limits and transient errors
      if (i < maxRetries - 1 && (error.status === 429 || error.status >= 500)) {
        const delay = Math.pow(2, i) * 1000; // Exponential backoff
        console.log(`Retrying after ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
        continue;
      }

      throw error;
    }
  }
  throw new Error("Max retries exceeded");
}

// Edge case: Handling async iterables with errors
async function safeListRuns(client: Client, projectName: string) {
  try {
    for await (const run of client.listRuns({ projectName })) {
      try {
        // Process each run safely
        await processRun(run);
      } catch (error) {
        // Handle per-run errors without stopping iteration
        console.error(`Error processing run ${run.id}:`, error.message);
      }
    }
  } catch (error) {
    // Handle iteration-level errors
    if (error.status === 404) {
      console.error("Project not found");
    } else {
      console.error("Failed to list runs:", error.message);
    }
  }
}

// Edge case: Graceful degradation when LangSmith is unavailable
async function traceWithFallback(operation: () => Promise<any>) {
  const client = new Client();

  try {
    // Attempt to trace; failures here should not affect the operation itself
    await client.createRun({
      name: "operation",
      run_type: "chain",
      inputs: {},
      start_time: Date.now(),
    });
  } catch (error) {
    // If tracing fails, log and continue without tracing
    console.warn("Tracing unavailable, continuing without tracing:", error.message);
  }

  return await operation();
}

// Edge case: Handling pagination limits
async function getAllRuns(
  client: Client,
  projectName: string,
  maxRuns = 10000
): Promise<Run[]> {
  const runs: Run[] = [];

  try {
    for await (const run of client.listRuns({ projectName, limit: maxRuns })) {
      runs.push(run);

      // Safety limit to prevent memory issues
      if (runs.length >= maxRuns) {
        console.warn(`Reached maximum run limit of ${maxRuns}`);
        break;
      }
    }
  } catch (error) {
    console.error("Error fetching runs:", error.message);
    // Return partial results
  }

  return runs;
}

Privacy Controls

Use privacy controls to hide sensitive data:

const client = new Client({
  hideInputs: true,  // Hide inputs from all traces
  hideOutputs: true, // Hide outputs from all traces
});
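
For finer control, hideInputs and hideOutputs accept functions, and an anonymizer can rewrite payloads before upload. A sketch; the field name being redacted and the email pattern are illustrative:

import { Client } from "langsmith";

const redactingClient = new Client({
  // Drop a specific field from every traced input
  hideInputs: (inputs) => {
    const { ssn, ...rest } = inputs; // "ssn" is an illustrative sensitive field
    return rest;
  },
  // Mask email-like strings anywhere in traced values
  anonymizer: (values) =>
    JSON.parse(
      JSON.stringify(values).replace(
        /[\w.+-]+@[\w-]+\.[\w.]+/g,
        "<redacted-email>"
      )
    ),
});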

Resource Cleanup

Always clean up resources and ensure traces are uploaded:

async function main() {
  const client = new Client();

  try {
    // ... your application code ...
  } finally {
    // Ensure all traces are uploaded before exit
    await client.awaitPendingTraceBatches();
  }
}

Configuration Types

Complete type definitions for client configuration and run management.

Client Configuration

Complete reference for all client configuration options. See the Constructor section above for usage examples.

interface ClientConfig {
  /** API URL for LangSmith (default: LANGCHAIN_ENDPOINT env var or https://api.smith.langchain.com) */
  apiUrl?: string;
  /** API key for authentication (default: LANGCHAIN_API_KEY env var) */
  apiKey?: string;
  /** Default timeout for requests in milliseconds (default: 120000) */
  timeout_ms?: number;
  /** Web URL for LangSmith UI (default: derived from apiUrl or https://smith.langchain.com) */
  webUrl?: string;
  /** Custom fetch implementation for custom HTTP handling (default: global fetch) */
  fetchImplementation?: typeof fetch;
  /** Enable auto-batching of trace uploads for better performance (default: true) */
  autoBatchTracing?: boolean;
  /** Batch size limit in bytes for trace batching (default: 20971520 / 20MB) */
  batchSizeBytesLimit?: number;
  /** Maximum number of operations to batch in a single request (default: undefined / no limit) */
  batchSizeLimit?: number;
  /** Maximum total memory in bytes for batch queues (default: 1073741824 / 1GB) */
  maxIngestMemoryBytes?: number;
  /** Number of concurrent batch uploads (default: 5) */
  traceBatchConcurrency?: number;
  /** Maximum number of pending runs allowed in the batch queue before blocking (default: 100) */
  pendingAutoBatchedRunLimit?: number;
  /** Block on root run finalization, ensuring trace upload before continuing (default: false) */
  blockOnRootRunFinalization?: boolean;
  /** Hide inputs from traces - boolean for all, or function to transform/filter specific inputs (default: false) */
  hideInputs?: boolean | ((inputs: KVMap) => KVMap | Promise<KVMap>);
  /** Hide outputs from traces - boolean for all, or function to transform/filter specific outputs (default: false) */
  hideOutputs?: boolean | ((outputs: KVMap) => KVMap | Promise<KVMap>);
  /** Custom anonymizer function to transform traced data before sending (default: undefined) */
  anonymizer?: (values: KVMap) => KVMap | Promise<KVMap>;
  /** Whether to omit runtime information from traced runs like SDK version and platform (default: false) */
  omitTracedRuntimeInfo?: boolean;
  /** Workspace ID - required for org-scoped API keys (default: undefined) */
  workspaceId?: string;
  /** Custom fetch options passed to all HTTP requests (default: undefined) */
  fetchOptions?: RequestInit;
  /** Require manual .flush() calls before sending traces, useful for rate limit management (default: false) */
  manualFlushMode?: boolean;
  /** Sampling rate for tracing (0-1, where 1.0 = 100% of traces sent) (default: 1.0) */
  tracingSamplingRate?: number;
  /** Enable debug mode - logs all HTTP requests to console (default: false) */
  debug?: boolean;
  /** Caller function options for advanced async request handling (default: undefined) */
  callerOptions?: object;
  /** Caching configuration - true for defaults, Cache instance for custom, false/undefined to disable (default: false) */
  cache?: Cache | boolean;
}

Type Imports:

import type {
  ClientConfig,
  KVMap,
  Cache
} from "langsmith";

Usage Metadata Types

/** Usage metadata for token tracking */
interface UsageMetadata {
  /** Total tokens used */
  total_tokens?: number;
  /** Input/prompt tokens */
  prompt_tokens?: number;
  /** Output/completion tokens */
  completion_tokens?: number;
  /** Input token details */
  input_token_details?: InputTokenDetails;
  /** Output token details */
  output_token_details?: OutputTokenDetails;
}

interface InputTokenDetails {
  /** Cached tokens */
  cached?: number;
  /** Audio tokens */
  audio?: number;
}

interface OutputTokenDetails {
  /** Reasoning tokens */
  reasoning?: number;
  /** Audio tokens */
  audio?: number;
}

Project Configuration Types

Types for project/session management.

/** Project/Session result with statistics */
interface TracerSessionResult extends TracerSession {
  /** Number of runs in project */
  run_count?: number;
  /** Latency statistics */
  latency_p50?: number;
  latency_p99?: number;
  /** Total tokens used */
  total_tokens?: number;
  /** Prompt tokens used */
  prompt_tokens?: number;
  /** Completion tokens used */
  completion_tokens?: number;
  /** First token latency */
  first_token_p50?: number;
  first_token_p99?: number;
  /** Error rate */
  error_rate?: number;
  /** Feedback statistics */
  feedback_stats?: Record<string, {
    count: number;
    avg?: number;
  }>;
  /** Last run timestamp */
  last_run_start_time?: string;
}

Run Configuration Types

Types and interfaces related to run creation and management.

/**
 * Run type enumeration - categorizes the type of operation being traced
 * Use appropriate run types to enable better filtering and analytics in LangSmith UI
 */
type RunType =
  | "llm"        // Direct language model API call (e.g., OpenAI, Anthropic completion)
  | "chain"      // Sequence of multiple operations or high-level workflow
  | "tool"       // Individual tool or function execution (e.g., calculator, API call)
  | "retriever"  // Document or data retrieval operation (e.g., vector store search)
  | "embedding"  // Text embedding generation (e.g., converting text to vectors)
  | "prompt"     // Prompt formatting or templating operation
  | "parser";    // Output parsing or structured extraction from LLM responses

/**
 * Key-value map type for flexible data structures
 * Used throughout the SDK for inputs, outputs, metadata, and other flexible data
 */
type KVMap = Record<string, any>;

/**
 * Attachments type for file/binary data in runs
 * Each attachment is identified by a key and includes MIME type and base64-encoded data
 * Useful for storing images, PDFs, or other binary data alongside traces
 */
type Attachments = Record<string, {
  /** MIME type of the attachment (e.g., "image/png", "application/pdf") */
  mime_type: string;
  /** Base64 encoded binary data */
  data: string;
}>;

/**
 * LLM invocation parameters - captures model configuration used for a run
 * Helps track and reproduce LLM calls with specific settings
 * Provider-specific fields can be added as additional properties
 */
interface InvocationParamsSchema {
  /** Model identifier (e.g., "gpt-4", "claude-3-opus-20240229") */
  model?: string;
  /** Temperature for randomness (typically 0.0-2.0, where 0 is deterministic) */
  temperature?: number;
  /** Maximum tokens to generate in response */
  max_tokens?: number;
  /** Top-p nucleus sampling parameter (0.0-1.0) */
  top_p?: number;
  /** Top-k sampling parameter (number of top tokens to consider) */
  top_k?: number;
  /** Stop sequences that halt generation when encountered */
  stop?: string[];
  /** Presence penalty to reduce repetition (-2.0 to 2.0) */
  presence_penalty?: number;
  /** Frequency penalty to reduce repetition (-2.0 to 2.0) */
  frequency_penalty?: number;
  /** Token-level bias adjustments (token ID -> bias value) */
  logit_bias?: Record<string, number>;
  /** Additional provider-specific parameters */
  [key: string]: any;
}
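
These types come together when creating runs. A sketch attaching a small file and recording invocation parameters; the base64 payload is a truncated placeholder:

import { Client } from "langsmith";
import type { Attachments, InvocationParamsSchema } from "langsmith/schemas";

const client = new Client();

const attachments: Attachments = {
  prompt_screenshot: {
    mime_type: "image/png",
    data: "iVBORw0KGgo...", // truncated base64 placeholder
  },
};

const invocationParams: InvocationParamsSchema = {
  model: "gpt-4",
  temperature: 0.2,
  max_tokens: 512,
};

await client.createRun({
  name: "AnnotatedLLMCall",
  run_type: "llm",
  inputs: { prompt: "Summarize the attached screenshot" },
  start_time: Date.now(),
  attachments,
  invocation_params: invocationParams,
});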

Type Imports:

import type {
  RunType,
  KVMap,
  Attachments,
  InvocationParamsSchema,
  RunCreate,
  Run
} from "langsmith/schemas";

Related Documentation