tessl/npm-langsmith

tessl install tessl/npm-langsmith@0.4.3

TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.

docs/api/runs.md

Run Management API

Methods for creating, updating, querying, and managing runs (traces).

Overview

Runs represent individual traces of function executions, LLM calls, or operations. The Run API provides methods for creating runs manually, querying existing runs, and managing the run lifecycle.
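
As a quick orientation, the sketch below walks one run through that lifecycle using the methods documented in this section (createRun, updateRun, readRun). The uuid import and the project name are illustrative choices, not requirements of the SDK.

import { Client } from "langsmith";
import { v4 as uuidv4 } from "uuid"; // run IDs are plain UUIDs; any UUID source works

const client = new Client();
const runId = uuidv4();

// 1. Create the run when the operation starts
await client.createRun({
  id: runId,
  name: "example-operation",
  run_type: "chain",
  inputs: { question: "What is LangSmith?" },
  start_time: Date.now(),
  project_name: "example-project" // hypothetical project name
});

// 2. Close it out with outputs (or an error) when the operation finishes
await client.updateRun(runId, {
  end_time: Date.now(),
  outputs: { answer: "LangSmith traces and evaluates LLM applications." }
});

// 3. Read it back later for inspection
const run = await client.readRun(runId);
console.log(run.name, run.end_time);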

Create Run

/**
 * Create a new run/trace
 * @param run - Run creation data
 * @returns Promise resolving when run is created
 */
createRun(run: RunCreate): Promise<void>;

interface RunCreate {
  /** Run ID (auto-generated if not provided) */
  id?: string;
  /** Run name (required) */
  name: string;
  /** Run type (required) */
  run_type: string;
  /** Input data */
  inputs?: Record<string, any>;
  /** Output data */
  outputs?: Record<string, any>;
  /** Start time (auto-set if not provided) */
  start_time?: number;
  /** End time */
  end_time?: number;
  /** Error message if failed */
  error?: string;
  /** Parent run ID for hierarchical traces */
  parent_run_id?: string;
  /** Project name */
  project_name?: string;
  /** Reference example ID for evaluation */
  reference_example_id?: string;
  /** Additional metadata */
  extra?: Record<string, any>;
  /** Tags */
  tags?: string[];
  /** Trace ID */
  trace_id?: string;
  /** Attachments */
  attachments?: Attachments;
  /** LLM invocation parameters */
  invocation_params?: InvocationParamsSchema;
}

Usage Examples

import { Client } from "langsmith";
import { v4 as uuidv4 } from "uuid";

const client = new Client();

// Generate run IDs up front so the parent and child runs can reference each other
const runId = uuidv4();
const childRunId = uuidv4();

// Create root run
await client.createRun({
  id: runId,
  name: "ChatBot",
  run_type: "chain",
  inputs: { question: "What is LangSmith?" },
  start_time: Date.now(),
  project_name: "my-chatbot",
  tags: ["production"]
});

// Create child run
await client.createRun({
  id: childRunId,
  name: "OpenAI Chat",
  run_type: "llm",
  inputs: { messages: [{ role: "user", content: "What is LangSmith?" }] }, // example chat messages
  parent_run_id: runId,
  project_name: "my-chatbot"
});

Update Run

/**
 * Update an existing run
 * @param runId - Run ID to update
 * @param update - Update data
 * @returns Promise resolving when update completes
 */
updateRun(runId: string, update: RunUpdate): Promise<void>;

interface RunUpdate {
  /** Run ID (identifies the run when used in batch patch operations) */
  id?: string;
  /** End time */
  end_time?: number;
  /** Error message */
  error?: string;
  /** Input data */
  inputs?: Record<string, any>;
  /** Output data */
  outputs?: Record<string, any>;
  /** Additional metadata */
  extra?: Record<string, any>;
  /** Tags */
  tags?: string[];
  /** Events */
  events?: Array<{
    name: string;
    time: number;
    kwargs?: Record<string, any>;
  }>;
}

Usage Examples

// Update with outputs
await client.updateRun(runId, {
  end_time: Date.now(),
  outputs: { answer: "LangSmith is a platform..." }
});

// Add error
await client.updateRun(runId, {
  end_time: Date.now(),
  error: "API call failed"
});
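
The events field on RunUpdate can record intermediate milestones alongside the final update. A minimal sketch follows; the event names and kwargs values are purely illustrative assumptions.

// Attach intermediate events while closing out the run
await client.updateRun(runId, {
  end_time: Date.now(),
  outputs: { answer: "Streamed response complete" },
  events: [
    { name: "first_token", time: Date.now() - 500 },
    { name: "retry", time: Date.now() - 200, kwargs: { attempt: 2 } }
  ]
});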

Read Run

/**
 * Read run details
 * @param runId - Run ID to read
 * @param options - Read options
 * @returns Promise resolving to run details
 */
readRun(runId: string, options?: ReadRunOptions): Promise<Run>;

interface ReadRunOptions {
  /** Load child runs recursively */
  loadChildRuns?: boolean;
}

Usage Examples

// Read basic run
const run = await client.readRun(runId);

// Read with children loaded recursively
const runWithChildren = await client.readRun(runId, { loadChildRuns: true });
console.log(runWithChildren.child_runs);

List Runs

/**
 * List runs with filtering and pagination
 * @param params - List parameters
 * @returns Async iterable of runs
 */
listRuns(params?: ListRunsParams): AsyncIterable<Run>;

interface ListRunsParams {
  /** Filter by project ID */
  projectId?: string | string[];
  /** Filter by project name */
  projectName?: string | string[];
  /** Filter by run type */
  runType?: string;
  /** Filter by reference example ID */
  referenceExampleId?: string;
  /** Filter by trace ID */
  traceId?: string;
  /** Filter by parent run ID */
  parentRunId?: string | null;
  /** Filter by execution order */
  executionOrder?: number;
  /** Filter by start time */
  startTime?: Date;
  /** Filter by end time */
  endTime?: Date;
  /** Filter by error presence */
  error?: boolean;
  /** Filter by run IDs */
  id?: string[];
  /** Limit results */
  limit?: number;
  /** Sort order */
  order?: "asc" | "desc";
  /** Full-text search */
  query?: string;
  /** Select specific fields to include in response */
  select?: string[];
  /**
   * Advanced filter query using field comparators
   * Supported comparators: gte (>=), gt (>), lte (<=), lt (<), eq (=), neq (!=), has (contains), search (text search)
   * Examples:
   *   - 'eq(status, "success")' - exact match
   *   - 'gte(start_time, "2024-01-01")' - greater than or equal
   *   - 'and(eq(error, null), gte(latency, 1000))' - combine conditions
   *   - 'has(tags, "production")' - array contains
   *   - 'search(name, "chatbot")' - text search in field
   */
  filter?: string;
  /**
   * Filter to apply to the ROOT run in a trace tree
   * Uses same syntax as filter parameter
   * Example: 'eq(name, "main-chain")' to find traces where the root run is named "main-chain"
   */
  traceFilter?: string;
  /**
   * Filter to apply to OTHER runs in trace tree (non-root runs)
   * Uses same syntax as filter parameter
   * Example: 'eq(run_type, "llm")' to find traces containing at least one LLM run
   */
  treeFilter?: string;
  /** Only root runs */
  isRoot?: boolean;
}

Usage Examples

// List all runs in project
for await (const run of client.listRuns({
  projectName: "my-project",
  limit: 100
})) {
  console.log(run.name);
}

// Root runs only
for await (const run of client.listRuns({
  projectName: "my-project",
  isRoot: true
})) {
  console.log(run.name);
}

// With error filter
for await (const run of client.listRuns({
  projectName: "my-project",
  error: true
})) {
  console.log(`Failed: ${run.name}`);
}

// Advanced filtering
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'and(eq(error, null), gte(latency, 1000))'
})) {
  console.log(`Slow run: ${run.name}`);
}
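
When only a few fields are needed, the select parameter documented above can trim the response payload. The projected field names below are an assumption about what the API accepts; adjust them to the fields you actually need.

// Fetch only lightweight fields for a dashboard view
for await (const run of client.listRuns({
  projectName: "my-project",
  select: ["id", "name", "run_type", "start_time", "end_time"],
  limit: 500
})) {
  console.log(run.id, run.name);
}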

Advanced Filtering Examples

The filter, traceFilter, and treeFilter parameters support complex filtering using field comparators.

Supported comparators:

  • gte (>=) - greater than or equal
  • gt (>) - greater than
  • lte (<=) - less than or equal
  • lt (<) - less than
  • eq (=) - equal
  • neq (!=) - not equal
  • has - array contains
  • search - text search

Filter Examples

import { Client } from "langsmith";

const client = new Client();

// Example 1: Find successful runs with high latency
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'and(eq(error, null), gte(latency, 1000))',
})) {
  console.log(`Slow run: ${run.name}, latency: ${run.latency}ms`);
}

// Example 2: Find traces where the root run is a specific chain
// and it contains at least one LLM call
for await (const run of client.listRuns({
  projectName: "my-project",
  traceFilter: 'eq(name, "rag-pipeline")',
  treeFilter: 'eq(run_type, "llm")',
})) {
  console.log(`RAG pipeline trace: ${run.id}`);
}

// Example 3: Find runs with specific tags
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'has(tags, "production")',
  startTime: new Date("2024-01-01"),
})) {
  console.log(`Production run: ${run.name}`);
}

// Example 4: Complex filtering with multiple conditions
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'and(eq(run_type, "chain"), or(gte(total_tokens, 10000), gte(latency, 5000)))',
  order: "desc",
  limit: 100,
})) {
  console.log(`High-cost chain: ${run.name}`);
}

// Example 5: Text search in run names
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'search(name, "customer-support")',
})) {
  console.log(`Customer support run: ${run.name}`);
}

// Example 6: Find failed runs in a date range
for await (const run of client.listRuns({
  projectName: "my-project",
  filter: 'neq(error, null)',
  startTime: new Date("2024-01-01"),
  endTime: new Date("2024-01-31"),
  order: "desc",
})) {
  console.log(`Failed run: ${run.name}, error: ${run.error}`);
}

Share Run

/**
 * Share a run publicly
 * @param runId - Run ID to share
 * @returns Promise resolving to share URL
 */
shareRun(runId: string): Promise<string>;

/**
 * Unshare a previously shared run
 * @param runId - Run ID to unshare
 * @returns Promise resolving when unshare completes
 */
unshareRun(runId: string): Promise<void>;

/**
 * Read shared link for a run
 * @param runId - Run ID
 * @returns Promise resolving to shared link
 */
readRunSharedLink(runId: string): Promise<string>;

/**
 * List all publicly shared runs
 * @param params - Optional filtering parameters
 * @returns Async iterable of shared runs
 */
listSharedRuns(params?: { shareToken?: string; limit?: number }): AsyncIterable<Run>;

Usage Examples

// Share run
const shareUrl = await client.shareRun(runId);
console.log("Share:", shareUrl);

// Read shared link
const sharedLink = await client.readRunSharedLink(runId);
console.log("Shared link:", sharedLink);

// List all shared runs
for await (const run of client.listSharedRuns({ limit: 10 })) {
  console.log(`Shared run: ${run.name}`);
}

// Unshare
await client.unshareRun(runId);

Get Run URL

/**
 * Get URL for viewing run in LangSmith UI
 * @param params - Run identifier and context
 * @returns Promise resolving to run URL string
 */
getRunUrl(params: {
  runId?: string;
  run?: Run;
  projectOpts?: {
    projectName?: string;
    projectId?: string;
  };
}): Promise<string>;

Usage Examples

const url = await client.getRunUrl({
  runId: runId,
  projectOpts: {
    projectName: "my-project"
  }
});

console.log("View run:", url);

Group Runs

/**
 * List runs grouped by a specific field (e.g., conversation_id)
 * @param params - Group parameters
 * @returns Async iterable of grouped run summaries
 */
listGroupRuns(params: GroupRunsParams): AsyncIterable<Thread>;

interface GroupRunsParams {
  /** Project ID filter */
  projectId?: string;
  /** Project name filter */
  projectName?: string;
  /** Field to group by */
  groupBy: string;
  /** Filter query */
  filter?: string;
  /** Start time filter */
  startTime?: Date;
  /** End time filter */
  endTime?: Date;
  /** Limit results */
  limit?: number;
  /** Offset for pagination */
  offset?: number;
}

interface Thread {
  /** Group key */
  group_key: string;
  /** Number of runs in group */
  count: number;
  /** Total tokens */
  total_tokens: number;
  /** Total cost */
  total_cost: number | null;
  /** Min start time */
  min_start_time: string;
  /** Max start time */
  max_start_time: string;
  /** P50 latency */
  latency_p50: number;
  /** P99 latency */
  latency_p99: number;
  /** Feedback stats */
  feedback_stats: any | null;
  /** Filter string */
  filter: string;
  /** First inputs */
  first_inputs: string;
  /** Last outputs */
  last_outputs: string;
  /** Last error */
  last_error: string | null;
}

Usage Examples

// Group runs by conversation_id
for await (const thread of client.listGroupRuns({
  projectName: "chatbot-app",
  groupBy: "metadata.conversation_id",
  startTime: new Date("2024-01-01")
})) {
  console.log(`Conversation ${thread.group_key}:`);
  console.log(`  - ${thread.count} runs`);
  console.log(`  - ${thread.total_tokens} tokens`);
  console.log(`  - Cost: $${thread.total_cost}`);
  console.log(`  - P50 latency: ${thread.latency_p50}ms`);
}

// Group by user_id with filter
for await (const thread of client.listGroupRuns({
  projectName: "production-app",
  groupBy: "extra.user_id",
  filter: 'eq(run_type, "chain")',
  limit: 100
})) {
  console.log(`User ${thread.group_key}: ${thread.count} chains`);
}

Get Run Stats

/**
 * Get statistics for runs in a project
 * @param params - Stats parameters
 * @returns Promise resolving to run statistics
 */
getRunStats(params: {
  projectId?: string;
  projectName?: string;
  filter?: string;
  startTime?: Date;
  endTime?: Date;
}): Promise<Record<string, any>>;

Usage Examples

const stats = await client.getRunStats({
  projectName: "production-app",
  startTime: new Date("2024-01-01"),
  endTime: new Date("2024-01-31")
});

console.log("Total runs:", stats.run_count);
console.log("Error rate:", stats.error_rate);
console.log("Avg latency:", stats.avg_latency);
console.log("Total tokens:", stats.total_tokens);
console.log("Total cost:", stats.total_cost);

// With filters
const errorStats = await client.getRunStats({
  projectName: "production-app",
  filter: 'neq(error, null)',
  startTime: new Date("2024-01-01")
});

console.log("Failed runs:", errorStats.run_count);

Batch Operations

/**
 * Batch ingest multiple runs
 * @param runs - Array of run creation/update operations
 * @returns Promise resolving when batch ingestion completes
 */
batchIngestRuns(runs: {
  post?: RunCreate[];
  patch?: RunUpdate[];
}): Promise<void>;

/**
 * Multipart ingest for large run batches
 * @param runs - Array of run creation/update operations
 * @returns Promise resolving when multipart ingestion completes
 */
multipartIngestRuns(runs: {
  post?: RunCreate[];
  patch?: RunUpdate[];
}): Promise<void>;

Usage Examples

// Batch create runs
await client.batchIngestRuns({
  post: [
    {
      name: "run1",
      run_type: "chain",
      inputs: { query: "test1" }
    },
    {
      name: "run2",
      run_type: "chain",
      inputs: { query: "test2" }
    }
  ]
});

// Multipart ingest for large batches
await client.multipartIngestRuns({
  post: [
    {
      name: "large-run-1",
      run_type: "chain",
      inputs: { largeData: "..." },
      attachments: {
        "image.png": {
          mime_type: "image/png",
          data: "base64-encoded-data..."
        }
      }
    }
  ],
  patch: [
    {
      runId: "existing-run-id",
      outputs: { result: "updated" }
    }
  ]
});

Comparative Experiments

Methods for creating and managing comparative experiments for A/B testing different model configurations.

/**
 * Create a comparative experiment
 * @param params - Experiment parameters
 * @returns Promise resolving to comparative experiment object
 */
createComparativeExperiment(params: {
  name: string;
  experimentIds: string[];
  referenceDatasetId?: string;
  description?: string;
  metadata?: Record<string, any>;
}): Promise<ComparativeExperiment>;

interface ComparativeExperiment {
  /** Experiment ID */
  id: string;
  /** Experiment name */
  name: string;
  /** Description */
  description?: string;
  /** IDs of experiments being compared */
  experiment_ids: string[];
  /** Reference dataset ID */
  reference_dataset_id?: string;
  /** Creation timestamp */
  created_at: string;
  /** Modification timestamp */
  modified_at?: string;
  /** Metadata */
  metadata?: Record<string, any>;
}

Usage Examples

import { Client } from "langsmith";
import { evaluate } from "langsmith/evaluation";

const client = new Client();

// Run two experiments with different configurations
const experiment1 = await evaluate(model1, {
  data: "test-dataset",
  experimentPrefix: "gpt4-config",
});

const experiment2 = await evaluate(model2, {
  data: "test-dataset",
  experimentPrefix: "claude-config",
});

// Create comparative experiment
const comparison = await client.createComparativeExperiment({
  name: "Model Comparison: GPT-4 vs Claude",
  experimentIds: [experiment1.experimentId, experiment2.experimentId],
  description: "Comparing GPT-4 and Claude on customer support dataset",
  metadata: {
    use_case: "customer_support",
    date: new Date().toISOString(),
  },
});

console.log("Comparison created:", comparison.id);

Best Practices

Always Flush Before Shutdown

Runs are queued and sent in background batches, so flush any pending batches before the process exits to avoid dropping traces.

const client = new Client();

// ... create runs ...

// Before shutdown
await client.awaitPendingTraceBatches();

Use Appropriate Run Types

// LLM calls
{ run_type: "llm" }

// Sequences of operations
{ run_type: "chain" }

// Individual tools
{ run_type: "tool" }

// Document retrieval
{ run_type: "retriever" }

Add Meaningful Metadata

await client.createRun({
  name: "customer-query",
  run_type: "chain",
  extra: {
    userId: "user-123",
    sessionId: "session-456",
    version: "2.1.0"
  },
  tags: ["production", "customer-facing"]
});

Common Mistakes

❌ Not Setting run_type

Omitting run_type makes runs harder to filter and analyze.

// BAD
await client.createRun({
  name: "MyOperation",
  inputs: {...}
  // Missing run_type
});

// GOOD
await client.createRun({
  name: "MyOperation",
  run_type: "chain", // Explicit type
  inputs: {...}
});

❌ Using Deprecated session_id

Use project_name instead of deprecated session_id.

// BAD: Deprecated fields
await client.createRun({
  name: "MyRun",
  run_type: "chain",
  session_id: "session-123", // Deprecated
  session_name: "MySession" // Deprecated
});

// GOOD: Use project_name
await client.createRun({
  name: "MyRun",
  run_type: "chain",
  project_name: "my-project"
});

❌ Creating Runs with Future Timestamps

Future timestamps break analytics and time-based queries.

// BAD
await client.createRun({
  name: "MyRun",
  run_type: "chain",
  start_time: Date.now() + 1000000 // Future timestamp
});

// GOOD
await client.createRun({
  name: "MyRun",
  run_type: "chain",
  start_time: Date.now()
  // Or omit - auto-set to current time
});

❌ Loading All Runs into Memory

Avoid memory issues with large result sets.

// BAD: May cause OOM
const allRuns = [];
for await (const run of client.listRuns({ projectName: "big-project" })) {
  allRuns.push(run);
}

// GOOD: Stream processing
for await (const run of client.listRuns({
  projectName: "big-project",
  limit: 1000 // Safety limit
})) {
  await processRun(run);
  // run is garbage collected after processing
}

❌ Ignoring Filter Syntax

Using incorrect filter syntax causes queries to fail.

// BAD: Invalid filter syntax
filter: 'status == "success"' // Double equals not supported

// GOOD: Use correct comparators
filter: 'eq(status, "success")'
filter: 'and(eq(error, null), gte(latency, 1000))'
filter: 'has(tags, "production")'

See Decision Trees and Anti-Patterns for more guidance.

Related Documentation