tessl install tessl/npm-langsmith@0.4.3
TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Essential utility functions and helpers for working with LangSmith.
langsmith (npm)
import {
uuid7,
uuid7FromTime,
getDefaultProjectName,
overrideFetchImplementation,
Cache,
__version__,
} from "langsmith";For CommonJS:
const {
uuid7,
uuid7FromTime,
getDefaultProjectName,
overrideFetchImplementation,
Cache,
__version__,
} = require("langsmith");
Generate a time-ordered UUID v7 identifier.
/**
* Generate a UUID v7 (time-ordered)
* @returns UUID v7 string
*/
function uuid7(): string;
Usage Examples:
import { uuid7 } from "langsmith";
// Generate run IDs
const runId = uuid7();
console.log(runId); // e.g., "01234567-89ab-7def-0123-456789abcdef"
// Use for custom run creation
import { Client } from "langsmith";
const client = new Client();
await client.createRun({
id: uuid7(),
name: "my-operation",
run_type: "chain",
inputs: { query: "test" },
});
Generate a UUID v7 from a specific timestamp.
/**
* Generate a UUID v7 from a specific timestamp
* @param timestamp - Unix timestamp in milliseconds (number) or ISO 8601 string
* @returns UUID v7 string
*/
function uuid7FromTime(timestamp: number | string): string;
Usage Examples:
import { uuid7FromTime } from "langsmith";
// Generate UUID for specific time (number timestamp)
const pastTime = Date.now() - 86400000; // 24 hours ago
const uuid = uuid7FromTime(pastTime);
// Using ISO 8601 string
const uuidFromString = uuid7FromTime("2024-01-01T00:00:00Z");
// Useful for backfilling data
const historicalRunId = uuid7FromTime(1704067200000); // Jan 1, 2024
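Because UUID v7 stores the timestamp in its most significant bits, IDs produced by uuid7() and uuid7FromTime() sort lexicographically in creation-time order, which is what makes them handy for backfills. A small illustration, continuing the example above:
// IDs generated from earlier timestamps compare lower as plain strings,
// so runs can be ordered chronologically by ID alone
const earlier = uuid7FromTime("2024-01-01T00:00:00Z");
const later = uuid7FromTime("2024-06-01T00:00:00Z");
console.log(earlier < later); // true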
Get the default project name from environment or generate one.
/**
* Get default project name
* @returns Project name from LANGCHAIN_PROJECT env var or auto-generated
*/
function getDefaultProjectName(): string;
Usage Examples:
import { getDefaultProjectName } from "langsmith";
// Get current default project
const projectName = getDefaultProjectName();
console.log("Default project:", projectName);
// Use in traceable functions
import { traceable } from "langsmith/traceable";
const myFunction = traceable(
async (input: string) => processInput(input),
{
name: "my-function",
project_name: getDefaultProjectName(), // Use default project
}
);
Override the global fetch implementation used by LangSmith.
/**
* Override fetch implementation
* @param fetchImpl - Custom fetch function
*/
function overrideFetchImplementation(
fetchImpl: typeof fetch
): void;
Usage Examples:
import { overrideFetchImplementation } from "langsmith";
// Use custom fetch with proxy
import fetchWithProxy from "node-fetch-with-proxy";
overrideFetchImplementation(fetchWithProxy as typeof fetch);
// Or add custom headers to all requests
const customFetch: typeof fetch = async (input, init) => {
const headers = new Headers(init?.headers);
headers.set("X-Custom-Header", "value");
return fetch(input, {
...init,
headers,
});
};
overrideFetchImplementation(customFetch);
// Or add request logging
const loggingFetch: typeof fetch = async (input, init) => {
console.log("Fetch request:", input);
const response = await fetch(input, init);
console.log("Fetch response:", response.status);
return response;
};
overrideFetchImplementation(loggingFetch);
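Another use is enforcing a request timeout. A minimal sketch using the standard AbortSignal.timeout() helper (Node 18+); it only applies the timeout when the caller has not already supplied a signal:
const timeoutFetch: typeof fetch = async (input, init) => {
return fetch(input, {
...init,
// Apply a 30-second timeout unless the caller already set a signal
signal: init?.signal ?? AbortSignal.timeout(30000),
});
};
overrideFetchImplementation(timeoutFetch);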
Prompt caching system for LangSmith. Note: This cache is specifically designed for caching LangSmith prompts (PromptCommit objects), not for general key-value caching.
/**
* Prompt cache for reducing API calls when fetching prompts
* Specialized for caching PromptCommit objects
*/
class Cache {
/**
* Create a cache instance
* @param config - Cache configuration
*/
constructor(config?: CacheConfig);
/**
* Get value from cache
* @param key - Cache key
* @returns Cached PromptCommit or undefined
*/
get(key: string): Promise<PromptCommit | undefined>;
/**
* Set value in cache
* @param key - Cache key
* @param value - PromptCommit object to cache
*/
set(key: string, value: PromptCommit): void;
/**
* Invalidate/remove a cached entry
* @param key - Cache key to invalidate
*/
invalidate(key: string): void;
/**
* Clear entire cache
*/
clear(): void;
/**
* Cache metrics property (readonly)
*/
get metrics(): Readonly<CacheMetrics>;
}
interface CacheConfig {
/**
* Maximum number of cached items
* @default 100
*/
maxSize?: number;
/**
* Time-to-live in seconds for cached items
* Set to null to disable TTL
* @default 3600 (1 hour)
*/
ttlSeconds?: number | null;
/**
* Interval in seconds for refreshing cached items
* @default 60
*/
refreshIntervalSeconds?: number;
/**
* Custom fetch function for retrieving prompts
* @param key - Cache key
* @returns Promise resolving to PromptCommit
*/
fetchFunc?: (key: string) => Promise<PromptCommit>;
}
interface CacheMetrics {
/** Number of cache hits */
hits: number;
/** Number of cache misses */
misses: number;
/** Cache hit rate (0-1) */
hitRate: number;
/** Current cache size (number of items) */
size: number;
}
interface PromptCommit {
/** Prompt owner */
owner: string;
/** Prompt repository name */
repo: string;
/** Commit hash */
commit_hash: string;
/** Prompt manifest */
manifest: object;
/** Additional prompt data */
[key: string]: any;
}
Usage Examples:
import { Cache, Client } from "langsmith";
// Create cache instance
const cache = new Cache({
maxSize: 100, // Max 100 cached prompts
ttlSeconds: 3600, // 1 hour TTL
refreshIntervalSeconds: 60, // Refresh every 60 seconds
});
// Use with client for automatic prompt caching
const client = new Client({
cache: cache,
});
// Or enable default cache
const clientWithCache = new Client({
cache: true, // Uses default cache settings
});
// Manual cache operations with prompts
const promptCommit: PromptCommit = await client.pullPrompt("my-prompt");
cache.set("my-prompt:latest", promptCommit);
// Get cached prompt
const cached = await cache.get("my-prompt:latest");
if (cached) {
console.log("Using cached prompt");
}
// Invalidate a cached prompt when you know it's stale
cache.invalidate("my-prompt:latest");
// Get cache statistics
const metrics = cache.metrics; // Readonly CacheMetrics: hits, misses, hitRate, size
console.log(`Hit rate: ${(metrics.hitRate * 100).toFixed(2)}%`);
console.log(`Cached items: ${metrics.size}`);
console.log(`Hits: ${metrics.hits}, Misses: ${metrics.misses}`);
// Clear all cached prompts
cache.clear();
// Cache with custom fetch function
const customCache = new Cache({
maxSize: 50,
ttlSeconds: 7200, // 2 hours
fetchFunc: async (key: string) => {
console.log(`Fetching prompt: ${key}`);
return await client.pullPrompt(key);
},
});
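The cache can also serve as a manual read-through layer when pulling prompts outside of the Client; this mirrors what the fetchFunc option automates. The sketch below reuses the cache and client from the examples above, and the ":latest" key suffix and the PromptCommit return type of pullPrompt are assumptions carried over from those examples:
// Read-through lookup: only call the API on a cache miss
async function getCachedPrompt(name: string): Promise<PromptCommit> {
const key = `${name}:latest`; // Illustrative key scheme; any stable string works
const cached = await cache.get(key);
if (cached) {
return cached; // Cache hit: no network call
}
const fresh = await client.pullPrompt(name); // Cache miss: fetch and store
cache.set(key, fresh);
return fresh;
}
const latestPrompt = await getCachedPrompt("my-prompt");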
Package version constant.
/**
* Package version constant
*/
const __version__: string;
Usage Examples:
import { __version__ } from "langsmith";
console.log("LangSmith SDK version:", __version__);
// Include in logs or metadata
import { traceable } from "langsmith/traceable";
const myFunction = traceable(
async (input: string) => processInput(input),
{
name: "my-function",
metadata: {
sdkVersion: __version__,
nodeVersion: process.version,
},
}
);
// Version checks
if (__version__.startsWith("0.4")) {
console.log("Using LangSmith SDK v0.4.x");
}
Return the current run tree from within a traceable-wrapped function. Available from langsmith/traceable or langsmith/singletons/traceable.
/**
* Get the current run tree from within a traceable function
* Throws an error if called outside of a traceable function
* @returns The current run tree
*/
function getCurrentRunTree(): RunTree;
/**
* Get the current run tree, with explicit permitAbsentRunTree flag
* @param permitAbsentRunTree - If false, throws error when no run tree exists
* @returns The current run tree
*/
function getCurrentRunTree(permitAbsentRunTree: false): RunTree;
/**
* Get the current run tree, permitting undefined result
* @param permitAbsentRunTree - If true, returns undefined when no run tree exists
* @returns The current run tree or undefined
*/
function getCurrentRunTree(permitAbsentRunTree: boolean): RunTree | undefined;
Import Options:
// Primary import path
import { getCurrentRunTree } from "langsmith/traceable";
// Alternative import path (singletons module)
import { getCurrentRunTree } from "langsmith/singletons/traceable";
Usage Examples:
import { traceable, getCurrentRunTree } from "langsmith/traceable";
const processData = traceable(
async (data: string[]) => {
// Access current run tree to add metadata
const runTree = getCurrentRunTree();
console.log("Current run ID:", runTree.id);
console.log("Current run name:", runTree.name);
// Add custom metadata
runTree.extra = { ...runTree.extra, customField: "value" };
return data.map(item => item.toUpperCase());
},
{ name: "process-data", run_type: "chain" }
);
// Using permitAbsentRunTree to avoid errors
function safeGetRunTree() {
const runTree = getCurrentRunTree(true); // Returns undefined if not in traceable context
if (runTree) {
console.log("Inside traceable function");
} else {
console.log("Outside traceable function");
}
}
// Error handling when run tree is required
try {
const runTree = getCurrentRunTree(); // Throws if not in traceable context
console.log("Run tree:", runTree);
} catch (error) {
console.error("Not in a traceable function context");
}
Execute a function with a specific run tree as context. Available from langsmith/traceable or langsmith/singletons/traceable.
/**
* Execute function with run tree context
* @param runTree - Run tree to set as context
* @param fn - Function to execute
* @returns Promise resolving to the result of function execution
*/
function withRunTree<Fn extends (...args: any[]) => any>(
runTree: RunTree,
fn: Fn
): Promise<Awaited<ReturnType<Fn>>>;
Import Options:
// Primary import path
import { withRunTree } from "langsmith/traceable";
// Alternative import path (singletons module)
import { withRunTree } from "langsmith/singletons/traceable";
Usage Examples:
import { withRunTree } from "langsmith/traceable";
import { RunTree } from "langsmith";
// Create a run tree
const runTree = new RunTree({
name: "custom-run",
run_type: "chain",
});
// Execute with context
const result = await withRunTree(runTree, async () => {
// getCurrentRunTree() will return this runTree
return await someOperation();
});
await runTree.end({ result });
await runTree.postRun();
// Useful for testing or custom run management
const testRun = new RunTree({
name: "test-run",
run_type: "chain",
metadata: { test: true },
});
await withRunTree(testRun, async () => {
// All operations here will be in the context of testRun
await myTracedFunction();
});
Check if a function has been wrapped with the traceable() decorator.
/**
* Check if a function is traceable
* @param x - Value to check
* @returns True if the value is a traceable function
*/
function isTraceableFunction(x: unknown): x is TraceableFunction<any>;
Import:
import { isTraceableFunction } from "langsmith/traceable";
Usage Examples:
import { traceable, isTraceableFunction } from "langsmith/traceable";
const myFunction = traceable(
async (input: string) => input.toUpperCase(),
{ name: "uppercase" }
);
const regularFunction = (input: string) => input.toUpperCase();
console.log(isTraceableFunction(myFunction)); // true
console.log(isTraceableFunction(regularFunction)); // false
// Use for conditional tracing
function processWithOptionalTracing(fn: Function, input: any) {
if (isTraceableFunction(fn)) {
console.log("Function is already traced");
return fn(input);
} else {
console.log("Wrapping function with tracing");
return traceable(fn, { name: "dynamic-trace" })(input);
}
}
Convert epoch and run ID to dotted order format for distributed tracing. Available from langsmith/run_trees.
/**
* Convert epoch timestamp and run ID to dotted order format
* Used for distributed tracing to maintain trace hierarchy across services
* @param epoch - Unix timestamp in milliseconds
* @param runId - Run UUID
* @param executionOrder - Execution order number (optional, defaults to 1)
* @returns Dotted order string representation
*/
function convertToDottedOrderFormat(
epoch: number,
runId: string,
executionOrder?: number
): string;
Import:
import { convertToDottedOrderFormat } from "langsmith/run_trees";
Usage Examples:
import { convertToDottedOrderFormat } from "langsmith/run_trees";
import { RunTree } from "langsmith";
import { uuid7 } from "langsmith";
// Create dotted order for distributed tracing
const runId = uuid7();
const dottedOrder = convertToDottedOrderFormat(
Date.now(),
runId,
1 // Execution order
);
console.log(dottedOrder); // e.g., "20240101T120000.01234567-89ab-7def-0123-456789abcdef.00000001"
// Use with RunTree for distributed tracing
const runTree = RunTree.fromDottedOrder(dottedOrder);
// Propagate trace across services via headers
const headers = {
...runTree.toHeaders(), // toHeaders() returns the trace propagation headers (e.g., "langsmith-trace")
};
// Send to downstream service
await fetch("https://api.example.com/process", {
method: "POST",
headers,
body: JSON.stringify({ data: "..." }),
});
// In downstream service, reconstruct run tree
import { RunTree } from "langsmith";
const incomingHeaders = request.headers;
const parentRunTree = RunTree.fromHeaders(incomingHeaders);
// Create child run tree
const childRun = await parentRunTree.createChild({
name: "downstream-operation",
run_type: "chain",
});
Related best practices:
// Always use uuid7() for run IDs (time-ordered)
import { uuid7 } from "langsmith";
const runId = uuid7(); // Better for distributed systems
// Don't use random UUIDs
import { randomUUID } from "crypto";
const badId = randomUUID(); // Loses time ordering
// Use environment variable for default project
process.env.LANGCHAIN_PROJECT = "my-app-production";
// Or get programmatically
import { getDefaultProjectName } from "langsmith";
const projectName = getDefaultProjectName();
// Enable caching for production (prompts only)
import { Client, Cache } from "langsmith";
const promptCache = new Cache({
maxSize: 100, // Max 100 cached prompts
ttlSeconds: 7200, // 2 hours
});
const client = new Client({
cache: promptCache,
});
// Monitor performance of the same cache instance the client uses
setInterval(() => {
const metrics = promptCache.metrics; // Property getter, not a method
console.log(`Cache hit rate: ${(metrics.hitRate * 100).toFixed(2)}%`);
console.log(`Cached prompts: ${metrics.size}`);
console.log(`Hits: ${metrics.hits}, Misses: ${metrics.misses}`);
}, 60000); // Every minute
// Use for proxy configuration
import { overrideFetchImplementation } from "langsmith";
import { ProxyAgent } from "undici";
const agent = new ProxyAgent("http://proxy.example.com:8080");
overrideFetchImplementation((url, init) => {
return fetch(url, {
...init,
// `dispatcher` is a Node/undici extension, so cast past the standard RequestInit type
dispatcher: agent,
} as RequestInit);
});