tessl install tessl/npm-langsmith@0.4.3

TypeScript client SDK for the LangSmith LLM tracing, evaluation, and monitoring platform.
Client setup, configuration options, and utility methods.
Create a new LangSmith client instance with optional configuration.
/**
* Create a new LangSmith client instance
* @param config - Optional client configuration
*/
class Client {
constructor(config?: ClientConfig);
}
interface ClientConfig {
/** API URL for LangSmith (default: LANGCHAIN_ENDPOINT env var or https://api.smith.langchain.com) */
apiUrl?: string;
/** API key for authentication (default: LANGCHAIN_API_KEY env var) */
apiKey?: string;
/** Default timeout for requests in milliseconds (default: 120000) */
timeout_ms?: number;
/** Web URL for LangSmith UI (default: derived from apiUrl or https://smith.langchain.com) */
webUrl?: string;
/** Custom fetch implementation for custom HTTP handling (default: global fetch) */
fetchImplementation?: typeof fetch;
/** Enable auto-batching of trace uploads for better performance (default: true) */
autoBatchTracing?: boolean;
/** Batch size limit in bytes for trace batching (default: 20971520 / 20MB) */
batchSizeBytesLimit?: number;
/** Maximum number of operations to batch in a single request (default: undefined / no limit) */
batchSizeLimit?: number;
/** Maximum total memory in bytes for batch queues (default: 1073741824 / 1GB) */
maxIngestMemoryBytes?: number;
/** Number of concurrent batch uploads (default: 5) */
traceBatchConcurrency?: number;
/** Block on root run finalization, ensuring trace upload before continuing (default: false) */
blockOnRootRunFinalization?: boolean;
/** Hide inputs from traces - boolean for all, or function to transform/filter specific inputs (default: false) */
hideInputs?: boolean | ((inputs: KVMap) => KVMap | Promise<KVMap>);
/** Hide outputs from traces - boolean for all, or function to transform/filter specific outputs (default: false) */
hideOutputs?: boolean | ((outputs: KVMap) => KVMap | Promise<KVMap>);
/** Custom anonymizer function to transform traced data before sending (default: undefined) */
anonymizer?: (values: KVMap) => KVMap | Promise<KVMap>;
/** Whether to omit runtime information from traced runs like SDK version and platform (default: false) */
omitTracedRuntimeInfo?: boolean;
/** Workspace ID - required for org-scoped API keys (default: undefined) */
workspaceId?: string;
/** Custom fetch options passed to all HTTP requests (default: undefined) */
fetchOptions?: RequestInit;
/** Require manual .flush() calls before sending traces, useful for rate limit management (default: false) */
manualFlushMode?: boolean;
/** Sampling rate for tracing (0-1, where 1.0 = 100% of traces sent) (default: 1.0) */
tracingSamplingRate?: number;
/** Enable debug mode - logs all HTTP requests to console (default: false) */
debug?: boolean;
/** Advanced async request handling options - controls concurrency and queueing for API calls (default: undefined) */
callerOptions?: {
/** Maximum number of concurrent requests (default: 100) */
maxConcurrency?: number;
/** Maximum number of retries for failed requests */
maxRetries?: number;
};
/** Caching configuration - true for defaults, Cache instance for custom, false/undefined to disable (default: false) */
cache?: Cache | boolean;
}

import { Client } from "langsmith";
// Use environment variables
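// Reads LANGCHAIN_API_KEY and LANGCHAIN_ENDPOINT from the environment (see ClientConfig defaults above)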
const client = new Client();
// Explicit configuration
const client = new Client({
apiUrl: "https://api.smith.langchain.com",
apiKey: "your-api-key",
timeout_ms: 10000
});
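// Advanced options (a sketch; the values below are illustrative, not recommended defaults)
const tunedClient = new Client({
  callerOptions: { maxConcurrency: 10, maxRetries: 3 }, // concurrency and retry limits for API calls
  cache: true, // true enables caching with defaults; pass a Cache instance for custom behavior
  workspaceId: "your-workspace-id" // only required for org-scoped API keys
});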
// Production configuration
const client = new Client({
apiKey: process.env.LANGSMITH_API_KEY,
autoBatchTracing: true,
batchSizeBytesLimit: 20_000_000,
tracingSamplingRate: 0.1, // 10% sampling
hideInputs: (inputs) => redactPII(inputs),
hideOutputs: false
});

Get default configuration from environment variables.
/**
* Get default client configuration from environment
* @returns Object with default apiUrl, apiKey, webUrl, and privacy settings
*/
static getDefaultClientConfig(): {
apiUrl: string;
apiKey?: string;
webUrl?: string;
hideInputs?: boolean;
hideOutputs?: boolean;
};

import { Client } from "langsmith";
const defaultConfig = Client.getDefaultClientConfig();
console.log("API URL:", defaultConfig.apiUrl);
console.log("API Key configured:", !!defaultConfig.apiKey);Get the web UI host URL.
/**
* Get the host URL for the LangSmith web UI
* @returns The web UI host URL
*/
getHostUrl(): string;

import { Client } from "langsmith";
const client = new Client();
const webUrl = client.getHostUrl();
console.log("Web UI:", webUrl);
const projectUrl = `${client.getHostUrl()}/projects/my-project`;

Wait for all pending trace batches to flush.
/**
* Wait for all pending trace batches to flush
* @returns Promise resolving when all batches are flushed
*/
awaitPendingTraceBatches(): Promise<void>;

Manually flush pending batches (when manualFlushMode is enabled).
/**
* Manually flush pending trace batches
* @returns Promise resolving when flush completes
*/
flush(): Promise<void>;

import { Client } from "langsmith";
const client = new Client({ autoBatchTracing: true });
// ... create many runs ...
// Before shutting down, ensure all traces uploaded
await client.awaitPendingTraceBatches();
// For manual flush mode
const manualClient = new Client({ manualFlushMode: true });
// ... create runs ...
await manualClient.flush(); // Manually trigger upload

Access the prompt cache instance and clean up client resources.

/**
* Access the prompt cache instance
* @returns The Cache instance if configured
*/
get cache(): Cache | undefined;

/**
* Cleanup resources held by the client
* Stops background cache refresh timers
* Call when done using the client
*/
cleanup(): void;

import { Client } from "langsmith";
const client = new Client();
// Access cache
const cache = client.cache;
if (cache) {
console.log("Cache available");
}
// Cleanup when done
client.cleanup();
// Shutdown handler
process.on('SIGTERM', async () => {
await client.awaitPendingTraceBatches();
client.cleanup();
process.exit(0);
});

Configure privacy controls to hide or transform sensitive inputs and outputs.

import { Client } from "langsmith";
// Hide all inputs and outputs
const client = new Client({
hideInputs: true,
hideOutputs: true
});
// Selective hiding with functions
const client = new Client({
hideInputs: (inputs) => {
const { apiKey, password, ...safe } = inputs;
return safe;
},
hideOutputs: (outputs) => {
return {
...outputs,
email: "[REDACTED]"
};
}
});

Anonymize traced data with pattern-based replacement rules.

import { Client } from "langsmith";
import { createAnonymizer } from "langsmith/anonymizer";
const anonymizer = createAnonymizer([
{ pattern: /\b[\w\.-]+@[\w\.-]+\.\w+\b/g, replace: "[EMAIL]" },
{ pattern: /\bsk-[a-zA-Z0-9]{32,}\b/g, replace: "[API_KEY]" }
]);
const client = new Client({
anonymizer: anonymizer
});

Tune trace batching for upload throughput.

const client = new Client({
autoBatchTracing: true,
batchSizeBytesLimit: 20_000_000, // 20 MB
traceBatchConcurrency: 5 // 5 concurrent uploads
});

Sample only a fraction of traces to reduce volume.

const client = new Client({
tracingSamplingRate: 0.1 // Trace 10% of requests
});

Implement proper error handling for API calls.
import { Client } from "langsmith";
const client = new Client();
// Basic error handling
try {
const run = await client.readRun(runId);
console.log(run);
} catch (error) {
if (error.status === 404) {
console.error("Run not found");
} else if (error.status === 401) {
console.error("Authentication failed");
} else if (error.status === 429) {
console.error("Rate limit exceeded");
} else {
console.error("API error:", error.message);
}
}

Retry transient failures with exponential backoff.

import { Run } from "langsmith/schemas";

async function readRunWithRetry(
client: Client,
runId: string,
maxRetries = 3
): Promise<Run> {
for (let i = 0; i < maxRetries; i++) {
try {
return await client.readRun(runId);
} catch (error) {
// Don't retry on 404 or 401
if (error.status === 404 || error.status === 401) {
throw error;
}
// Retry on rate limits and transient errors
if (i < maxRetries - 1 && (error.status === 429 || error.status >= 500)) {
const delay = Math.pow(2, i) * 1000; // Exponential backoff
console.log(`Retrying after ${delay}ms...`);
await new Promise(resolve => setTimeout(resolve, delay));
continue;
}
throw error;
}
}
throw new Error("Max retries exceeded");
}

Handle per-run errors without aborting iteration.

async function safeListRuns(client: Client, projectName: string) {
try {
for await (const run of client.listRuns({ projectName })) {
try {
// Process each run safely
await processRun(run);
} catch (error) {
// Handle per-run errors without stopping iteration
console.error(`Error processing run ${run.id}:`, error.message);
}
}
} catch (error) {
// Handle iteration-level errors
if (error.status === 404) {
console.error("Project not found");
} else {
console.error("Failed to list runs:", error.message);
}
}
}

Degrade gracefully when tracing is unavailable, without re-running the operation on a tracing failure.

async function traceWithFallback(operation: () => Promise<any>) {
  const client = new Client();
  try {
    // Try to record a run for this operation
    await client.createRun({
      name: "operation",
      run_type: "chain",
      inputs: {},
      start_time: Date.now(),
    });
  } catch (error) {
    // If tracing fails, log but continue without tracing
    console.warn("Tracing unavailable, continuing without tracing:", error.message);
  }
  // Run the operation outside the tracing try/catch so its own errors
  // are not swallowed or misattributed to a tracing failure
  return await operation();
}

Cap result size when paginating through runs.

async function getAllRuns(
client: Client,
projectName: string,
maxRuns = 10000
): Promise<Run[]> {
const runs: Run[] = [];
try {
for await (const run of client.listRuns({ projectName, limit: maxRuns })) {
runs.push(run);
// Safety limit to prevent memory issues
if (runs.length >= maxRuns) {
console.warn(`Reached maximum run limit of ${maxRuns}`);
break;
}
}
} catch (error) {
console.error("Error fetching runs:", error.message);
// Return partial results
}
return runs;
}
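As a closing sketch, the helpers above can be combined with the flush and cleanup methods for a graceful shutdown; the project name and run limit here are placeholders.

import { Client } from "langsmith";

async function main() {
  const client = new Client({ autoBatchTracing: true });
  // Fetch up to 1,000 runs from a placeholder project using getAllRuns defined above
  const runs = await getAllRuns(client, "my-project", 1000);
  console.log(`Fetched ${runs.length} runs`);
  // Ensure all pending traces are uploaded, then release client resources
  await client.awaitPendingTraceBatches();
  client.cleanup();
}

main().catch((error) => {
  console.error("Unexpected failure:", error.message);
  process.exit(1);
});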