Observability and analytics platform for LLM applications with hierarchical tracing, prompt management, dataset operations, and OpenAI integration
Comprehensive tracing capabilities for observability in LLM applications. Langfuse provides hierarchical tracing with traces containing nested spans, generations (LLM calls), and events. All client classes are chainable for fluent API composition.
The primary method for creating a new trace.
/**
* Creates a new trace
* @param body - Optional trace configuration
* @returns Trace client for chaining operations
* @remarks Events are buffered client-side; call langfuse.flushAsync() to send them.
*/
trace(body?: CreateLangfuseTraceBody): LangfuseTraceClient;
/**
* Request body for creating a trace. All fields are optional;
* timestamp defaults to now when omitted.
*/
interface CreateLangfuseTraceBody {
/** Custom trace ID */
id?: string;
/** Trace name */
name?: string;
/** Timestamp (defaults to now) */
timestamp?: Date | string;
/** Input data */
input?: any;
/** Output data */
output?: any;
/** Session ID for grouping related traces */
sessionId?: string;
/** User ID */
userId?: string;
/** Custom metadata */
metadata?: any;
/** Tags for filtering */
tags?: string[];
/** Release version */
release?: string;
/** Version identifier */
version?: string;
/** Make trace publicly accessible */
public?: boolean;
/** Environment name */
environment?: string;
}

Usage Example:
import { Langfuse } from 'langfuse';
const langfuse = new Langfuse();
const trace = langfuse.trace({
name: 'chat-pipeline',
userId: 'user-123',
sessionId: 'session-456',
metadata: { platform: 'web' },
tags: ['production', 'chat'],
input: { message: 'Hello!' }
});

Client for interacting with a trace. Provides methods for updating the trace and creating child observations.
/**
* Client for a single trace. Chainable: update() and score() return `this`;
* span(), generation(), and event() return child observation clients.
*/
class LangfuseTraceClient {
/** The parent Langfuse client */
client: LangfuseCore;
/** The trace ID */
id: string;
/** The trace ID (alias for id) */
traceId: string;
/** Always null for trace clients */
observationId: null;
/**
* Updates the trace with new data
* @param body - Trace update body (omit id)
* @returns This trace client for chaining
*/
update(body: Omit<CreateLangfuseTraceBody, "id">): this;
/**
* Creates a child span
* @param body - Span configuration (traceId and parentObservationId are auto-set)
* @returns Span client for chaining
*/
span(body: Omit<CreateLangfuseSpanBody, "traceId" | "parentObservationId">): LangfuseSpanClient;
/**
* Creates a child generation (LLM call)
* @param body - Generation configuration
* @returns Generation client for chaining
*/
generation(
body: Omit<CreateLangfuseGenerationBody, "traceId" | "parentObservationId" | "promptName" | "promptVersion"> & PromptInput
): LangfuseGenerationClient;
/**
* Creates a child event
* @param body - Event configuration
* @returns Event client for chaining
*/
event(body: Omit<CreateLangfuseEventBody, "traceId" | "parentObservationId">): LangfuseEventClient;
/**
* Adds a score to the trace
* @param body - Score configuration (traceId is auto-set)
* @returns This trace client for chaining
*/
score(body: Omit<CreateLangfuseScoreBody, "traceId" | "observationId">): this;
/**
* Returns the URL to view the trace in Langfuse UI
* @returns Trace URL
*/
getTraceUrl(): string;
}

Usage Example:
const trace = langfuse.trace({ name: 'my-app' });
// Update trace with output
trace.update({
output: { success: true },
metadata: { duration: 1234 }
});
// Add a score
trace.score({
name: 'user-feedback',
value: 5,
comment: 'Excellent response'
});
// Get trace URL
console.log(trace.getTraceUrl());

Spans represent units of work within a trace (e.g., a function call, API request, or processing step).
/**
* Creates a span observation
* @param body - Span configuration with traceId
* @returns Span client for chaining operations
* @remarks Prefer trace.span(), which sets traceId automatically; when calling
* langfuse.span() directly, pass body.traceId to attach the span to a trace.
*/
span(body: CreateLangfuseSpanBody): LangfuseSpanClient;
/**
* Request body for creating a span observation.
*/
interface CreateLangfuseSpanBody {
/** Custom span ID */
id?: string;
/** Parent trace ID — auto-set when created via trace.span() or another observation client; pass explicitly for standalone spans */
traceId?: string;
/** Parent observation ID for nesting */
parentObservationId?: string;
/** Span name */
name?: string;
/** Start timestamp (defaults to now) */
startTime?: Date | string;
/** End timestamp */
endTime?: Date | string;
/** Custom metadata */
metadata?: any;
/** Input data */
input?: any;
/** Output data */
output?: any;
/** Log level: DEBUG, DEFAULT, WARNING, ERROR */
level?: ApiObservationLevel;
/** Status message */
statusMessage?: string;
/** Version identifier */
version?: string;
}

Usage Example:
// Create span from trace
const span = trace.span({
name: 'data-processing',
input: { records: 100 },
metadata: { source: 'database' }
});
// Or create standalone span
const span2 = langfuse.span({
traceId: 'trace-123',
name: 'api-call'
});

Client for interacting with a span observation. Supports nesting and updating.
/**
* Client for a span observation. update(), end(), and score() return `this`;
* span(), generation(), and event() create nested child observations.
*/
class LangfuseSpanClient {
/** The parent Langfuse client */
client: LangfuseCore;
/** The span ID */
id: string;
/** The parent trace ID */
traceId: string;
/** The span ID (alias for id) */
observationId: string;
/**
* Updates the span with new data
* @param body - Span update body
* @returns This span client for chaining
*/
update(body: Omit<UpdateLangfuseSpanBody, "id" | "traceId">): this;
/**
* Ends the span by setting endTime to now
* @param body - Optional data to update when ending
* @returns This span client for chaining
*/
end(body?: Omit<UpdateLangfuseSpanBody, "id" | "endTime" | "traceId">): this;
/**
* Creates a nested span
* @param body - Span configuration
* @returns Nested span client
*/
span(body: Omit<CreateLangfuseSpanBody, "traceId" | "parentObservationId">): LangfuseSpanClient;
/**
* Creates a nested generation
* @param body - Generation configuration
* @returns Nested generation client
*/
generation(
body: Omit<CreateLangfuseGenerationBody, "traceId" | "parentObservationId" | "promptName" | "promptVersion"> & PromptInput
): LangfuseGenerationClient;
/**
* Creates a nested event
* @param body - Event configuration
* @returns Nested event client
*/
event(body: Omit<CreateLangfuseEventBody, "traceId" | "parentObservationId">): LangfuseEventClient;
/**
* Adds a score to the span
* @param body - Score configuration
* @returns This span client for chaining
*/
score(body: Omit<CreateLangfuseScoreBody, "traceId" | "observationId">): this;
/**
* Returns the URL to view the parent trace in Langfuse UI
* @returns Trace URL
*/
getTraceUrl(): string;
}
/**
* Body for updating an existing span. All fields are optional;
* mirrors CreateLangfuseSpanBody.
*/
interface UpdateLangfuseSpanBody {
/** ID of the span to update */
id?: string;
/** Parent trace ID */
traceId?: string;
/** Parent observation ID for nesting */
parentObservationId?: string;
/** Span name */
name?: string;
/** Start timestamp */
startTime?: Date | string;
/** End timestamp */
endTime?: Date | string;
/** Custom metadata */
metadata?: any;
/** Input data */
input?: any;
/** Output data */
output?: any;
/** Log level: DEBUG, DEFAULT, WARNING, ERROR */
level?: ApiObservationLevel;
/** Status message */
statusMessage?: string;
/** Version identifier */
version?: string;
}

Usage Example:
const span = trace.span({ name: 'process-data' });
// Update span with results
span.update({
output: { processed: 100 },
metadata: { duration: 500 }
});
// End the span
span.end({ statusMessage: 'completed' });
// Create nested span
const nestedSpan = span.span({ name: 'sub-process' });

Generations represent LLM calls with token usage tracking and model information.
/**
* Creates a generation observation (LLM call)
* @param body - Generation configuration
* @returns Generation client for chaining operations
* @remarks A generation is a span specialized for LLM calls: the body extends
* CreateLangfuseSpanBody with model, modelParameters, and usage fields.
*/
generation(body: CreateLangfuseGenerationBody): LangfuseGenerationClient;
/**
* Request body for creating a generation; extends CreateLangfuseSpanBody
* with model and token-usage fields.
*/
interface CreateLangfuseGenerationBody extends CreateLangfuseSpanBody {
/** Model name (e.g., gpt-4, claude-3-opus) */
model?: string;
/** Model parameters (temperature, max_tokens, etc.) */
modelParameters?: any;
/** Token usage information */
usage?: Usage;
/** Detailed usage information with provider-specific schemas (OpenAI, etc.) */
usageDetails?: UsageDetails;
/** When completion started (for streaming) */
completionStartTime?: Date | string;
}
/**
* Aggregate usage for a generation: input/output/total counts in the
* given unit, plus optional USD costs.
*/
interface Usage {
/** Input tokens */
input?: number;
/** Output tokens */
output?: number;
/** Total tokens */
total?: number;
/** Usage unit (TOKENS, CHARACTERS, etc.) */
unit?: ApiModelUsageUnit;
/** Input cost in USD */
inputCost?: number;
/** Output cost in USD */
outputCost?: number;
/** Total cost in USD */
totalCost?: number;
}
/** Unit in which usage amounts are measured */
type ApiModelUsageUnit =
| "CHARACTERS"
| "TOKENS"
| "MILLISECONDS"
| "SECONDS"
| "IMAGES"
| "REQUESTS";
/**
* Detailed usage breakdown: either a free-form map of numeric counters,
* or an OpenAI-style usage object (chat completions / responses schema).
*/
type UsageDetails =
| { [key: string]: number }
| OpenAICompletionUsageSchema
| OpenAIResponseUsageSchema;
/** Usage shape as returned by the OpenAI chat completions API */
interface OpenAICompletionUsageSchema {
/** Tokens in the generated completion */
completion_tokens?: number;
/** Tokens in the prompt */
prompt_tokens?: number;
/** Total tokens (presumably prompt + completion — verify against OpenAI docs) */
total_tokens?: number;
}
/** Extended OpenAI usage shape including per-category token breakdowns */
interface OpenAIResponseUsageSchema {
completion_tokens?: number;
prompt_tokens?: number;
total_tokens?: number;
/** Breakdown of completion tokens */
completion_tokens_details?: {
reasoning_tokens?: number;
accepted_prediction_tokens?: number;
rejected_prediction_tokens?: number;
};
/** Breakdown of prompt tokens */
prompt_tokens_details?: {
cached_tokens?: number;
audio_tokens?: number;
};
}

Usage Example:
const generation = trace.generation({
name: 'chat-completion',
model: 'gpt-4',
input: [
{ role: 'system', content: 'You are a helpful assistant' },
{ role: 'user', content: 'Hello!' }
],
output: {
role: 'assistant',
content: 'Hi! How can I help you?'
},
usage: {
input: 25,
output: 15,
total: 40
},
modelParameters: {
temperature: 0.7,
max_tokens: 500
}
});

Client for interacting with a generation observation. Includes prompt linking support.
/**
* Client for a generation observation. update(), end(), and score() return
* `this`; span(), generation(), and event() create nested child observations.
* update() and end() accept a PromptInput mixin for prompt linking.
*/
class LangfuseGenerationClient {
/** The parent Langfuse client */
client: LangfuseCore;
/** The generation ID */
id: string;
/** The parent trace ID */
traceId: string;
/** The generation ID (alias for id) */
observationId: string;
/**
* Updates the generation with new data
* @param body - Generation update body
* @returns This generation client for chaining
*/
update(
body: Omit<UpdateLangfuseGenerationBody, "id" | "traceId" | "promptName" | "promptVersion"> & PromptInput
): this;
/**
* Ends the generation by setting endTime to now
* @param body - Optional data to update when ending
* @returns This generation client for chaining
*/
end(
body?: Omit<UpdateLangfuseGenerationBody, "id" | "traceId" | "endTime" | "promptName" | "promptVersion"> & PromptInput
): this;
/**
* Creates a nested span
* @param body - Span configuration
* @returns Nested span client
*/
span(body: Omit<CreateLangfuseSpanBody, "traceId" | "parentObservationId">): LangfuseSpanClient;
/**
* Creates a nested generation
* @param body - Generation configuration
* @returns Nested generation client
*/
generation(
body: Omit<CreateLangfuseGenerationBody, "traceId" | "parentObservationId" | "promptName" | "promptVersion"> & PromptInput
): LangfuseGenerationClient;
/**
* Creates a nested event
* @param body - Event configuration
* @returns Nested event client
*/
event(body: Omit<CreateLangfuseEventBody, "traceId" | "parentObservationId">): LangfuseEventClient;
/**
* Adds a score to the generation
* @param body - Score configuration
* @returns This generation client for chaining
*/
score(body: Omit<CreateLangfuseScoreBody, "traceId" | "observationId">): this;
/**
* Returns the URL to view the parent trace in Langfuse UI
* @returns Trace URL
*/
getTraceUrl(): string;
}
/**
* Body for updating an existing generation; extends UpdateLangfuseSpanBody
* with model-specific fields.
*/
interface UpdateLangfuseGenerationBody extends UpdateLangfuseSpanBody {
/** Model name */
model?: string;
/** Model parameters (temperature, max_tokens, etc.) */
modelParameters?: any;
/** Token usage information */
usage?: Usage;
/** When completion started (for streaming) */
completionStartTime?: Date | string;
}
/** Mixin accepted by generation create/update bodies to link a prompt */
interface PromptInput {
/** Link to a prompt client or record for automatic tracking */
prompt?: LangfusePromptClient | LangfusePromptRecord;
}

Usage Example:
const generation = trace.generation({
name: 'gpt-call',
model: 'gpt-4'
});
// Stream response and update incrementally
generation.update({
output: partialResponse,
usage: { input: 25, output: 10 }
});
// Complete the generation
generation.end({
output: finalResponse,
usage: { input: 25, output: 50, total: 75 },
statusMessage: 'completed'
});
// Link a prompt
const promptClient = await langfuse.getPrompt('chat-template');
const gen = trace.generation({
name: 'templated-chat',
prompt: promptClient,
model: 'gpt-4'
});

Events represent single-point observations (e.g., logs, errors, or notable occurrences).
/**
* Creates an event observation
* @param body - Event configuration
* @returns Event client for chaining operations
* @remarks Events are point-in-time observations: the body has a startTime
* but no endTime.
*/
event(body: CreateLangfuseEventBody): LangfuseEventClient;
/**
* Request body for creating an event observation (single point in time).
*/
interface CreateLangfuseEventBody {
/** Custom event ID */
id?: string;
/** Parent trace ID */
traceId?: string;
/** Parent observation ID for nesting */
parentObservationId?: string;
/** Event name */
name?: string;
/** Timestamp (defaults to now) */
startTime?: Date | string;
/** Custom metadata */
metadata?: any;
/** Input data */
input?: any;
/** Output data */
output?: any;
/** Log level: DEBUG, DEFAULT, WARNING, ERROR */
level?: ApiObservationLevel;
/** Status message */
statusMessage?: string;
/** Version identifier */
version?: string;
}
type ApiObservationLevel = "DEBUG" | "DEFAULT" | "WARNING" | "ERROR";

Usage Example:
// Create event from trace
const event = trace.event({
name: 'user-input-received',
input: { query: 'What is AI?' },
level: 'DEFAULT'
});
// Create error event
trace.event({
name: 'api-error',
level: 'ERROR',
statusMessage: 'Rate limit exceeded',
metadata: { errorCode: 429 }
});

Client for interacting with an event observation. Events can also have nested observations.
/**
* Client for an event observation. score() returns `this`; span(),
* generation(), and event() create nested child observations.
* Unlike span/generation clients, event clients expose no update() or end().
*/
class LangfuseEventClient {
/** The parent Langfuse client */
client: LangfuseCore;
/** The event ID */
id: string;
/** The parent trace ID */
traceId: string;
/** The event ID (alias for id) */
observationId: string;
/**
* Creates a nested span
* @param body - Span configuration
* @returns Nested span client
*/
span(body: Omit<CreateLangfuseSpanBody, "traceId" | "parentObservationId">): LangfuseSpanClient;
/**
* Creates a nested generation
* @param body - Generation configuration
* @returns Nested generation client
*/
generation(
body: Omit<CreateLangfuseGenerationBody, "traceId" | "parentObservationId" | "promptName" | "promptVersion"> & PromptInput
): LangfuseGenerationClient;
/**
* Creates a nested event
* @param body - Event configuration
* @returns Nested event client
*/
event(body: Omit<CreateLangfuseEventBody, "traceId" | "parentObservationId">): LangfuseEventClient;
/**
* Adds a score to the event
* @param body - Score configuration
* @returns This event client for chaining
*/
score(body: Omit<CreateLangfuseScoreBody, "traceId" | "observationId">): this;
/**
* Returns the URL to view the parent trace in Langfuse UI
* @returns Trace URL
*/
getTraceUrl(): string;
}

Scores evaluate traces or observations with numeric, boolean, or categorical values.
/**
* Creates a score for a trace or observation
* @param body - Score configuration
* @returns Langfuse instance for chaining
* @remarks Unlike the .score() methods on trace/observation clients,
* body.traceId is required here because there is no implicit context.
*/
score(body: CreateLangfuseScoreBody): this;
/**
* Request body for creating a score attached to a trace or to one of
* its observations (via observationId).
*/
interface CreateLangfuseScoreBody {
/** Custom score ID */
id?: string;
/** Trace ID to score */
traceId: string;
/** Observation ID to score (if scoring an observation) */
observationId?: string;
/** Score name */
name: string;
/** Score value */
value: number | string | boolean;
/** Optional comment */
comment?: string;
/** Data type: NUMERIC, BOOLEAN, CATEGORICAL */
dataType?: ApiScoreDataType;
/** Score configuration ID */
configId?: string;
}
type ApiScoreDataType = "NUMERIC" | "BOOLEAN" | "CATEGORICAL";

Usage Example:
// Score a trace
langfuse.score({
traceId: 'trace-123',
name: 'quality',
value: 0.95,
dataType: 'NUMERIC',
comment: 'High quality response'
});
// Score from trace client
trace.score({
name: 'user-satisfaction',
value: 5,
dataType: 'NUMERIC'
});
// Score an observation
generation.score({
name: 'accuracy',
value: true,
dataType: 'BOOLEAN'
});

Retrieve existing traces and observations for analysis.
// Read APIs — all fetch* methods are async and resolve to API response objects.
/**
* Fetches traces with optional filtering
* @param query - Filter and pagination options
* @returns Paginated traces response
*/
fetchTraces(query?: GetLangfuseTracesQuery): Promise<GetLangfuseTracesResponse>;
/**
* Fetches a specific trace by ID
* @param traceId - Trace identifier
* @returns Trace with full details
*/
fetchTrace(traceId: string): Promise<{ data: GetLangfuseTraceResponse }>;
/**
* Fetches observations with optional filtering
* @param query - Filter and pagination options
* @returns Paginated observations response
*/
fetchObservations(query?: GetLangfuseObservationsQuery): Promise<GetLangfuseObservationsResponse>;
/**
* Fetches a specific observation by ID
* @param observationId - Observation identifier
* @returns Observation details
*/
fetchObservation(observationId: string): Promise<{ data: GetLangfuseObservationResponse }>;
/**
* Fetches sessions with optional filtering
* @param query - Filter and pagination options
* @returns Paginated sessions response
*/
fetchSessions(query?: GetLangfuseSessionsQuery): Promise<GetLangfuseSessionsResponse>;
/** Query filters and pagination options for fetchTraces */
interface GetLangfuseTracesQuery {
/** Page number */
page?: number;
/** Page size */
limit?: number;
/** Filter by user ID */
userId?: string;
/** Filter by trace name */
name?: string;
/** Filter by session ID */
sessionId?: string;
/** From timestamp (ISO 8601) */
fromTimestamp?: string;
/** To timestamp (ISO 8601) */
toTimestamp?: string;
/** Sort field */
orderBy?: string;
/** Filter by tags */
tags?: string[];
/** Filter by version */
version?: string;
}
/** Paginated response from fetchTraces */
interface GetLangfuseTracesResponse {
/** Array of traces */
data: ApiTraceWithDetails[];
/** Pagination metadata */
meta: {
page: number;
limit: number;
totalItems: number;
totalPages: number;
};
}
/** Query filters and pagination options for fetchObservations */
interface GetLangfuseObservationsQuery {
/** Page number */
page?: number;
/** Page size */
limit?: number;
/** Filter by user ID */
userId?: string;
/** Filter by observation name */
name?: string;
/** Filter by parent trace ID */
traceId?: string;
/** Filter by parent observation ID */
parentObservationId?: string;
/** From start time (ISO 8601) */
fromStartTime?: string;
/** To start time (ISO 8601) */
toStartTime?: string;
/** Filter by observation type (SPAN, GENERATION, EVENT) */
type?: ApiObservationType;
}
/** Discriminator for the kind of observation */
type ApiObservationType = "SPAN" | "GENERATION" | "EVENT";
/** Paginated response from fetchObservations */
interface GetLangfuseObservationsResponse {
/** Array of observations */
data: ApiObservation[];
/** Pagination metadata */
meta: {
page: number;
limit: number;
totalItems: number;
totalPages: number;
};
}
/** Query filters and pagination options for fetchSessions */
interface GetLangfuseSessionsQuery {
/** Page number */
page?: number;
/** Page size */
limit?: number;
/** From timestamp (ISO 8601) */
fromTimestamp?: string;
/** To timestamp (ISO 8601) */
toTimestamp?: string;
}
/** Paginated response from fetchSessions */
interface GetLangfuseSessionsResponse {
/** Array of sessions */
data: ApiSession[];
/** Pagination metadata */
meta: {
page: number;
limit: number;
totalItems: number;
totalPages: number;
};
}

Usage Example:
// Fetch recent traces
const traces = await langfuse.fetchTraces({
page: 1,
limit: 50,
userId: 'user-123',
tags: ['production']
});
// Fetch specific trace
const trace = await langfuse.fetchTrace('trace-id-123');
// Fetch observations for a trace
const observations = await langfuse.fetchObservations({
traceId: 'trace-id-123',
type: 'GENERATION'
});

import { Langfuse } from 'langfuse';
const langfuse = new Langfuse({
publicKey: 'pk-lf-...',
secretKey: 'sk-lf-...'
});
// Create a trace for a chat pipeline
const trace = langfuse.trace({
name: 'chat-pipeline',
userId: 'user-123',
sessionId: 'session-456',
tags: ['production']
});
// Add a span for retrieval
const retrievalSpan = trace.span({
name: 'document-retrieval',
input: { query: 'What is AI?' }
});
// Simulate retrieval
retrievalSpan.end({
output: { documents: [/* ... */] },
metadata: { count: 5 }
});
// Add a generation for LLM call
const generation = trace.generation({
name: 'answer-generation',
model: 'gpt-4',
input: [
{ role: 'system', content: 'You are a helpful assistant' },
{ role: 'user', content: 'What is AI?' }
],
modelParameters: {
temperature: 0.7,
max_tokens: 500
}
});
// Update with response
generation.end({
output: {
role: 'assistant',
content: 'AI is...'
},
usage: {
input: 150,
output: 200,
total: 350
}
});
// Add a score
trace.score({
name: 'user-feedback',
value: 5,
comment: 'Very helpful'
});
// Flush to send all events
await langfuse.flushAsync();
// Get trace URL
console.log(trace.getTraceUrl());

Install with Tessl CLI
npx tessl i tessl/npm-langfuse