Langfuse instrumentation methods based on OpenTelemetry
Langfuse provides 10 specialized observation classes, each optimized for specific use cases in AI and software workflows. Each observation type shares common methods while providing semantic clarity for different operation types.
All observation types extend a common base class and share core functionality:
Shared properties: `id`, `traceId`, `otelSpan`, `type`. Shared methods: `update()`, `end()`, `updateTrace()`, `startObservation()`.

type LangfuseObservation =
| LangfuseSpan
| LangfuseGeneration
| LangfuseEvent
| LangfuseAgent
| LangfuseTool
| LangfuseChain
| LangfuseRetriever
| LangfuseEvaluator
| LangfuseGuardrail
| LangfuseEmbedding;

General-purpose observation for tracking operations, functions, and workflows.
/**
* General-purpose observation wrapper for tracking operations, functions, and workflows.
*/
class LangfuseSpan {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type */
readonly type: "span";
/**
* Updates this span with new attributes.
*/
update(attributes: LangfuseSpanAttributes): LangfuseSpan;
/**
* Ends the observation.
*/
end(endTime?: Date | number): void;
/**
* Updates the parent trace.
*/
updateTrace(attributes: LangfuseTraceAttributes): LangfuseSpan;
/**
* Creates a child observation.
*/
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}

import { startObservation } from '@langfuse/tracing';
const span = startObservation('user-authentication', {
input: { username: 'john_doe', method: 'oauth' },
metadata: { provider: 'google' }
});
try {
const user = await authenticateUser(credentials);
span.update({
output: { userId: user.id, success: true }
});
} catch (error) {
span.update({
level: 'ERROR',
statusMessage: error.message
});
throw error;
} finally {
span.end();
}

Specialized observation for LLM calls, text generation, and AI model interactions.
/**
* Specialized observation for tracking LLM interactions and AI model calls.
*/
class LangfuseGeneration {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "generation";
/**
* Updates this generation with new attributes including LLM-specific fields.
*/
update(attributes: LangfuseGenerationAttributes): LangfuseGeneration;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace; returns the generation for method chaining. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseGeneration;
/** Creates a child observation within this generation's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
interface LangfuseGenerationAttributes extends LangfuseSpanAttributes {
/** Timestamp when the model started generating completion */
completionStartTime?: Date;
/** Name of the language model used */
model?: string;
/** Parameters passed to the model */
modelParameters?: { [key: string]: string | number };
/** Token usage and other model-specific usage metrics */
usageDetails?: { [key: string]: number };
/** Cost breakdown for the generation */
costDetails?: { [key: string]: number };
/** Information about the prompt used */
prompt?: { name: string; version: number; isFallback: boolean };
}

const generation = startObservation('openai-gpt-4', {
model: 'gpt-4-turbo',
input: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain quantum computing' }
],
modelParameters: { temperature: 0.7, maxTokens: 500 }
}, { asType: 'generation' });
const response = await openai.chat.completions.create({
model: 'gpt-4-turbo',
messages: generation.attributes.input,
temperature: 0.7,
max_tokens: 500
});
generation.update({
output: response.choices[0].message,
usageDetails: {
promptTokens: response.usage.prompt_tokens,
completionTokens: response.usage.completion_tokens,
totalTokens: response.usage.total_tokens
},
costDetails: { totalCost: 0.025, currency: 'USD' }
});
generation.end();

Observation for point-in-time occurrences or log entries (automatically ended).
/**
* Observation for point-in-time occurrences or log entries.
* Events are automatically ended at their timestamp.
*/
class LangfuseEvent {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "event";
// Events are automatically ended at creation
// No update() method - events are immutable after creation
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseEvent;
/** Creates a child observation within this event's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
// Events are automatically ended at creation
const event = startObservation('user-login', {
input: {
userId: '123',
method: 'oauth',
timestamp: new Date().toISOString()
},
level: 'DEFAULT',
metadata: {
ip: '192.168.1.1',
userAgent: 'Chrome/120.0',
sessionId: 'sess_456'
}
}, { asType: 'event' });
// No need to call event.end() - it is already ended

Specialized observation for AI agent workflows with tool usage and autonomous operations.
/**
* Specialized observation for tracking AI agent workflows.
*/
class LangfuseAgent {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "agent";
/** Updates this agent observation with new attributes. */
update(attributes: LangfuseAgentAttributes): LangfuseAgent;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseAgent;
/** Creates a child observation within this agent's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
type LangfuseAgentAttributes = LangfuseSpanAttributes;

const agent = startObservation('research-agent', {
input: {
task: 'Research renewable energy trends',
tools: ['web-search', 'summarizer'],
maxIterations: 3
},
metadata: { model: 'gpt-4', strategy: 'react' }
}, { asType: 'agent' });
// Agent uses tools
const searchTool = agent.startObservation('web-search', {
input: { query: 'renewable energy 2024' }
}, { asType: 'tool' });
const results = await webSearch('renewable energy 2024');
searchTool.update({ output: results });
searchTool.end();
// Agent generates response
const generation = agent.startObservation('synthesize-findings', {
input: results,
model: 'gpt-4'
}, { asType: 'generation' });
const response = await llm.generate(results);
generation.update({ output: response });
generation.end();
agent.update({
output: {
completed: true,
toolsUsed: 1,
iterationsRequired: 1,
finalResponse: response
},
metadata: { efficiency: 0.85, qualityScore: 0.92 }
});
agent.end();

Specialized observation for individual tool calls and external API interactions.
/**
* Specialized observation for tracking tool calls and API interactions.
*/
class LangfuseTool {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "tool";
/** Updates this tool observation with new attributes. */
update(attributes: LangfuseToolAttributes): LangfuseTool;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseTool;
/** Creates a child observation within this tool call's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
type LangfuseToolAttributes = LangfuseSpanAttributes;

const tool = startObservation('web-search', {
input: {
query: 'latest AI developments',
maxResults: 10
},
metadata: { provider: 'google-api', timeout: 5000 }
}, { asType: 'tool' });
try {
const results = await webSearch('latest AI developments');
tool.update({
output: {
results: results,
count: results.length,
relevanceScore: 0.89
},
metadata: { latency: 1200, cacheHit: false }
});
} catch (error) {
tool.update({
level: 'ERROR',
statusMessage: 'Search failed',
output: { error: error.message }
});
} finally {
tool.end();
}

Specialized observation for structured multi-step workflows and process chains.
/**
* Specialized observation for tracking multi-step workflows.
*/
class LangfuseChain {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "chain";
/** Updates this chain observation with new attributes. */
update(attributes: LangfuseChainAttributes): LangfuseChain;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseChain;
/** Creates a child observation within this chain's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
type LangfuseChainAttributes = LangfuseSpanAttributes;

const chain = startObservation('rag-pipeline', {
input: {
query: 'What is renewable energy?',
steps: ['retrieval', 'generation']
},
metadata: { vectorDb: 'pinecone', model: 'gpt-4' }
}, { asType: 'chain' });
// Step 1: Document retrieval
const retrieval = chain.startObservation('document-retrieval', {
input: { query: 'renewable energy' }
}, { asType: 'retriever' });
const docs = await vectorSearch('renewable energy');
retrieval.update({ output: { documents: docs, count: docs.length } });
retrieval.end();
// Step 2: Generate response
const generation = chain.startObservation('response-generation', {
input: { query: 'What is renewable energy?', context: docs },
model: 'gpt-4'
}, { asType: 'generation' });
const response = await llm.generate({ prompt, context: docs });
generation.update({ output: response });
generation.end();
chain.update({
output: {
finalResponse: response,
stepsCompleted: 2,
documentsUsed: docs.length,
pipelineEfficiency: 0.87
}
});
chain.end();

Specialized observation for document retrieval and search operations.
/**
* Specialized observation for tracking document retrieval operations.
*/
class LangfuseRetriever {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "retriever";
/** Updates this retriever observation with new attributes. */
update(attributes: LangfuseRetrieverAttributes): LangfuseRetriever;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseRetriever;
/** Creates a child observation within this retrieval's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
type LangfuseRetrieverAttributes = LangfuseSpanAttributes;

const retriever = startObservation('vector-search', {
input: {
query: 'machine learning applications',
topK: 10,
similarityThreshold: 0.7
},
metadata: {
vectorDB: 'pinecone',
embeddingModel: 'text-embedding-ada-002',
similarity: 'cosine'
}
}, { asType: 'retriever' });
const results = await vectorDB.search({
query: 'machine learning applications',
topK: 10,
threshold: 0.7
});
retriever.update({
output: {
documents: results,
count: results.length,
avgSimilarity: 0.89
},
metadata: { searchLatency: 150, cacheHit: false }
});
retriever.end();

Specialized observation for quality assessment and evaluation operations.
/**
* Specialized observation for tracking evaluation operations.
*/
class LangfuseEvaluator {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "evaluator";
/** Updates this evaluator observation with new attributes. */
update(attributes: LangfuseEvaluatorAttributes): LangfuseEvaluator;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseEvaluator;
/** Creates a child observation within this evaluation's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
type LangfuseEvaluatorAttributes = LangfuseSpanAttributes;

const evaluator = startObservation('response-quality-eval', {
input: {
response: 'Machine learning is a subset of artificial intelligence...',
reference: 'Expected high-quality explanation',
criteria: ['accuracy', 'completeness', 'clarity']
},
metadata: { evaluator: 'custom-bert-scorer', threshold: 0.8 }
}, { asType: 'evaluator' });
const evaluation = await evaluateResponse({
response: inputText,
criteria: ['accuracy', 'completeness', 'clarity']
});
evaluator.update({
output: {
overallScore: 0.87,
criteriaScores: {
accuracy: 0.92,
completeness: 0.85,
clarity: 0.90
},
passed: true,
grade: 'excellent'
}
});
evaluator.end();

Specialized observation for safety checks and compliance enforcement.
/**
* Specialized observation for tracking safety and compliance checks.
*/
class LangfuseGuardrail {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "guardrail";
/** Updates this guardrail observation with new attributes. */
update(attributes: LangfuseGuardrailAttributes): LangfuseGuardrail;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseGuardrail;
/** Creates a child observation within this guardrail's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
type LangfuseGuardrailAttributes = LangfuseSpanAttributes;

const guardrail = startObservation('content-safety-check', {
input: {
content: userMessage,
policies: ['no-toxicity', 'no-hate-speech', 'no-pii'],
strictMode: true
},
metadata: { guardrailVersion: 'v2.1', confidence: 0.95 }
}, { asType: 'guardrail' });
const safetyCheck = await checkContentSafety({
text: userMessage,
policies: ['no-toxicity', 'no-hate-speech']
});
guardrail.update({
output: {
safe: safetyCheck.safe,
riskScore: 0.15,
violations: [],
action: 'allow'
}
});
guardrail.end();

Specialized observation for text embedding and vector generation operations.
/**
* Specialized observation for tracking embedding generation.
*/
class LangfuseEmbedding {
/** Unique identifier for this observation */
readonly id: string;
/** Trace ID containing this observation */
readonly traceId: string;
/** Underlying OpenTelemetry span */
readonly otelSpan: Span;
/** Observation type discriminant */
readonly type: "embedding";
/** Updates this embedding observation with new attributes (model, usage, cost). */
update(attributes: LangfuseEmbeddingAttributes): LangfuseEmbedding;
/** Ends the observation with an optional end timestamp. */
end(endTime?: Date | number): void;
/** Updates the parent trace with trace-level attributes. */
updateTrace(attributes: LangfuseTraceAttributes): LangfuseEmbedding;
/** Creates a child observation within this embedding operation's context. */
startObservation(
name: string,
attributes?: LangfuseObservationAttributes,
options?: { asType?: LangfuseObservationType }
): LangfuseObservation;
}
// Inherits generation attributes including model, usage, cost
type LangfuseEmbeddingAttributes = LangfuseGenerationAttributes;

const embedding = startObservation('text-embedder', {
input: {
texts: [
'Machine learning is a subset of AI',
'Deep learning uses neural networks'
],
batchSize: 2
},
model: 'text-embedding-ada-002',
metadata: { dimensions: 1536, normalization: 'l2' }
}, { asType: 'embedding' });
const embedResult = await generateEmbeddings({
texts: embedding.attributes.input.texts,
model: 'text-embedding-ada-002'
});
embedding.update({
output: {
embeddings: embedResult.vectors,
count: embedResult.vectors.length,
dimensions: 1536
},
usageDetails: { totalTokens: embedResult.tokenCount },
metadata: { processingTime: 340 }
});
embedding.end();

All observation types share these properties:
interface CommonObservationProperties {
/** Unique identifier for this observation (OpenTelemetry span ID) */
readonly id: string;
/** Identifier of the parent trace containing this observation */
readonly traceId: string;
/** Direct access to the underlying OpenTelemetry span */
readonly otelSpan: Span;
/** The observation type */
readonly type: LangfuseObservationType;
}

All observation types support these methods:
Updates the observation with new attributes. Returns the observation for method chaining.
observation.update({
output: { result: 'success' },
metadata: { duration: 150 }
});

Marks the observation as complete with optional end timestamp.
observation.end(); // Current time
observation.end(new Date('2024-01-01T12:00:00Z')); // Custom time

Updates the parent trace with trace-level attributes.
observation.updateTrace({
userId: 'user-123',
sessionId: 'session-456',
tags: ['production', 'api-v2']
});

Creates a new child observation within this observation's context.
const child = observation.startObservation('child-operation', {
input: { step: 'processing' }
}, { asType: 'span' });

Available observation types: event, generation, embedding, agent, tool, retriever, evaluator, guardrail, chain, span.

// LLM interactions
startObservation('openai-call', {}, { asType: 'generation' });
startObservation('embed-text', {}, { asType: 'embedding' });
// Intelligent workflows
startObservation('ai-agent', {}, { asType: 'agent' });
startObservation('rag-pipeline', {}, { asType: 'chain' });
// Individual operations
startObservation('api-call', {}, { asType: 'tool' });
startObservation('vector-search', {}, { asType: 'retriever' });
// Quality and safety
startObservation('evaluate-output', {}, { asType: 'evaluator' });
startObservation('content-filter', {}, { asType: 'guardrail' });
// Events and general ops
startObservation('user-action', {}, { asType: 'event' });
startObservation('data-processing', {}); // defaults to 'span'

Install with Tessl CLI
npx tessl i tessl/npm-langfuse--tracing