Observability and analytics platform for LLM applications with hierarchical tracing, prompt management, dataset operations, and OpenAI integration
Comprehensive media handling system for images, PDFs, and other binary content. Langfuse automatically detects media in trace inputs/outputs, uploads to cloud storage, and replaces content with reference strings for efficient storage and retrieval.
Wrapper class for media objects that handles upload and reference generation.
/**
 * Wrapper for binary media (images, PDFs, audio, ...) attached to Langfuse traces.
 * Construct from one source (base64DataUri, contentBytes, or filePath); on flush the
 * SDK uploads the content and serializes the instance (via toJSON) to a reference
 * string of the form @@@langfuseMedia:type={contentType}|id={mediaId}|source={source}@@@.
 */
class LangfuseMedia {
/** Optional wrapper object preserved alongside the media (e.g. the original message part) */
obj?: object;
/** Size of the raw content in bytes; undefined until the content has been resolved */
contentLength: number | undefined;
/** SHA-256 hash of the content; undefined until computed */
contentSha256Hash: string | undefined;
/**
* Creates a media object for upload.
* @param params - Media source. Provide one of: base64DataUri, contentBytes, or filePath.
*   NOTE(review): examples always pass contentType with contentBytes/filePath and omit it
*   for base64DataUri (parsed from the URI) — confirm whether it is required in those cases.
*/
constructor(params: {
/** Optional wrapper object preserved alongside the media */
obj?: object;
/** Base64 data URI, e.g. "data:image/png;base64,..." (content type is taken from the URI) */
base64DataUri?: string;
/** MIME type of the content; used together with contentBytes or filePath */
contentType?: MediaContentType;
/** Raw content bytes (Node.js Buffer) */
contentBytes?: Buffer;
/** Path of a file to read the content from */
filePath?: string;
});
/**
* Serializes this media object to its reference string (also picked up by JSON.stringify).
* Format: @@@langfuseMedia:type={contentType}|id={mediaId}|source={source}@@@
* @returns The reference string, or undefined when no resolvable content is present
*/
toJSON(): string | undefined;
/**
* Parses a media reference string into its components.
* @param referenceString - String in the @@@langfuseMedia:...@@@ format
* @returns Parsed media reference (mediaId, source, contentType)
*/
static parseReferenceString(referenceString: string): ParsedMediaReference;
/**
* Recursively traverses an object and replaces all media reference strings with the
* actual content (as base64 data URIs), fetched via the supplied Langfuse client.
* @param params - Object to process, client, resolution format, and optional max depth
* @returns The object with resolved media content
*/
static resolveMediaReferences<T>(
params: LangfuseMediaResolveMediaReferencesParams<T>
): Promise<T>;
}
/** Components of a @@@langfuseMedia:...@@@ reference string, as returned by LangfuseMedia.parseReferenceString. */
interface ParsedMediaReference {
/** Unique media identifier (the id component) */
mediaId: string;
/** Origin of the media content, e.g. "bytes", "base64", or "file" */
source: string;
/** MIME type of the referenced content */
contentType: MediaContentType;
}
/** Parameters for LangfuseMedia.resolveMediaReferences. */
interface LangfuseMediaResolveMediaReferencesParams<T> {
/** Object (typically a trace or observation payload) whose media references are replaced */
obj: T;
/** Langfuse client used to fetch the referenced media content */
langfuseClient: LangfuseCore;
/** Target representation; currently only "base64DataUri" is supported */
resolveWith: "base64DataUri";
/** Maximum recursion depth when traversing nested objects (default: 10) */
maxDepth?: number;
}
/**
 * MIME types accepted for media content.
 * NOTE(review): the prose elsewhere in these docs claims 50+ supported MIME types
 * (image/heic, text/markdown, application/gzip, video/ogg, ...) that are missing
 * from this union — confirm the authoritative list against the SDK.
 */
type MediaContentType =
// Images
| "image/png"
| "image/jpeg"
| "image/jpg" // non-standard alias of image/jpeg, accepted for convenience
| "image/webp"
| "image/gif"
| "image/svg+xml"
| "image/tiff"
| "image/bmp"
// Audio
| "audio/mpeg"
| "audio/mp3" // non-standard alias of audio/mpeg, accepted for convenience
| "audio/wav"
| "audio/ogg"
| "audio/oga"
| "audio/aac"
| "audio/mp4"
| "audio/flac"
// Video
| "video/mp4"
| "video/webm"
// Text
| "text/plain"
| "text/html"
| "text/css"
| "text/csv"
// Application
| "application/pdf"
| "application/msword"
| "application/vnd.ms-excel"
| "application/zip"
| "application/json"
| "application/xml"
| "application/octet-stream";

Usage Example:
import { Langfuse, LangfuseMedia } from 'langfuse';
const langfuse = new Langfuse();
// Create media from base64 data URI
const image = new LangfuseMedia({
base64DataUri: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...'
});
// Create media from file
const pdf = new LangfuseMedia({
filePath: '/path/to/document.pdf',
contentType: 'application/pdf'
});
// Create media from bytes
const audio = new LangfuseMedia({
contentBytes: audioBuffer,
contentType: 'audio/mpeg'
});
// Use in trace
const trace = langfuse.trace({
name: 'image-analysis',
input: {
image: image,
prompt: 'What is in this image?'
}
});
// Media is automatically uploaded and replaced with reference
await langfuse.flushAsync();

Langfuse automatically detects and handles base64 data URIs in trace inputs and outputs.
import { Langfuse } from 'langfuse';
const langfuse = new Langfuse();
// Base64 data URIs are automatically detected
const trace = langfuse.trace({
name: 'vision-analysis',
input: {
// This will be automatically uploaded and replaced
image: 'data:image/jpeg;base64,/9j/4AAQSkZJRg...',
question: 'Describe this image'
}
});
const generation = trace.generation({
name: 'gpt-4-vision',
model: 'gpt-4-vision-preview',
input: [
{
role: 'user',
content: [
{ type: 'text', text: 'What is in this image?' },
{
type: 'image_url',
// Automatically detected and uploaded
image_url: { url: 'data:image/png;base64,iVBORw0KGg...' }
}
]
}
]
});
generation.end({
output: {
description: 'A beautiful sunset over mountains'
}
});
await langfuse.flushAsync();

Retrieve media metadata and content.
/** Media-related methods of the Langfuse client (partial declaration). */
class Langfuse {
/**
* Fetches metadata for an uploaded media item, including a temporary download URL.
* @param id - Media ID (the id component of a media reference string)
* @returns Media metadata with a short-lived download URL
*/
fetchMedia(id: string): Promise<GetMediaResponse>;
/**
* Recursively replaces media reference strings in an object with the actual content,
* fetched via this client (instance counterpart of LangfuseMedia.resolveMediaReferences).
* @param params - Object to process, target format, and optional max traversal depth
* @returns The object with resolved media content
*/
resolveMediaReferences<T>(params: {
/** Object whose media reference strings are to be resolved */
obj: T;
/** Target representation; currently only "base64DataUri" is supported */
resolveWith: "base64DataUri";
/** Maximum traversal depth (default: 10) */
maxDepth?: number;
}): Promise<T>;
}
/** Response of Langfuse.fetchMedia. */
interface GetMediaResponse {
/** Unique media identifier */
mediaId: string;
/** Temporary download URL for the content (expires; re-fetch to obtain a fresh one) */
url: string;
/** MIME type of the stored content */
contentType: MediaContentType;
/** Content size in bytes */
contentLength: number;
/** Upload timestamp (presumably an ISO 8601 string — confirm against the API) */
uploadedAt: string;
}

Usage Example:
// Fetch media metadata
const mediaInfo = await langfuse.fetchMedia('media-id-123');
console.log(mediaInfo.contentType); // "image/png"
console.log(mediaInfo.contentLength); // 12345
console.log(mediaInfo.url); // Temporary download URL
// Download the media
const response = await fetch(mediaInfo.url);
const buffer = await response.arrayBuffer();
// Resolve media references in a trace
const trace = await langfuse.fetchTrace('trace-id-123');
// Trace input may contain media references like:
// @@@langfuseMedia:type=image/png|id=media-123|source=bytes@@@
// Resolve them to base64 data URIs
const resolvedTrace = await langfuse.resolveMediaReferences({
obj: trace.data,
resolveWith: 'base64DataUri',
maxDepth: 10
});
// Now resolvedTrace contains actual base64 data URIs
console.log(resolvedTrace.input.image); // "data:image/png;base64,..."

Replace media reference strings with actual content throughout nested objects.
/**
* Static method for resolving media references without a Langfuse instance
*/
LangfuseMedia.resolveMediaReferences<T>(
params: LangfuseMediaResolveMediaReferencesParams<T>
): Promise<T>;

Usage Example:
import { Langfuse, LangfuseMedia } from 'langfuse';
const langfuse = new Langfuse();
// Fetch traces with media references
const traces = await langfuse.fetchTraces({
limit: 10
});
// Resolve all media references in all traces
for (const trace of traces.data) {
const resolved = await LangfuseMedia.resolveMediaReferences({
obj: trace,
langfuseClient: langfuse,
resolveWith: 'base64DataUri',
maxDepth: 5
});
// Now trace.input and trace.output contain actual media content
console.log(resolved.input);
}

Langfuse uses a special string format for media references:

@@@langfuseMedia:type={contentType}|id={mediaId}|source={source}@@@

Example:

@@@langfuseMedia:type=image/png|id=550e8400-e29b-41d4-a716-446655440000|source=bytes@@@

Components:

- type: Media content type (MIME type)
- id: Unique media identifier (UUID)
- source: Source of the media (bytes, base64, file, etc.)

This format allows efficient storage and retrieval while maintaining references to the actual media content stored in cloud storage.
Handle image inputs for vision models.
import { Langfuse, LangfuseMedia } from 'langfuse';
const langfuse = new Langfuse();
// Read image from file
const image = new LangfuseMedia({
filePath: './screenshot.png',
contentType: 'image/png'
});
const trace = langfuse.trace({
name: 'screenshot-analysis',
input: {
image: image,
task: 'Extract text from screenshot'
}
});
const generation = trace.generation({
name: 'gpt-4-vision',
model: 'gpt-4-vision-preview',
input: [
{
role: 'user',
content: [
{ type: 'text', text: 'Extract all text from this image' },
{
type: 'image_url',
image_url: {
url: image.toJSON() || '', // Use reference
detail: 'high'
}
}
]
}
]
});
// ... get response from model ...
generation.end({
output: { extracted_text: '...' }
});
await langfuse.flushAsync();

Handle PDF documents in traces.
import { Langfuse, LangfuseMedia } from 'langfuse';
import fs from 'fs';
const langfuse = new Langfuse();
// Read PDF file
const pdfBuffer = fs.readFileSync('./document.pdf');
const pdf = new LangfuseMedia({
contentBytes: pdfBuffer,
contentType: 'application/pdf'
});
const trace = langfuse.trace({
name: 'pdf-analysis',
input: {
document: pdf,
analysis_type: 'summary'
}
});
const generation = trace.generation({
name: 'document-summary',
model: 'gpt-4',
input: 'Summarize the PDF document',
metadata: {
document_size: pdfBuffer.length
}
});
// ... process PDF and generate summary ...
generation.end({
output: { summary: '...' }
});
await langfuse.flushAsync();

Handle audio files for speech-to-text or analysis.
import { Langfuse, LangfuseMedia } from 'langfuse';
const langfuse = new Langfuse();
const audio = new LangfuseMedia({
filePath: './recording.mp3',
contentType: 'audio/mpeg'
});
const trace = langfuse.trace({
name: 'speech-to-text',
input: {
audio: audio,
language: 'en'
}
});
const generation = trace.generation({
name: 'whisper-transcription',
model: 'whisper-1',
input: audio
});
generation.end({
output: { text: 'Transcribed text...' }
});
await langfuse.flushAsync();

Handle traces with multiple media items.
import { Langfuse, LangfuseMedia } from 'langfuse';
const langfuse = new Langfuse();
const image1 = new LangfuseMedia({
base64DataUri: 'data:image/png;base64,...'
});
const image2 = new LangfuseMedia({
base64DataUri: 'data:image/jpeg;base64,...'
});
const trace = langfuse.trace({
name: 'multi-image-comparison',
input: {
images: [image1, image2],
task: 'Compare these images'
}
});
const generation = trace.generation({
name: 'image-comparison',
model: 'gpt-4-vision-preview',
input: [
{
role: 'user',
content: [
{ type: 'text', text: 'What are the differences?' },
{ type: 'image_url', image_url: { url: image1.toJSON() || '' } },
{ type: 'image_url', image_url: { url: image2.toJSON() || '' } }
]
}
]
});
generation.end({
output: { differences: ['...'] }
});
await langfuse.flushAsync();

Fetch traces and resolve media for local analysis.
import { Langfuse } from 'langfuse';
import fs from 'fs';
const langfuse = new Langfuse();
// Fetch traces with media
const traces = await langfuse.fetchTraces({
name: 'vision-analysis',
limit: 10
});
for (const trace of traces.data) {
// Resolve media references to base64
const resolved = await langfuse.resolveMediaReferences({
obj: trace,
resolveWith: 'base64DataUri'
});
// Extract image from base64 data URI
if (resolved.input?.image) {
const base64Data = resolved.input.image.split(',')[1];
const buffer = Buffer.from(base64Data, 'base64');
// Save to file for analysis
fs.writeFileSync(`./trace-${trace.id}.png`, buffer);
}
}

Langfuse supports 50+ MIME types including:

Images:
image/jpeg, image/png, image/gif, image/webp, image/svg+xml, image/bmp, image/tiff, image/heic, image/heif, image/avif

Documents:
application/pdf, text/plain, text/html, text/csv, text/markdown

Audio:
audio/mpeg, audio/wav, audio/ogg, audio/mp4, audio/webm, audio/flac, audio/aac

Video:
video/mp4, video/webm, video/ogg, video/mpeg, video/quicktime, video/x-msvideo

Archives:
application/zip, application/gzip, application/x-tar, application/x-7z-compressed

Data:
application/json, application/xml, application/octet-stream

import { Langfuse, LangfuseMedia } from 'langfuse';
import OpenAI from 'openai';
import fs from 'fs';
const langfuse = new Langfuse();
const openai = new OpenAI();
// Step 1: Create media objects
const userImage = new LangfuseMedia({
filePath: './uploads/user-photo.jpg',
contentType: 'image/jpeg'
});
const referenceImage = new LangfuseMedia({
base64DataUri: 'data:image/png;base64,iVBORw0KGg...'
});
// Step 2: Create trace with media
const trace = langfuse.trace({
name: 'image-comparison-workflow',
userId: 'user-123',
input: {
user_image: userImage,
reference_image: referenceImage,
task: 'Compare image similarity'
}
});
// Step 3: Use with OpenAI Vision
const generation = trace.generation({
name: 'vision-comparison',
model: 'gpt-4-vision-preview',
input: [
{
role: 'user',
content: [
{
type: 'text',
text: 'Compare these two images and describe their similarities and differences'
},
{
type: 'image_url',
image_url: {
url: userImage.toJSON() || '',
detail: 'high'
}
},
{
type: 'image_url',
image_url: {
url: referenceImage.toJSON() || '',
detail: 'high'
}
}
]
}
]
});
// Make actual API call
const response = await openai.chat.completions.create({
model: 'gpt-4-vision-preview',
messages: [
{
role: 'user',
content: [
{
type: 'text',
text: 'Compare these images'
},
{
type: 'image_url',
image_url: { url: userImage.toJSON() || '' }
},
{
type: 'image_url',
image_url: { url: referenceImage.toJSON() || '' }
}
]
}
],
max_tokens: 500
});
generation.end({
output: response.choices[0].message,
usage: {
input: response.usage?.prompt_tokens,
output: response.usage?.completion_tokens,
total: response.usage?.total_tokens
}
});
trace.update({
output: {
comparison: response.choices[0].message.content
}
});
// Step 4: Flush to upload media
await langfuse.flushAsync();
// Step 5: Later, retrieve and resolve media
const fetchedTrace = await langfuse.fetchTrace(trace.id);
const resolvedTrace = await langfuse.resolveMediaReferences({
obj: fetchedTrace.data,
resolveWith: 'base64DataUri'
});
// Save resolved images
if (resolvedTrace.input?.user_image) {
const base64 = resolvedTrace.input.user_image.split(',')[1];
fs.writeFileSync('./resolved-user-image.jpg', Buffer.from(base64, 'base64'));
}
console.log('Trace URL:', trace.getTraceUrl());

Install with Tessl CLI
npx tessl i tessl/npm-langfuse