CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/npm-langfuse

Observability and analytics platform for LLM applications with hierarchical tracing, prompt management, dataset operations, and OpenAI integration

Overview
Eval results
Files

docs/prompts.md

Prompt Management

Comprehensive prompt management system with versioning, caching, and support for both text and chat formats. Prompts are templates that can be compiled with variables and linked to generations for tracking.

Capabilities

Fetching Prompts

Retrieve prompts with automatic caching and version management.

/**
 * Fetches a text prompt with caching support
 * @param name - Prompt name
 * @param version - Optional version (defaults to latest production)
 * @param options - Optional cache TTL configuration
 * @returns Text prompt client
 */
getPrompt(name: string, version?: number, options?: GetPromptOptions): Promise<TextPromptClient>;

/**
 * Fetches a chat prompt with caching support
 * @param name - Prompt name
 * @param version - Optional version (defaults to latest production)
 * @param options - Chat-specific configuration
 * @returns Chat prompt client
 */
getPrompt(name: string, version?: number, options?: GetPromptOptionsChat): Promise<ChatPromptClient>;

interface GetPromptOptions {
  /** Label to fetch (default: "production") */
  label?: string;
  /** Cache type: "text" */
  type?: "text";
  /** Cache TTL in seconds (default: 60) */
  cacheTtlSeconds?: number;
  /** Fallback prompt string if not found */
  fallback?: string;
  /** Maximum number of retries for prompt fetch (default: 2, max: 4) */
  maxRetries?: number;
  /** Fetch timeout in milliseconds (default: from client config) */
  fetchTimeoutMs?: number;
}

interface GetPromptOptionsChat {
  /** Label to fetch (default: "production") */
  label?: string;
  /** Cache type: "chat" */
  type: "chat";
  /** Cache TTL in seconds (default: 60) */
  cacheTtlSeconds?: number;
  /** Fallback prompt messages array if not found */
  fallback?: ChatMessage[];
  /** Maximum number of retries for prompt fetch (default: 2, max: 4) */
  maxRetries?: number;
  /** Fetch timeout in milliseconds (default: from client config) */
  fetchTimeoutMs?: number;
}

interface TextPrompt {
  type: "text";
  name: string;
  version: number;
  prompt: string;
  config: unknown;
  labels: string[];
  tags: string[];
  commitMessage?: string | null;
}

interface ChatPrompt {
  type: "chat";
  name: string;
  version: number;
  prompt: ChatMessageWithPlaceholders[];
  config: unknown;
  labels: string[];
  tags: string[];
  commitMessage?: string | null;
}

Usage Example:

import { Langfuse } from 'langfuse';

const langfuse = new Langfuse();

// Fetch latest production text prompt
const textPrompt = await langfuse.getPrompt('greeting-template');

// Fetch specific version
const versionedPrompt = await langfuse.getPrompt('greeting-template', 3);

// Fetch chat prompt with custom cache
const chatPrompt = await langfuse.getPrompt(
  'chat-template',
  undefined,
  { type: 'chat', cacheTtlSeconds: 300 }
);

// Fetch with fallback (per GetPromptOptions, fallback is the prompt string
// to use if the fetch fails; for chat prompts it is a ChatMessage[] instead)
const promptWithFallback = await langfuse.getPrompt('new-prompt', undefined, {
  fallback: 'Default prompt text'
});

Creating Prompts

Create new prompts programmatically.

/**
 * Creates a new text prompt
 * @param body - Text prompt configuration
 * @returns Text prompt client
 */
createPrompt(body: CreateTextPromptBody): Promise<TextPromptClient>;

/**
 * Creates a new chat prompt
 * @param body - Chat prompt configuration
 * @returns Chat prompt client
 */
createPrompt(body: CreateChatPromptBody): Promise<ChatPromptClient>;

/**
 * Creates a new chat prompt with placeholders
 * @param body - Chat prompt configuration with placeholder support
 * @returns Chat prompt client
 */
createPrompt(body: CreateChatPromptBodyWithPlaceholders): Promise<ChatPromptClient>;

interface CreateTextPromptBody {
  /** Prompt type (default: "text") */
  type?: "text";
  /** Prompt name */
  name: string;
  /** Prompt content with {{variable}} placeholders */
  prompt: string;
  /** Optional configuration (model parameters, etc.) */
  config?: any;
  /** Labels for filtering (e.g., "production") */
  labels?: string[];
  /** Tags for organization */
  tags?: string[];
  /** @deprecated Use labels instead */
  isActive?: boolean;
}

interface CreateChatPromptBody {
  /** Prompt type */
  type: "chat";
  /** Prompt name */
  name: string;
  /** Array of chat messages */
  prompt: ChatMessage[];
  /** Optional configuration (model parameters, etc.) */
  config?: any;
  /** Labels for filtering (e.g., "production") */
  labels?: string[];
  /** Tags for organization */
  tags?: string[];
  /** @deprecated Use labels instead */
  isActive?: boolean;
}

interface CreateChatPromptBodyWithPlaceholders {
  /** Prompt type */
  type: "chat";
  /** Prompt name */
  name: string;
  /** Array of chat messages with placeholders */
  prompt: ChatMessageWithPlaceholders[];
  /** Optional configuration (model parameters, etc.) */
  config?: any;
  /** Labels for filtering (e.g., "production") */
  labels?: string[];
  /** Tags for organization */
  tags?: string[];
}

interface ChatMessage {
  /** Message role (system, user, assistant, etc.) */
  role: string;
  /** Message content with {{variable}} placeholders */
  content: string;
}

interface PlaceholderMessage {
  /** Placeholder variable name */
  name: string;
}

type ChatMessageWithPlaceholders =
  | ({ type: "chatmessage" } & ChatMessage)
  | ({ type: "placeholder" } & PlaceholderMessage);

/**
 * Note: The langfuse package does not export a ChatMessageType enum.
 * Use string literals "chatmessage" or "placeholder" for the type field.
 */

Usage Example:

// Create text prompt
const textPrompt = await langfuse.createPrompt({
  name: 'greeting',
  prompt: 'Hello {{name}}! Welcome to {{app_name}}.',
  labels: ['production'],
  tags: ['greeting']
});

// Create chat prompt
const chatPrompt = await langfuse.createPrompt({
  type: 'chat',
  name: 'assistant-chat',
  prompt: [
    { role: 'system', content: 'You are a helpful assistant for {{company}}.' },
    { role: 'user', content: '{{user_message}}' }
  ],
  labels: ['production'],
  config: {
    temperature: 0.7,
    max_tokens: 500
  }
});

// Create chat prompt with placeholders
const chatPromptWithPlaceholders = await langfuse.createPrompt({
  type: 'chat',
  name: 'dynamic-chat',
  prompt: [
    {
      type: 'chatmessage',
      role: 'system',
      content: 'You are {{assistant_role}}.'
    },
    {
      type: 'placeholder',
      name: 'history'
    },
    {
      type: 'chatmessage',
      role: 'user',
      content: '{{query}}'
    }
  ],
  labels: ['production']
});

Updating Prompts

Update prompt labels for version management.

/**
 * Updates prompt labels
 * @param body - Update configuration
 * @returns Updated prompt client
 */
updatePrompt(body: {
  name: string;
  version: number;
  newLabels: string[];
}): Promise<LangfusePromptClient>;

type LangfusePromptClient = TextPromptClient | ChatPromptClient;

Usage Example:

// Promote a prompt version to production
await langfuse.updatePrompt({
  name: 'greeting',
  version: 5,
  newLabels: ['production', 'stable']
});

TextPromptClient

Client for working with text-based prompts using Mustache templating.

class TextPromptClient {
  /** Prompt name */
  name: string;
  /** Prompt version */
  version: number;
  /** Prompt configuration */
  config: unknown;
  /** Prompt labels (e.g., "production") */
  labels: string[];
  /** Prompt tags */
  tags: string[];
  /** Whether this is a fallback prompt */
  isFallback: boolean;
  /** Prompt type */
  type: "text";
  /** Commit message for this version */
  commitMessage: string | null | undefined;
  /** The raw prompt text */
  prompt: string;
  /** The full prompt response object */
  promptResponse: TextPrompt;

  /**
   * Compiles the prompt by replacing {{variable}} placeholders with provided values
   * @param variables - Variable values for substitution
   * @param _placeholders - Unused for text prompts
   * @returns Compiled prompt string
   */
  compile(variables?: Record<string, string>, _placeholders?: Record<string, any>): string;

  /**
   * Returns a Langchain-compatible prompt string with variables in {variable} format
   * @param options - Unused for text prompts
   * @returns Prompt string with Langchain variable syntax
   */
  getLangchainPrompt(options?: { placeholders?: Record<string, any> }): string;

  /**
   * Returns a JSON string representation of the prompt
   * @returns JSON string
   */
  toJSON(): string;
}

Usage Example:

const prompt = await langfuse.getPrompt('greeting');

// Compile with variables
const compiled = prompt.compile({
  name: 'Alice',
  app_name: 'MyApp'
});
// Result: "Hello Alice! Welcome to MyApp."

// Get Langchain format
const langchainFormat = prompt.getLangchainPrompt();
// Result: "Hello {name}! Welcome to {app_name}."

// Use with generation
const generation = trace.generation({
  name: 'greeting-generation',
  prompt: prompt,
  model: 'gpt-4',
  input: compiled
});

// Access prompt metadata
console.log(prompt.version); // 3
console.log(prompt.labels); // ["production"]
console.log(prompt.config); // { temperature: 0.7 }

ChatPromptClient

Client for working with chat-based prompts with support for message arrays and placeholders.

class ChatPromptClient {
  /** Prompt name */
  name: string;
  /** Prompt version */
  version: number;
  /** Prompt configuration */
  config: unknown;
  /** Prompt labels (e.g., "production") */
  labels: string[];
  /** Prompt tags */
  tags: string[];
  /** Whether this is a fallback prompt */
  isFallback: boolean;
  /** Prompt type */
  type: "chat";
  /** Commit message for this version */
  commitMessage: string | null | undefined;
  /** Array of chat messages with optional placeholders */
  prompt: ChatMessageWithPlaceholders[];
  /** The full prompt response object */
  promptResponse: ChatPrompt;

  /**
   * Compiles the prompt by:
   * 1. Replacing placeholder messages with provided placeholder values
   * 2. Rendering variables in message content using Mustache
   * @param variables - Variable values for {{variable}} substitution
   * @param placeholders - Placeholder values for message array insertion
   * @returns Array of ChatMessage objects and unresolved placeholders
   */
  compile(
    variables?: Record<string, string>,
    placeholders?: Record<string, any>
  ): (ChatMessageOrPlaceholder | any)[];

  /**
   * Returns a Langchain-compatible prompt with:
   * - Placeholders filled in or converted to Langchain MessagesPlaceholder format
   * - Variables transformed from {{var}} to {var} format
   * @param options - Placeholder values
   * @returns Array compatible with Langchain's ChatPromptTemplate
   */
  getLangchainPrompt(
    options?: { placeholders?: Record<string, any> }
  ): (ChatMessage | LangchainMessagesPlaceholder | any)[];

  /**
   * Returns a JSON string representation of the prompt
   * @returns JSON string
   */
  toJSON(): string;
}

type ChatMessageOrPlaceholder =
  | ChatMessage
  | ({ type: "placeholder" } & PlaceholderMessage);

interface LangchainMessagesPlaceholder {
  /** Placeholder variable name */
  variableName: string;
  /** Whether placeholder is optional */
  optional: boolean;
}

Usage Example:

// Fetch chat prompt with placeholders
const chatPrompt = await langfuse.getPrompt('assistant-chat', undefined, {
  type: 'chat'
});

// Compile with variables and placeholders
const compiled = chatPrompt.compile(
  {
    assistant_role: 'customer support agent',
    query: 'How do I reset my password?'
  },
  {
    history: [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi! How can I help?' }
    ]
  }
);
// Result: [
//   { role: 'system', content: 'You are customer support agent.' },
//   { role: 'user', content: 'Hello' },
//   { role: 'assistant', content: 'Hi! How can I help?' },
//   { role: 'user', content: 'How do I reset my password?' }
// ]

// Get Langchain format
const langchainFormat = chatPrompt.getLangchainPrompt({
  placeholders: {
    history: [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi!' }
    ]
  }
});

// Use with generation
const messages = chatPrompt.compile(
  { assistant_role: 'helper', query: 'test' },
  { history: [] }
);

const generation = trace.generation({
  name: 'chat-completion',
  prompt: chatPrompt,
  model: 'gpt-4',
  input: messages
});

Prompt Caching

Langfuse automatically caches prompts to reduce API calls. The caching system includes:

  • TTL-based caching: Prompts are cached for the specified cacheTtlSeconds (default: 60 seconds)
  • Background refresh: Cache is refreshed in the background before expiration
  • Label-based fetching: Fetch by label (e.g., "production") to always get the latest version
  • Version locking: Fetch by version number for consistent behavior

Usage Example:

// Short-lived cache (5 seconds)
const prompt1 = await langfuse.getPrompt('template', undefined, {
  cacheTtlSeconds: 5
});

// Long-lived cache (1 hour)
const prompt2 = await langfuse.getPrompt('stable-template', undefined, {
  cacheTtlSeconds: 3600
});

// Fetch specific version (cached separately)
const prompt3 = await langfuse.getPrompt('template', 5);

// Fetch by label
const prodPrompt = await langfuse.getPrompt('template', undefined, {
  label: 'production'
});

const devPrompt = await langfuse.getPrompt('template', undefined, {
  label: 'development'
});

Complete Prompt Example

import { Langfuse } from 'langfuse';

const langfuse = new Langfuse();

// Create a chat prompt with placeholders
await langfuse.createPrompt({
  type: 'chat',
  name: 'customer-support',
  prompt: [
    {
      type: 'chatmessage',
      role: 'system',
      content: 'You are a {{role}} for {{company}}. Be {{tone}}.'
    },
    {
      type: 'placeholder',
      name: 'conversation_history'
    },
    {
      type: 'chatmessage',
      role: 'user',
      content: '{{user_query}}'
    }
  ],
  labels: ['production'],
  tags: ['support', 'chat'],
  config: {
    temperature: 0.7,
    max_tokens: 500
  }
});

// Fetch and use the prompt
const prompt = await langfuse.getPrompt('customer-support', undefined, {
  type: 'chat'
});

const trace = langfuse.trace({ name: 'support-chat' });

// Compile messages
const messages = prompt.compile(
  {
    role: 'helpful customer support agent',
    company: 'Acme Corp',
    tone: 'friendly and professional',
    user_query: 'How do I return a product?'
  },
  {
    conversation_history: [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi! How can I help you today?' }
    ]
  }
);

// Create generation with linked prompt
const generation = trace.generation({
  name: 'support-response',
  prompt: prompt,
  model: 'gpt-4',
  input: messages,
  modelParameters: prompt.config
});

// ... handle response ...

await langfuse.flushAsync();

Install with Tessl CLI

npx tessl i tessl/npm-langfuse

docs

configuration.md

datasets.md

index.md

media.md

openai-integration.md

prompts.md

public-api.md

tracing.md

tile.json