# Prompt Management

Comprehensive prompt management system with versioning, caching, and support for both text and chat formats. Prompts are templates that can be compiled with variables and linked to generations for tracking.

## Capabilities

### Fetching Prompts

Retrieve prompts with automatic caching and version management.

```typescript { .api }
/**
 * Fetches a text prompt with caching support
 * @param name - Prompt name
 * @param version - Optional version (defaults to latest production)
 * @param options - Optional cache TTL configuration
 * @returns Text prompt client
 */
getPrompt(name: string, version?: number, options?: GetPromptOptions): Promise<TextPromptClient>;

/**
 * Fetches a chat prompt with caching support
 * @param name - Prompt name
 * @param version - Optional version (defaults to latest production)
 * @param options - Chat-specific configuration
 * @returns Chat prompt client
 */
getPrompt(name: string, version?: number, options?: GetPromptOptionsChat): Promise<ChatPromptClient>;

interface GetPromptOptions {
  /** Label to fetch (default: "production") */
  label?: string;
  /** Cache type: "text" */
  type?: "text";
  /** Cache TTL in seconds (default: 60) */
  cacheTtlSeconds?: number;
  /** Fallback prompt string if not found */
  fallback?: string;
  /** Maximum number of retries for prompt fetch (default: 2, max: 4) */
  maxRetries?: number;
  /** Fetch timeout in milliseconds (default: from client config) */
  fetchTimeoutMs?: number;
}

interface GetPromptOptionsChat {
  /** Label to fetch (default: "production") */
  label?: string;
  /** Cache type: "chat" */
  type: "chat";
  /** Cache TTL in seconds (default: 60) */
  cacheTtlSeconds?: number;
  /** Fallback prompt messages array if not found */
  fallback?: ChatMessage[];
  /** Maximum number of retries for prompt fetch (default: 2, max: 4) */
  maxRetries?: number;
  /** Fetch timeout in milliseconds (default: from client config) */
  fetchTimeoutMs?: number;
}

interface TextPrompt {
  type: "text";
  name: string;
  version: number;
  prompt: string;
  config: unknown;
  labels: string[];
  tags: string[];
  commitMessage?: string | null;
}

interface ChatPrompt {
  type: "chat";
  name: string;
  version: number;
  prompt: ChatMessageWithPlaceholders[];
  config: unknown;
  labels: string[];
  tags: string[];
  commitMessage?: string | null;
}
```

**Usage Example:**

```typescript
import { Langfuse } from 'langfuse';

const langfuse = new Langfuse();

// Fetch latest production text prompt
const textPrompt = await langfuse.getPrompt('greeting-template');

// Fetch specific version
const versionedPrompt = await langfuse.getPrompt('greeting-template', 3);

// Fetch chat prompt with custom cache
const chatPrompt = await langfuse.getPrompt(
  'chat-template',
  undefined,
  { type: 'chat', cacheTtlSeconds: 300 }
);

// Fetch with fallback (text prompts take a plain string fallback)
const promptWithFallback = await langfuse.getPrompt('new-prompt', undefined, {
  fallback: 'Default prompt text'
});
```

### Creating Prompts

Create new prompts programmatically.

```typescript { .api }
/**
 * Creates a new text prompt
 * @param body - Text prompt configuration
 * @returns Text prompt client
 */
createPrompt(body: CreateTextPromptBody): Promise<TextPromptClient>;

/**
 * Creates a new chat prompt
 * @param body - Chat prompt configuration
 * @returns Chat prompt client
 */
createPrompt(body: CreateChatPromptBody): Promise<ChatPromptClient>;

/**
 * Creates a new chat prompt with placeholders
 * @param body - Chat prompt configuration with placeholder support
 * @returns Chat prompt client
 */
createPrompt(body: CreateChatPromptBodyWithPlaceholders): Promise<ChatPromptClient>;

interface CreateTextPromptBody {
  /** Prompt type (default: "text") */
  type?: "text";
  /** Prompt name */
  name: string;
  /** Prompt content with {{variable}} placeholders */
  prompt: string;
  /** Optional configuration (model parameters, etc.) */
  config?: any;
  /** Labels for filtering (e.g., "production") */
  labels?: string[];
  /** Tags for organization */
  tags?: string[];
  /** @deprecated Use labels instead */
  isActive?: boolean;
}

interface CreateChatPromptBody {
  /** Prompt type */
  type: "chat";
  /** Prompt name */
  name: string;
  /** Array of chat messages */
  prompt: ChatMessage[];
  /** Optional configuration (model parameters, etc.) */
  config?: any;
  /** Labels for filtering (e.g., "production") */
  labels?: string[];
  /** Tags for organization */
  tags?: string[];
  /** @deprecated Use labels instead */
  isActive?: boolean;
}

interface CreateChatPromptBodyWithPlaceholders {
  /** Prompt type */
  type: "chat";
  /** Prompt name */
  name: string;
  /** Array of chat messages with placeholders */
  prompt: ChatMessageWithPlaceholders[];
  /** Optional configuration (model parameters, etc.) */
  config?: any;
  /** Labels for filtering (e.g., "production") */
  labels?: string[];
  /** Tags for organization */
  tags?: string[];
}

interface ChatMessage {
  /** Message role (system, user, assistant, etc.) */
  role: string;
  /** Message content with {{variable}} placeholders */
  content: string;
}

interface PlaceholderMessage {
  /** Placeholder variable name */
  name: string;
}

type ChatMessageWithPlaceholders =
  | ({ type: "chatmessage" } & ChatMessage)
  | ({ type: "placeholder" } & PlaceholderMessage);

/**
 * Note: The langfuse package does not export a ChatMessageType enum.
 * Use string literals "chatmessage" or "placeholder" for the type field.
 */
```

**Usage Example:**

```typescript
// Create text prompt
const textPrompt = await langfuse.createPrompt({
  name: 'greeting',
  prompt: 'Hello {{name}}! Welcome to {{app_name}}.',
  labels: ['production'],
  tags: ['greeting']
});

// Create chat prompt
const chatPrompt = await langfuse.createPrompt({
  type: 'chat',
  name: 'assistant-chat',
  prompt: [
    { role: 'system', content: 'You are a helpful assistant for {{company}}.' },
    { role: 'user', content: '{{user_message}}' }
  ],
  labels: ['production'],
  config: {
    temperature: 0.7,
    max_tokens: 500
  }
});

// Create chat prompt with placeholders
const chatPromptWithPlaceholders = await langfuse.createPrompt({
  type: 'chat',
  name: 'dynamic-chat',
  prompt: [
    {
      type: 'chatmessage',
      role: 'system',
      content: 'You are {{assistant_role}}.'
    },
    {
      type: 'placeholder',
      name: 'history'
    },
    {
      type: 'chatmessage',
      role: 'user',
      content: '{{query}}'
    }
  ],
  labels: ['production']
});
```

### Updating Prompts

Update prompt labels for version management.

```typescript { .api }
/**
 * Updates prompt labels
 * @param body - Update configuration
 * @returns Updated prompt client
 */
updatePrompt(body: {
  name: string;
  version: number;
  newLabels: string[];
}): Promise<LangfusePromptClient>;

type LangfusePromptClient = TextPromptClient | ChatPromptClient;
```

**Usage Example:**

```typescript
// Promote a prompt version to production
await langfuse.updatePrompt({
  name: 'greeting',
  version: 5,
  newLabels: ['production', 'stable']
});
```

### TextPromptClient

Client for working with text-based prompts using Mustache templating.

```typescript { .api }
class TextPromptClient {
  /** Prompt name */
  name: string;
  /** Prompt version */
  version: number;
  /** Prompt configuration */
  config: unknown;
  /** Prompt labels (e.g., "production") */
  labels: string[];
  /** Prompt tags */
  tags: string[];
  /** Whether this is a fallback prompt */
  isFallback: boolean;
  /** Prompt type */
  type: "text";
  /** Commit message for this version */
  commitMessage: string | null | undefined;
  /** The raw prompt text */
  prompt: string;
  /** The full prompt response object */
  promptResponse: TextPrompt;

  /**
   * Compiles the prompt by replacing {{variable}} placeholders with provided values
   * @param variables - Variable values for substitution
   * @param _placeholders - Unused for text prompts
   * @returns Compiled prompt string
   */
  compile(variables?: Record<string, string>, _placeholders?: Record<string, any>): string;

  /**
   * Returns a Langchain-compatible prompt string with variables in {variable} format
   * @param options - Unused for text prompts
   * @returns Prompt string with Langchain variable syntax
   */
  getLangchainPrompt(options?: { placeholders?: Record<string, any> }): string;

  /**
   * Returns a JSON string representation of the prompt
   * @returns JSON string
   */
  toJSON(): string;
}
```

**Usage Example:**

```typescript
const prompt = await langfuse.getPrompt('greeting');

// Compile with variables
const compiled = prompt.compile({
  name: 'Alice',
  app_name: 'MyApp'
});
// Result: "Hello Alice! Welcome to MyApp."

// Get Langchain format
const langchainFormat = prompt.getLangchainPrompt();
// Result: "Hello {name}! Welcome to {app_name}."

// Use with generation
const generation = trace.generation({
  name: 'greeting-generation',
  prompt: prompt,
  model: 'gpt-4',
  input: compiled
});

// Access prompt metadata
console.log(prompt.version); // 3
console.log(prompt.labels); // ["production"]
console.log(prompt.config); // { temperature: 0.7 }
```

### ChatPromptClient

Client for working with chat-based prompts with support for message arrays and placeholders.

```typescript { .api }
class ChatPromptClient {
  /** Prompt name */
  name: string;
  /** Prompt version */
  version: number;
  /** Prompt configuration */
  config: unknown;
  /** Prompt labels (e.g., "production") */
  labels: string[];
  /** Prompt tags */
  tags: string[];
  /** Whether this is a fallback prompt */
  isFallback: boolean;
  /** Prompt type */
  type: "chat";
  /** Commit message for this version */
  commitMessage: string | null | undefined;
  /** Array of chat messages with optional placeholders */
  prompt: ChatMessageWithPlaceholders[];
  /** The full prompt response object */
  promptResponse: ChatPrompt;

  /**
   * Compiles the prompt by:
   * 1. Replacing placeholder messages with provided placeholder values
   * 2. Rendering variables in message content using Mustache
   * @param variables - Variable values for {{variable}} substitution
   * @param placeholders - Placeholder values for message array insertion
   * @returns Array of ChatMessage objects and unresolved placeholders
   */
  compile(
    variables?: Record<string, string>,
    placeholders?: Record<string, any>
  ): (ChatMessageOrPlaceholder | any)[];

  /**
   * Returns a Langchain-compatible prompt with:
   * - Placeholders filled in or converted to Langchain MessagesPlaceholder format
   * - Variables transformed from {{var}} to {var} format
   * @param options - Placeholder values
   * @returns Array compatible with Langchain's ChatPromptTemplate
   */
  getLangchainPrompt(
    options?: { placeholders?: Record<string, any> }
  ): (ChatMessage | LangchainMessagesPlaceholder | any)[];

  /**
   * Returns a JSON string representation of the prompt
   * @returns JSON string
   */
  toJSON(): string;
}

type ChatMessageOrPlaceholder =
  | ChatMessage
  | ({ type: "placeholder" } & PlaceholderMessage);

interface LangchainMessagesPlaceholder {
  /** Placeholder variable name */
  variableName: string;
  /** Whether placeholder is optional */
  optional: boolean;
}
```

**Usage Example:**

```typescript
// Fetch chat prompt with placeholders
const chatPrompt = await langfuse.getPrompt('assistant-chat', undefined, {
  type: 'chat'
});

// Compile with variables and placeholders
const compiled = chatPrompt.compile(
  {
    assistant_role: 'customer support agent',
    query: 'How do I reset my password?'
  },
  {
    history: [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi! How can I help?' }
    ]
  }
);
// Result: [
//   { role: 'system', content: 'You are customer support agent.' },
//   { role: 'user', content: 'Hello' },
//   { role: 'assistant', content: 'Hi! How can I help?' },
//   { role: 'user', content: 'How do I reset my password?' }
// ]

// Get Langchain format
const langchainFormat = chatPrompt.getLangchainPrompt({
  placeholders: {
    history: [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi!' }
    ]
  }
});

// Use with generation
const messages = chatPrompt.compile(
  { assistant_role: 'helper', query: 'test' },
  { history: [] }
);

const generation = trace.generation({
  name: 'chat-completion',
  prompt: chatPrompt,
  model: 'gpt-4',
  input: messages
});
```

## Prompt Caching

Langfuse automatically caches prompts to reduce API calls. The caching system includes:

- **TTL-based caching**: Prompts are cached for the specified `cacheTtlSeconds` (default: 60 seconds)
- **Background refresh**: Cache is refreshed in the background before expiration
- **Label-based fetching**: Fetch by label (e.g., "production") to always get the latest version
- **Version locking**: Fetch by version number for consistent behavior

**Usage Example:**

```typescript
// Short-lived cache (5 seconds)
const prompt1 = await langfuse.getPrompt('template', undefined, {
  cacheTtlSeconds: 5
});

// Long-lived cache (1 hour)
const prompt2 = await langfuse.getPrompt('stable-template', undefined, {
  cacheTtlSeconds: 3600
});

// Fetch specific version (cached separately)
const prompt3 = await langfuse.getPrompt('template', 5);

// Fetch by label
const prodPrompt = await langfuse.getPrompt('template', undefined, {
  label: 'production'
});

const devPrompt = await langfuse.getPrompt('template', undefined, {
  label: 'development'
});
```

## Complete Prompt Example

```typescript
import { Langfuse } from 'langfuse';

const langfuse = new Langfuse();

// Create a chat prompt with placeholders
await langfuse.createPrompt({
  type: 'chat',
  name: 'customer-support',
  prompt: [
    {
      type: 'chatmessage',
      role: 'system',
      content: 'You are a {{role}} for {{company}}. Be {{tone}}.'
    },
    {
      type: 'placeholder',
      name: 'conversation_history'
    },
    {
      type: 'chatmessage',
      role: 'user',
      content: '{{user_query}}'
    }
  ],
  labels: ['production'],
  tags: ['support', 'chat'],
  config: {
    temperature: 0.7,
    max_tokens: 500
  }
});

// Fetch and use the prompt
const prompt = await langfuse.getPrompt('customer-support', undefined, {
  type: 'chat'
});

const trace = langfuse.trace({ name: 'support-chat' });

// Compile messages
const messages = prompt.compile(
  {
    role: 'helpful customer support agent',
    company: 'Acme Corp',
    tone: 'friendly and professional',
    user_query: 'How do I return a product?'
  },
  {
    conversation_history: [
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi! How can I help you today?' }
    ]
  }
);

// Create generation with linked prompt
const generation = trace.generation({
  name: 'support-response',
  prompt: prompt,
  model: 'gpt-4',
  input: messages,
  modelParameters: prompt.config
});

// ... handle response ...

await langfuse.flushAsync();
```