
# Prompt Management

Template management system supporting both text and chat-based prompts with variable interpolation, version control, and LangChain integration. Enables centralized prompt management and experimentation.

## Capabilities

### Prompt Client Types

Base type for prompt template clients supporting both text and chat formats.

```python { .api }
# Base type alias
PromptClient = Union[TextPromptClient, ChatPromptClient]
```
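
Because `get_prompt` returns one of these two client types, generic code sometimes needs to branch on which one it received. A minimal sketch, keying off the documented `prompt` attribute (`str` for text clients, a message list for chat clients); `"qa-prompt"` refers to the example prompt created later in this document:

```python
from langfuse import Langfuse

langfuse = Langfuse()
client = langfuse.get_prompt("qa-prompt")

# TextPromptClient.prompt is a str, ChatPromptClient.prompt is a list of
# messages, so the attribute type tells the two client flavors apart.
if isinstance(client.prompt, str):
    print(client.compile(question="What is AI?", context="..."))
else:
    for message in client.compile(question="What is AI?", context="..."):
        print(f"{message['role']}: {message['content']}")
```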

### Text Prompt Client

Manages text-based prompt templates with variable interpolation capabilities.

```python { .api }
class TextPromptClient:
    def __init__(self, name: str, version: int, config: Dict[str, Any],
                 labels: List[str], tags: List[str],
                 commit_message: Optional[str] = None,
                 prompt: Optional[str] = None):
        """Initialize text prompt client."""

    def compile(self, **kwargs) -> str:
        """Compile prompt template with provided variables.

        Args:
            **kwargs: Variable values for template interpolation

        Returns:
            Compiled prompt string with variables replaced

        Raises:
            ValueError: If required variables are missing
        """

    @property
    def variables(self) -> List[str]:
        """Get list of variable names from template.

        Returns:
            List of variable names found in the prompt template
        """

    def get_langchain_prompt(self) -> Any:
        """Convert prompt to LangChain PromptTemplate format.

        Returns:
            LangChain PromptTemplate instance
        """

    # Attributes
    name: str
    version: int
    config: Dict[str, Any]
    labels: List[str]
    tags: List[str]
    commit_message: Optional[str]
    prompt: str
```
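
The `variables` property and the `ValueError` raised by `compile` (both documented above) can be combined to fail fast on missing inputs. A short sketch, assuming the `qa-prompt` template from the usage examples below:

```python
prompt = langfuse.get_prompt("qa-prompt")

# Inspect the variables the template expects before compiling.
print(prompt.variables)  # e.g. ["question", "context"]

try:
    prompt.compile(question="What is AI?")  # "context" deliberately omitted
except ValueError as err:
    # compile raises ValueError when required variables are missing
    print(f"Missing template variables: {err}")
```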

### Chat Prompt Client

Manages chat-based prompt templates supporting message roles and placeholders.

```python { .api }
class ChatPromptClient:
    def __init__(self, name: str, version: int, config: Dict[str, Any],
                 labels: List[str], tags: List[str],
                 commit_message: Optional[str] = None,
                 prompt: Optional[List[ChatMessageWithPlaceholdersDict]] = None):
        """Initialize chat prompt client."""

    def compile(self, **kwargs) -> List[Dict[str, str]]:
        """Compile chat prompt with variables and resolve placeholders.

        Args:
            **kwargs: Variable values and placeholder content

        Returns:
            List of compiled chat messages with roles and content

        Raises:
            ValueError: If required variables or placeholders are missing
        """

    @property
    def variables(self) -> List[str]:
        """Get list of variable names from chat messages.

        Returns:
            List of variable names found across all chat messages
        """

    def get_langchain_prompt(self) -> Any:
        """Convert to LangChain ChatPromptTemplate format.

        Returns:
            LangChain ChatPromptTemplate instance
        """

    # Attributes
    name: str
    version: int
    config: Dict[str, Any]
    labels: List[str]
    tags: List[str]
    commit_message: Optional[str]
    prompt: List[ChatMessageWithPlaceholdersDict]
```

### Prompt Management Methods

Core methods for managing prompts through the Langfuse client.

```python { .api }
class Langfuse:
    def get_prompt(self, name: str, *, version: Optional[int] = None,
                   label: Optional[str] = None,
                   fallback: Optional[Union[str, List[ChatMessageDict]]] = None,
                   max_retries: int = 2, fetch_timeout_seconds: int = 3,
                   cache_ttl_seconds: int = 60) -> Union[TextPromptClient, ChatPromptClient]:
        """Fetch prompt template by name and version/label.

        Args:
            name: Prompt name
            version: Specific version to fetch (if not provided, gets latest)
            label: Label to fetch (alternative to version)
            fallback: Fallback prompt if fetch fails
            max_retries: Maximum retry attempts
            fetch_timeout_seconds: Request timeout
            cache_ttl_seconds: Cache time-to-live

        Returns:
            TextPromptClient or ChatPromptClient based on prompt type

        Raises:
            Exception: If prompt not found and no fallback provided
        """

    def create_prompt(self, *, name: str, prompt: Union[str, List[ChatMessageDict]],
                      config: Optional[Dict[str, Any]] = None,
                      labels: Optional[List[str]] = None,
                      tags: Optional[List[str]] = None) -> Union[TextPromptClient, ChatPromptClient]:
        """Create new prompt template.

        Args:
            name: Prompt name (must be unique for first version)
            prompt: Prompt content (string for text, list of messages for chat)
            config: Configuration metadata
            labels: Labels for categorization
            tags: Tags for organization

        Returns:
            Created prompt client (TextPromptClient or ChatPromptClient)
        """

    def clear_prompt_cache(self) -> None:
        """Clear local prompt cache to force fresh fetches."""
```
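
The caching parameters above are not exercised in the usage examples below, so here is a brief sketch of how they combine: a per-call TTL override plus an explicit cache flush.

```python
# Cache the fetched prompt locally for 5 minutes instead of the default 60s.
prompt = langfuse.get_prompt("qa-prompt", cache_ttl_seconds=300)

# After editing the prompt in Langfuse, force the next fetch to be fresh.
langfuse.clear_prompt_cache()
prompt = langfuse.get_prompt("qa-prompt")
```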

### Chat Message Types

Supporting types for chat-based prompts with role and placeholder support.

```python { .api }
# Basic chat message structure
ChatMessageDict = TypedDict('ChatMessageDict', {
    'role': str,     # "system", "user", "assistant", etc.
    'content': str,  # Message content
})

# Chat message with placeholder support
ChatMessagePlaceholderDict = TypedDict('ChatMessagePlaceholderDict', {
    'role': str,
    'content': List[Union[str, Dict[str, str]]],  # Mix of text and placeholders
})

# Union type for message handling
ChatMessageWithPlaceholdersDict = Union[ChatMessageDict, ChatMessagePlaceholderDict]
```
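
To make the two shapes concrete, here are literals conforming to each TypedDict, assuming both types are in scope; they mirror the messages used in the chat examples below:

```python
# A plain chat message (ChatMessageDict)
plain_message: ChatMessageDict = {
    "role": "user",
    "content": "Question: {question}",
}

# A message whose content mixes literal text with named placeholders
# (ChatMessagePlaceholderDict); compile() fills the placeholder keys.
placeholder_message: ChatMessagePlaceholderDict = {
    "role": "system",
    "content": [
        "You are an expert in ",
        {"type": "placeholder", "key": "expertise_area"},
    ],
}
```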

## Usage Examples

### Text Prompt Templates

```python
from langfuse import Langfuse

langfuse = Langfuse()

# Create a text prompt template
text_prompt = langfuse.create_prompt(
    name="qa-prompt",
    prompt="Answer the following question: {question}\n\nContext: {context}",
    labels=["qa", "production"],
    tags=["v1", "context-aware"]
)

print(f"Prompt: {text_prompt.name}")
print(f"Version: {text_prompt.version}")
print(f"Variables: {text_prompt.variables}")  # ["question", "context"]

# Use the prompt
compiled = text_prompt.compile(
    question="What is the capital of France?",
    context="France is a country in Western Europe..."
)
print(compiled)
# Output: "Answer the following question: What is the capital of France?\n\nContext: France is a country in Western Europe..."
```

### Chat Prompt Templates

```python
# Create a chat prompt template
chat_messages = [
    {"role": "system", "content": "You are a helpful assistant specialized in {domain}."},
    {"role": "user", "content": "Question: {question}"},
    {"role": "assistant", "content": "I'll help you with that {domain} question."}
]

chat_prompt = langfuse.create_prompt(
    name="assistant-chat",
    prompt=chat_messages,
    config={"temperature": 0.7, "max_tokens": 500},
    labels=["assistant", "chat"],
    tags=["conversational"]
)

# Use the chat prompt
compiled_messages = chat_prompt.compile(
    domain="mathematics",
    question="What is the derivative of x^2?"
)

for message in compiled_messages:
    print(f"{message['role']}: {message['content']}")
# system: You are a helpful assistant specialized in mathematics.
# user: Question: What is the derivative of x^2?
# assistant: I'll help you with that mathematics question.
```

### Fetching Existing Prompts

```python
# Get latest version of a prompt
prompt = langfuse.get_prompt("qa-prompt")

# Get specific version
prompt_v2 = langfuse.get_prompt("qa-prompt", version=2)

# Get by label
production_prompt = langfuse.get_prompt("qa-prompt", label="production")

# With fallback for reliability
prompt = langfuse.get_prompt(
    name="qa-prompt",
    fallback="Answer this question: {question}"
)

# Use retrieved prompt
response = prompt.compile(question="What is AI?")
```

### LangChain Integration

```python
# Convert Langfuse prompts to LangChain format
langfuse_prompt = langfuse.get_prompt("qa-prompt")
langchain_prompt = langfuse_prompt.get_langchain_prompt()

# Use with LangChain
from langchain.chains import LLMChain
from langchain.llms import OpenAI

llm = OpenAI()
chain = LLMChain(llm=llm, prompt=langchain_prompt)

result = chain.run(
    question="What is machine learning?",
    context="Machine learning is a subset of AI..."
)
```
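
`get_langchain_prompt` on a chat prompt client works the same way, returning a LangChain `ChatPromptTemplate`. A sketch using the `assistant-chat` prompt created above; `format_messages` is LangChain's standard API for rendering chat templates:

```python
chat_client = langfuse.get_prompt("assistant-chat")
langchain_chat_prompt = chat_client.get_langchain_prompt()

# Render the template to a list of LangChain message objects.
messages = langchain_chat_prompt.format_messages(
    domain="mathematics",
    question="What is the derivative of x^2?"
)
```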

### Complex Chat Templates with Placeholders

```python
# Advanced chat template with placeholders
complex_chat = [
    {
        "role": "system",
        "content": [
            "You are an expert in ",
            {"type": "placeholder", "key": "expertise_area"},
            ". Use the following context: ",
            {"type": "placeholder", "key": "context"}
        ]
    },
    {
        "role": "user",
        "content": "{user_question}"
    }
]

chat_prompt = langfuse.create_prompt(
    name="complex-assistant",
    prompt=complex_chat,
    config={"model": "gpt-4", "temperature": 0.3}
)

# Compile with placeholders and variables
compiled = chat_prompt.compile(
    expertise_area="quantum physics",
    context="Recent developments in quantum computing...",
    user_question="How do quantum computers work?"
)
```

### Prompt Versioning and Management

```python
# Create initial version
v1_prompt = langfuse.create_prompt(
    name="summarizer",
    prompt="Summarize the following text: {text}",
    labels=["v1"]
)

# Create improved version
v2_prompt = langfuse.create_prompt(
    name="summarizer",  # Same name creates new version
    prompt="Provide a concise summary of the following text, highlighting key points: {text}",
    labels=["v2", "improved"],
    tags=["production"]
)

# Compare versions
v1 = langfuse.get_prompt("summarizer", version=1)
v2 = langfuse.get_prompt("summarizer", version=2)

print(f"V1 variables: {v1.variables}")
print(f"V2 variables: {v2.variables}")

# Test both versions
test_text = "Long article about climate change..."

v1_result = v1.compile(text=test_text)
v2_result = v2.compile(text=test_text)

print("V1:", v1_result)
print("V2:", v2_result)
```

### Prompt Experimentation

```python
def experiment_with_prompts():
    """Compare different prompt versions in experiments."""
    # llm_generate and summary_quality_evaluator are application-defined
    # helpers (an LLM call and an evaluator), not part of the Langfuse SDK.

    # Define task functions for different prompt versions
    def task_v1(*, item, **kwargs):
        prompt = langfuse.get_prompt("summarizer", version=1)
        compiled = prompt.compile(text=item["input"])
        return llm_generate(compiled)

    def task_v2(*, item, **kwargs):
        prompt = langfuse.get_prompt("summarizer", version=2)
        compiled = prompt.compile(text=item["input"])
        return llm_generate(compiled)

    # Evaluation data
    test_data = [
        {"input": "Long text to summarize...", "expected_output": "Expected summary..."}
    ]

    # Run experiments with both versions
    result_v1 = langfuse.run_experiment(
        name="Summarizer V1 Test",
        data=test_data,
        task=task_v1,
        evaluators=[summary_quality_evaluator]
    )

    result_v2 = langfuse.run_experiment(
        name="Summarizer V2 Test",
        data=test_data,
        task=task_v2,
        evaluators=[summary_quality_evaluator]
    )

    return result_v1, result_v2

experiment_with_prompts()
```

### Dynamic Prompt Loading

```python
import time


class PromptManager:
    """Utility class for managing prompts with caching and fallbacks."""

    def __init__(self, langfuse_client):
        self.langfuse = langfuse_client
        self.cache = {}

    def get_prompt_with_cache(self, name, version=None, cache_duration=300):
        """Get prompt with local caching."""
        cache_key = f"{name}:{version or 'latest'}"

        if cache_key in self.cache:
            prompt, timestamp = self.cache[cache_key]
            if time.time() - timestamp < cache_duration:
                return prompt

        # Fetch fresh prompt
        try:
            prompt = self.langfuse.get_prompt(name, version=version)
            self.cache[cache_key] = (prompt, time.time())
            return prompt
        except Exception:
            # Return cached version if available, even if expired
            if cache_key in self.cache:
                return self.cache[cache_key][0]
            raise

    def compile_with_defaults(self, prompt_name, variables, defaults=None):
        """Compile prompt with default variable values."""
        prompt = self.get_prompt_with_cache(prompt_name)

        # Merge variables with defaults
        all_vars = (defaults or {}).copy()
        all_vars.update(variables)

        # Check for missing variables
        missing = set(prompt.variables) - set(all_vars.keys())
        if missing:
            raise ValueError(f"Missing variables: {missing}")

        return prompt.compile(**all_vars)


# Usage
prompt_manager = PromptManager(langfuse)

response = prompt_manager.compile_with_defaults(
    prompt_name="qa-prompt",
    variables={"question": "What is AI?"},
    defaults={"context": "General knowledge base"}
)
```

### Conditional Prompt Selection

```python
def get_appropriate_prompt(user_type, domain):
    """Select prompt based on user context."""

    prompt_mapping = {
        ("expert", "technical"): "expert-technical-prompt",
        ("beginner", "technical"): "beginner-technical-prompt",
        ("expert", "general"): "expert-general-prompt",
        ("beginner", "general"): "beginner-general-prompt"
    }

    prompt_name = prompt_mapping.get((user_type, domain), "default-prompt")

    try:
        return langfuse.get_prompt(prompt_name, label="production")
    except Exception:
        # Fallback to default
        return langfuse.get_prompt("default-prompt")


# Usage in application
def generate_response(question, user_profile):
    # llm_generate is an application-defined LLM call (not shown)
    prompt = get_appropriate_prompt(
        user_profile["experience_level"],
        user_profile["domain"]
    )

    compiled = prompt.compile(
        question=question,
        user_level=user_profile["experience_level"]
    )

    return llm_generate(compiled)
```