or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

configuration.md embeddings.md index.md models-and-conversations.md plugins.md templates.md tools-and-toolboxes.md

docs/templates.md

0

# Templates

1

2

Prompt template system with variable substitution, attachment handling, and fragment management for reusable prompt components. This module enables structured prompt creation with dynamic content insertion and modular design patterns.

3

4

## Capabilities

5

6

### Template Management

7

8

The Template class provides comprehensive prompt template functionality with variable substitution and configuration management.

9

10

```python { .api }

11

class Template:

12

"""Prompt template with variable substitution support."""

13

14

def __init__(

15

self,

16

name: str,

17

prompt: Optional[str] = None,

18

system: Optional[str] = None,

19

attachments: Optional[List[str]] = None,

20

attachment_types: Optional[List[AttachmentType]] = None,

21

model: Optional[str] = None,

22

defaults: Optional[Dict[str, Any]] = None,

23

options: Optional[Dict[str, Any]] = None,

24

extract: Optional[bool] = None,

25

extract_last: Optional[bool] = None,

26

schema_object: Optional[dict] = None,

27

fragments: Optional[List[str]] = None,

28

system_fragments: Optional[List[str]] = None,

29

tools: Optional[List[str]] = None,

30

functions: Optional[str] = None

31

):

32

"""

33

Initialize template.

34

35

Args:

36

name: Template name/identifier

37

prompt: Main prompt text with variable placeholders

38

system: System message template

39

attachments: List of attachment specifications

40

attachment_types: List of AttachmentType objects with type/value pairs

41

model: Default model to use with this template

42

defaults: Default values for template variables

43

options: Model configuration options

44

extract: Extract fenced code blocks from response

45

extract_last: Extract only the last fenced code block

46

schema_object: JSON schema for structured output

47

fragments: List of fragment specifications to include

48

system_fragments: List of fragment specifications for system prompt

49

tools: List of tool specifications

50

functions: Python functions for dynamic template processing

51

"""

52

53

def evaluate(

54

self,

55

input: str,

56

params: Optional[Dict[str, Any]] = None

57

) -> Tuple[Optional[str], Optional[str]]:

58

"""

59

Evaluate template with input and parameters.

60

61

Args:

62

input: Input value for template evaluation

63

params: Dictionary of variable values

64

65

Returns:

66

Tuple of (evaluated_prompt, evaluated_system)

67

"""

68

69

def vars(self) -> set:

70

"""

71

Extract all variable names from template.

72

73

Returns:

74

Set of variable names found in prompt and system templates

75

"""

76

77

@staticmethod

78

def interpolate(text: str, params: Dict[str, Any]) -> Optional[str]:

79

"""

80

Interpolate variables in text template.

81

82

Args:

83

text: Template text with $variable or ${variable} placeholders

84

params: Dictionary of variable values

85

86

Returns:

87

Interpolated text or None if text is None

88

"""

89

90

name: str

91

prompt: Optional[str]

92

system: Optional[str]

93

attachments: Optional[List[str]]

94

model: Optional[str]

95

defaults: Optional[Dict[str, Any]]

96

options: Optional[Dict[str, Any]]

97

schema_object: Optional[dict]

98

tools: Optional[List[str]]

99

```

100

101

### Template Loaders

102

103

Plugin-based system for loading templates from various sources.

104

105

```python { .api }

106

def get_template_loaders() -> Dict[str, Callable[[str], Template]]:

107

"""

108

Get template loaders registered by plugins.

109

110

Returns:

111

Dictionary mapping loader prefixes to loader functions

112

"""

113

```

114

115

### Attachment Configuration

116

117

Support for specifying attachments within templates.

118

119

```python { .api }

120

class AttachmentType(BaseModel):

121

"""Pydantic model for attachment type specifications."""

122

123

model_config = ConfigDict(extra="forbid")

124

125

type: str

126

value: str

127

```

128

129

### Fragment System

130

131

Fragment objects provide text components with source tracking for modular prompt construction.

132

133

```python { .api }

134

class Fragment(str):

135

"""Text fragment with source tracking for provenance."""

136

137

def __new__(cls, content: str, source: str = ""):

138

"""

139

Create new fragment.

140

141

Args:

142

content: Fragment text content

143

source: Source information for tracking

144

"""

145

obj = str.__new__(cls, content)

146

obj.source = source

147

return obj

148

149

def id(self) -> str:

150

"""Generate stable hash ID for fragment caching."""

151

152

source: str

153

154

def get_fragment_loaders() -> Dict[

155

str,

156

Callable[[str], Union[Fragment, Attachment, List[Union[Fragment, Attachment]]]]

157

]:

158

"""

159

Get fragment loaders registered by plugins.

160

161

Returns:

162

Dictionary mapping loader prefixes to loader functions

163

"""

164

```

165

166

## Usage Examples

167

168

### Basic Template Usage

169

170

```python

171

import llm

172

173

# Create simple template

174

template = llm.Template(

175

name="greeting",

176

prompt="Hello $name, welcome to $place!",

177

system="You are a friendly assistant."

178

)

179

180

# Use template variables

181

variables = template.vars()

182

print(f"Template variables: {variables}") # {'name', 'place'}

183

184

# Evaluate template

185

prompt_text, system_text = template.evaluate(

186

input="", # Not used in this example

187

params={"name": "Alice", "place": "Python programming"}

188

)

189

190

print(f"Prompt: {prompt_text}")

191

print(f"System: {system_text}")

192

```

193

194

### Template with Defaults

195

196

```python

197

import llm

198

199

# Template with default values

200

template = llm.Template(

201

name="code_review",

202

prompt="""

203

Please review this $language code and provide feedback on:

204

- Code quality (scale: $scale)

205

- Best practices

206

- Potential improvements

207

208

Code:

209

$code

210

""",

211

defaults={

212

"language": "Python",

213

"scale": "1-10"

214

}

215

)

216

217

# Use with some defaults, override others

218

prompt_text, _ = template.evaluate(

219

input="",

220

params={

221

"code": "def hello():\n print('Hello, World!')",

222

"language": "JavaScript" # Override default

223

}

224

)

225

226

print(prompt_text)

227

```

228

229

### Template with Model Configuration

230

231

```python

232

import llm

233

234

# Template with model and options specified

235

template = llm.Template(

236

name="creative_writing",

237

prompt="Write a $genre story about $topic",

238

model="gpt-4",

239

options={

240

"temperature": 0.8,

241

"max_tokens": 500

242

},

243

defaults={

244

"genre": "science fiction"

245

}

246

)

247

248

# Use template with model

249

model = llm.get_model(template.model)

250

prompt_text, system_text = template.evaluate(

251

input="",

252

params={"topic": "time travel"}

253

)

254

255

# Apply template options to model

256

response = model.prompt(

257

prompt_text,

258

system=system_text,

259

**template.options

260

)

261

print(response.text())

262

```

263

264

### Template with Schema

265

266

```python

267

import llm

268

269

# Template for structured output

270

schema = {

271

"type": "object",

272

"properties": {

273

"summary": {"type": "string"},

274

"key_points": {

275

"type": "array",

276

"items": {"type": "string"}

277

},

278

"sentiment": {

279

"type": "string",

280

"enum": ["positive", "negative", "neutral"]

281

}

282

},

283

"required": ["summary", "sentiment"]

284

}

285

286

template = llm.Template(

287

name="text_analysis",

288

prompt="Analyze this text and extract key information: $text",

289

schema_object=schema,

290

model="gpt-4"

291

)

292

293

# Use template for structured analysis

294

model = llm.get_model(template.model)

295

prompt_text, _ = template.evaluate(

296

input="",

297

params={"text": "I love this new Python library! It makes development so much easier."}

298

)

299

300

response = model.prompt(prompt_text, schema=template.schema_object)

301

analysis = response.response_json()

302

print(f"Summary: {analysis['summary']}")

303

print(f"Sentiment: {analysis['sentiment']}")

304

```

305

306

### Template with Tools

307

308

```python

309

import llm

310

311

def search_web(query: str) -> str:

312

"""Search the web for information."""

313

return f"Search results for: {query}"

314

315

def get_current_date() -> str:

316

"""Get current date."""

317

from datetime import datetime

318

return datetime.now().strftime("%Y-%m-%d")

319

320

# Template that uses tools

321

template = llm.Template(

322

name="research_assistant",

323

prompt="""

324

Research the topic: $topic

325

Focus on information from $timeframe.

326

Provide a comprehensive overview with recent developments.

327

""",

328

tools=["search_web", "get_current_date"],

329

defaults={

330

"timeframe": "the last year"

331

}

332

)

333

334

# Use template with tools

335

search_tool = llm.Tool.function(search_web)

336

date_tool = llm.Tool.function(get_current_date)

337

tools = [search_tool, date_tool]

338

339

model = llm.get_model("gpt-4")

340

prompt_text, _ = template.evaluate(

341

input="",

342

params={"topic": "artificial intelligence trends"}

343

)

344

345

response = model.prompt(prompt_text, tools=tools)

346

print(response.text())

347

```

348

349

### Template with Attachments

350

351

```python

352

import llm

353

354

# Template that specifies attachments

355

template = llm.Template(

356

name="image_analysis",

357

prompt="Analyze this $image_type image and describe what you see: $description_focus",

358

attachments=["$image_path"],

359

defaults={

360

"image_type": "photograph",

361

"description_focus": "main subjects and activities"

362

}

363

)

364

365

# Use template with image attachment

366

model = llm.get_model("gpt-4-vision")

367

prompt_text, _ = template.evaluate(

368

input="",

369

params={

370

"image_path": "/path/to/photo.jpg",

371

"description_focus": "architectural details"

372

}

373

)

374

375

# Create attachment from template specification

376

attachment = llm.Attachment(path="/path/to/photo.jpg")

377

response = model.prompt(prompt_text, attachments=[attachment])

378

print(response.text())

379

```

380

381

### Fragment-Based Templates

382

383

```python

384

import llm

385

386

# Create reusable fragments

387

code_review_intro = llm.Fragment(

388

"Please review the following code for:",

389

source="code_review_templates"

390

)

391

392

quality_criteria = llm.Fragment("""

393

- Code clarity and readability

394

- Performance considerations

395

- Security best practices

396

- Error handling

397

""", source="review_criteria")

398

399

# Combine fragments in template

400

template = llm.Template(

401

name="comprehensive_review",

402

prompt=f"""

403

{code_review_intro}

404

{quality_criteria}

405

406

Language: $language

407

Code:

408

$code

409

410

Additional focus: $focus

411

""",

412

defaults={

413

"focus": "general best practices"

414

}

415

)

416

417

# Use fragment-based template

418

prompt_text, _ = template.evaluate(

419

input="",

420

params={

421

"language": "Python",

422

"code": "def calculate(x, y):\n return x / y",

423

"focus": "error handling and input validation"

424

}

425

)

426

427

model = llm.get_model()

428

response = model.prompt(prompt_text)

429

print(response.text())

430

```

431

432

### Advanced Variable Interpolation

433

434

```python

435

import llm

436

437

# Template with simple variable substitution

438

# Note: The actual Template.interpolate method uses simple $variable syntax

439

template = llm.Template(

440

name="document_generator",

441

prompt="""

442

Generate a $document_type about $topic.

443

Target audience: $audience

444

Length: $length

445

Style: $style

446

Requirements: $requirements

447

""",

448

defaults={

449

"document_type": "article",

450

"style": "professional",

451

"length": "500 words"

452

}

453

)

454

455

prompt_text, _ = template.evaluate(

456

input="",

457

params={

458

"topic": "machine learning basics",

459

"audience": "beginners",

460

"requirements": "include examples, avoid jargon, provide references"

461

}

462

)

463

464

model = llm.get_model()

465

response = model.prompt(prompt_text)

466

print(response.text())

467

```

468

469

### Plugin-Based Template Loading

470

471

```python

472

import llm

473

474

# Get available template loaders

475

loaders = llm.get_template_loaders()

476

print(f"Available template loaders: {list(loaders.keys())}")

477

478

# Fragment loaders for modular content

479

fragment_loaders = llm.get_fragment_loaders()

480

print(f"Available fragment loaders: {list(fragment_loaders.keys())}")

481

482

# Example of how a plugin might register template loaders

483

@llm.hookimpl

484

def register_template_loaders(register):

485

"""Register custom template loaders."""

486

487

def yaml_template_loader(spec: str) -> llm.Template:

488

"""Load template from YAML specification."""

489

import yaml

490

491

# Parse YAML spec

492

config = yaml.safe_load(spec)

493

494

return llm.Template(

495

name=config['name'],

496

prompt=config.get('prompt'),

497

system=config.get('system'),

498

model=config.get('model'),

499

defaults=config.get('defaults', {}),

500

options=config.get('options', {})

501

)

502

503

register("yaml", yaml_template_loader)

504

505

# Templates would then be loadable via: yaml:template_spec

506

```

507

508

This comprehensive template system enables reusable, configurable prompt patterns that can be shared across projects and customized for specific use cases. The combination of variable substitution, fragments, and plugin-based loading provides maximum flexibility for prompt engineering workflows.