or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

chat-interface.md · client-management.md · document-prompt-template.md · embeddings.md · evaluation.md · explanations.md · index.md · prompt-construction.md · steering.md · structured-output.md · text-completion.md · tokenization.md · translation.md · utilities.md

docs/document-prompt-template.md

# Document & Prompt Templates

Advanced prompt construction tools for working with documents and creating reusable prompt templates. Supports DOCX documents, template-based prompt generation with Liquid syntax, and flexible document representation.

## Capabilities

### Document Handling

Create and manage documents from various sources including DOCX files, text, and multimodal prompts.

```python { .api }
class Document:
    docx: Optional[str] = None
    prompt: Optional[Sequence[Union[str, Text, Image, Tokens]]] = None
    text: Optional[str] = None

    def __init__(
        self,
        docx: Optional[str] = None,
        prompt: Optional[Sequence[Union[str, Text, Image, Tokens]]] = None,
        text: Optional[str] = None
    ):
        """
        A document that can be either a DOCX document or text/image prompts.

        Parameters:
        - docx: Base64 encoded DOCX document
        - prompt: Sequence of prompt items (text, images, tokens)
        - text: Plain text content
        """

    @classmethod
    def from_docx_bytes(cls, bytes: bytes) -> Document:
        """
        Create document from DOCX file bytes.

        Parameters:
        - bytes: DOCX file content as bytes

        Returns:
        Document instance with DOCX content
        """

    @classmethod
    def from_docx_file(cls, path: str) -> Document:
        """
        Load a DOCX file from disk and prepare it as a document.

        Parameters:
        - path: Path to DOCX file

        Returns:
        Document instance loaded from file
        """

    @classmethod
    def from_prompt(cls, prompt: Union[Prompt, Sequence[Union[str, Image]]]) -> Document:
        """
        Create document from prompt containing multiple strings and images.

        Parameters:
        - prompt: Prompt or sequence of prompt items

        Returns:
        Document instance from prompt
        """

    @classmethod
    def from_text(cls, text: str) -> Document:
        """
        Create document from plain text.

        Parameters:
        - text: Text content

        Returns:
        Document instance with text content
        """
```

### Prompt Templates

Create reusable prompt templates using Liquid template syntax with support for multimodal content.

```python { .api }
class PromptTemplate:
    template: Template
    non_text_items: Dict[Placeholder, Union[Image, Tokens]]

    def __init__(self, template_str: str):
        """
        Create prompt template using Liquid template language.

        Allows building prompts with dynamic content and embedded non-text items
        like images and tokens using placeholder system.

        Parameters:
        - template_str: Liquid template string
        """

    def placeholder(self, prompt_item: Union[Image, Tokens]) -> Placeholder:
        """
        Save a non-text prompt item and return a placeholder.

        The placeholder is used to embed the prompt item in the template.

        Parameters:
        - prompt_item: Image or Tokens to embed

        Returns:
        Placeholder UUID for template embedding
        """

    def embed_prompt(self, prompt: Prompt) -> str:
        """
        Embed a prompt in a prompt template.

        Adds whitespace between text items if needed and embeds non-text items.

        Parameters:
        - prompt: Prompt to embed in template

        Returns:
        String representation with embedded placeholders
        """

    def to_prompt(self, **kwargs) -> Prompt:
        """
        Create a Prompt from the template string and parameters.

        Parameters are passed to liquid.Template.render for variable substitution.

        Parameters:
        - **kwargs: Template variables for rendering

        Returns:
        Rendered Prompt with all substitutions applied
        """
```

### Type Definitions

Supporting types for template and document functionality.

```python { .api }
Placeholder = NewType("Placeholder", UUID)
```

### Usage Examples

Working with documents and prompt templates:

```python
from aleph_alpha_client import (
    Document,
    PromptTemplate,
    Image,
    Prompt,
    Text,
    Tokens,
    Client,
    CompletionRequest
)
from pathlib import Path

client = Client(token="your-api-token")

# Document creation from different sources

# 1. From DOCX file
docx_document = Document.from_docx_file("./reports/quarterly_report.docx")

# 2. From plain text
text_document = Document.from_text("This is a sample document for analysis.")

# 3. From multimodal prompt
image = Image.from_file(Path("./images/chart.png"))
multimodal_prompt = Prompt([
    Text.from_text("Financial overview:"),
    image,
    Text.from_text("See attached chart for details.")
])
prompt_document = Document.from_prompt(multimodal_prompt)

# 4. From DOCX bytes (useful for uploaded files)
with open("./documents/contract.docx", "rb") as f:
    docx_bytes = f.read()
bytes_document = Document.from_docx_bytes(docx_bytes)

# Prompt Template Examples

# Basic template with variables
template = PromptTemplate("""
{%- for name in names -%}
Hello {{name}}!
{% endfor -%}
Today is {{date}} and the weather is {{weather}}.
""")

prompt = template.to_prompt(
    names=["Alice", "Bob", "Charlie"],
    date="Monday",
    weather="sunny"
)

request = CompletionRequest(
    prompt=prompt,
    maximum_tokens=100,
    temperature=0.7
)

response = client.complete(request, model="luminous-extended")
print("Template response:")
print(response.completions[0].completion)

# Template with embedded images
image = Image.from_file(Path("./analysis/data_chart.png"))
image_template = PromptTemplate("""
Please analyze this data visualization:

{{ image_placeholder }}

Focus on these aspects:
{%- for aspect in analysis_points %}
- {{ aspect }}
{%- endfor %}

Provide insights in {{ style }} format.
""")

# Create placeholder for the image
image_placeholder = image_template.placeholder(image)

analysis_prompt = image_template.to_prompt(
    image_placeholder=image_placeholder,
    analysis_points=["trends", "outliers", "correlations"],
    style="bullet points"
)

analysis_request = CompletionRequest(
    prompt=analysis_prompt,
    maximum_tokens=200,
    temperature=0.4
)

analysis_response = client.complete(analysis_request, model="luminous-extended")
print("Image analysis:")
print(analysis_response.completions[0].completion)

# Complex template with conditional logic
report_template = PromptTemplate("""
# {{report_title}}

{% if include_summary -%}
## Executive Summary
This report covers {{topic}} for the period {{period}}.
{% endif -%}

## Key Findings
{%- for finding in findings %}
{{loop.index}}. {{ finding }}
{%- endfor %}

{% if include_image -%}
## Visual Analysis
{{ chart_placeholder }}
{%- endif %}

{% if recommendations -%}
## Recommendations
{%- for rec in recommendations %}
- {{ rec }}
{%- endfor %}
{%- endif %}
""")

# Use with conditional content
chart_image = Image.from_file(Path("./charts/performance.png"))
chart_placeholder = report_template.placeholder(chart_image)

report_prompt = report_template.to_prompt(
    report_title="Q3 Performance Analysis",
    include_summary=True,
    topic="sales performance",
    period="Q3 2024",
    findings=[
        "Sales increased by 15% compared to Q2",
        "Customer satisfaction improved to 4.2/5",
        "New product launch exceeded expectations"
    ],
    include_image=True,
    chart_placeholder=chart_placeholder,
    recommendations=[
        "Expand successful product lines",
        "Increase marketing in high-performing regions",
        "Optimize inventory based on demand patterns"
    ]
)

report_request = CompletionRequest(
    prompt=report_prompt,
    maximum_tokens=300,
    temperature=0.3
)

report_response = client.complete(report_request, model="luminous-extended")
print("Generated report:")
print(report_response.completions[0].completion)

# Embedding existing prompts in templates
user_query = Prompt([
    Text.from_text("How does machine learning work?"),
    Image.from_file(Path("./diagrams/ml_workflow.png"))
])

qa_template = PromptTemplate("""
User Question: {{ embedded_question }}

Please provide a comprehensive answer that:
1. Addresses the specific question
2. References the provided diagram
3. Uses {{complexity_level}} language
4. Includes {{num_examples}} practical examples

Answer:
""")

embedded_question = qa_template.embed_prompt(user_query)

qa_prompt = qa_template.to_prompt(
    embedded_question=embedded_question,
    complexity_level="intermediate",
    num_examples=2
)

qa_request = CompletionRequest(
    prompt=qa_prompt,
    maximum_tokens=400,
    temperature=0.5
)

qa_response = client.complete(qa_request, model="luminous-extended")
print("Q&A response:")
print(qa_response.completions[0].completion)

# Reusable template for different contexts
conversation_template = PromptTemplate("""
Context: {{context}}
Tone: {{tone}}
Audience: {{audience}}

{%- if previous_context %}
Previous conversation:
{{ previous_context }}
{%- endif %}

Current request: {{user_input}}

Please respond appropriately:
""")

# Use same template for different scenarios
contexts = [
    {
        "context": "Customer support chat",
        "tone": "helpful and professional",
        "audience": "confused customer",
        "user_input": "I can't find my order confirmation",
        "previous_context": None
    },
    {
        "context": "Technical documentation",
        "tone": "precise and informative",
        "audience": "software developers",
        "user_input": "How do I implement rate limiting?",
        "previous_context": "Previous discussion about API design patterns"
    }
]

for ctx in contexts:
    prompt = conversation_template.to_prompt(**ctx)
    request = CompletionRequest(
        prompt=prompt,
        maximum_tokens=150,
        temperature=0.6
    )

    response = client.complete(request, model="luminous-extended")
    print(f"\n{ctx['context']} response:")
    print(response.completions[0].completion)
```