Python client to interact with Aleph Alpha API endpoints
—
Quality: Pending — Does it follow best practices?
Impact: Pending — No eval scenarios have been run.
Advanced prompt construction tools for working with documents and creating reusable prompt templates. Supports DOCX documents, template-based prompt generation with Liquid syntax, and flexible document representation.
Create and manage documents from various sources including DOCX files, text, and multimodal prompts.
class Document:
    """A document that can be either a DOCX document or text/image prompts.

    Presumably exactly one of the three sources (docx, prompt, text) is
    populated per instance; the alternate constructors below each set one.
    """

    # Base64 encoded DOCX document.
    docx: Optional[str] = None
    # Sequence of prompt items (text, images, tokens).
    # Project types are quoted so the annotation is not evaluated at class
    # creation time (Text/Image/Tokens are defined elsewhere in the package).
    prompt: "Optional[Sequence[Union[str, Text, Image, Tokens]]]" = None
    # Plain text content.
    text: Optional[str] = None

    def __init__(
        self,
        docx: Optional[str] = None,
        prompt: "Optional[Sequence[Union[str, Text, Image, Tokens]]]" = None,
        text: Optional[str] = None,
    ):
        """
        A document that can be either a DOCX document or text/image prompts.

        Parameters:
        - docx: Base64 encoded DOCX document
        - prompt: Sequence of prompt items (text, images, tokens)
        - text: Plain text content
        """

    @classmethod
    def from_docx_bytes(cls, bytes: bytes) -> "Document":
        """
        Create document from DOCX file bytes.

        Parameters:
        - bytes: DOCX file content as bytes

        Returns:
        Document instance with DOCX content
        """

    @classmethod
    def from_docx_file(cls, path: str) -> "Document":
        """
        Load a DOCX file from disk and prepare it as a document.

        Parameters:
        - path: Path to DOCX file

        Returns:
        Document instance loaded from file
        """

    @classmethod
    def from_prompt(cls, prompt: "Union[Prompt, Sequence[Union[str, Image]]]") -> "Document":
        """
        Create document from prompt containing multiple strings and images.

        Parameters:
        - prompt: Prompt or sequence of prompt items

        Returns:
        Document instance from prompt
        """

    @classmethod
    def from_text(cls, text: str) -> "Document":
        """
        Create document from plain text.

        Parameters:
        - text: Text content

        Returns:
        Document instance with text content
        """

# Create reusable prompt templates using Liquid template syntax with support for multimodal content.
class PromptTemplate:
    """Create reusable prompt templates using the Liquid template language.

    Allows building prompts with dynamic content and embedded non-text
    items like images and tokens using a placeholder system.
    """

    # Parsed Liquid template object (project/liquid type; annotation quoted
    # so it is not evaluated at class-creation time).
    template: "Template"
    # Maps placeholder UUIDs to the non-text items they stand in for.
    non_text_items: "Dict[Placeholder, Union[Image, Tokens]]"

    def __init__(self, template_str: str):
        """
        Create prompt template using Liquid template language.

        Parameters:
        - template_str: Liquid template string
        """

    def placeholder(self, prompt_item: "Union[Image, Tokens]") -> "Placeholder":
        """
        Save a non-text prompt item and return a placeholder.

        The placeholder is used to embed the prompt item in the template.

        Parameters:
        - prompt_item: Image or Tokens to embed

        Returns:
        Placeholder UUID for template embedding
        """

    def embed_prompt(self, prompt: "Prompt") -> str:
        """
        Embed a prompt in a prompt template.

        Adds whitespace between text items if needed and embeds non-text items.

        Parameters:
        - prompt: Prompt to embed in template

        Returns:
        String representation with embedded placeholders
        """

    def to_prompt(self, **kwargs) -> "Prompt":
        """
        Create a Prompt from the template string and parameters.

        Parameters are passed to liquid.Template.render for variable substitution.

        Parameters:
        - **kwargs: Template variables for rendering

        Returns:
        Rendered Prompt with all substitutions applied
        """

# Supporting types for template and document functionality.
Placeholder = NewType("Placeholder", UUID)Working with documents and prompt templates:
from aleph_alpha_client import (
Document,
PromptTemplate,
Image,
Prompt,
Text,
Tokens,
Client,
CompletionRequest
)
from pathlib import Path
client = Client(token="your-api-token")

# Document creation from different sources

# 1. From DOCX file
docx_document = Document.from_docx_file("./reports/quarterly_report.docx")

# 2. From plain text
text_document = Document.from_text("This is a sample document for analysis.")

# 3. From multimodal prompt
image = Image.from_file(Path("./images/chart.png"))
multimodal_prompt = Prompt([
    Text.from_text("Financial overview:"),
    image,
    Text.from_text("See attached chart for details.")
])
prompt_document = Document.from_prompt(multimodal_prompt)

# 4. From DOCX bytes (useful for uploaded files)
# The read must happen inside the with-block so the file is still open.
with open("./documents/contract.docx", "rb") as f:
    docx_bytes = f.read()
bytes_document = Document.from_docx_bytes(docx_bytes)
# Prompt Template Examples

# Basic template with variables
greeting_template = PromptTemplate("""
{%- for name in names -%}
Hello {{name}}!
{% endfor -%}
Today is {{date}} and the weather is {{weather}}.
""")

# Render the template with concrete values for every variable it uses.
greeting_prompt = greeting_template.to_prompt(
    names=["Alice", "Bob", "Charlie"],
    date="Monday",
    weather="sunny",
)

greeting_request = CompletionRequest(
    prompt=greeting_prompt,
    maximum_tokens=100,
    temperature=0.7,
)
greeting_response = client.complete(greeting_request, model="luminous-extended")
print("Template response:")
print(greeting_response.completions[0].completion)
# Template with embedded images
chart = Image.from_file(Path("./analysis/data_chart.png"))
chart_template = PromptTemplate("""
Please analyze this data visualization:
{{ image_placeholder }}
Focus on these aspects:
{%- for aspect in analysis_points %}
- {{ aspect }}
{%- endfor %}
Provide insights in {{ style }} format.
""")

# Create placeholder for the image
chart_ref = chart_template.placeholder(chart)

# The placeholder is passed under the variable name used in the template.
analysis_prompt = chart_template.to_prompt(
    image_placeholder=chart_ref,
    analysis_points=["trends", "outliers", "correlations"],
    style="bullet points",
)
analysis_request = CompletionRequest(
    prompt=analysis_prompt,
    maximum_tokens=200,
    temperature=0.4,
)
analysis_response = client.complete(analysis_request, model="luminous-extended")
print("Image analysis:")
print(analysis_response.completions[0].completion)
# Complex template with conditional logic
perf_template = PromptTemplate("""
# {{report_title}}
{% if include_summary -%}
## Executive Summary
This report covers {{topic}} for the period {{period}}.
{% endif -%}
## Key Findings
{%- for finding in findings %}
{{loop.index}}. {{ finding }}
{%- endfor %}
{% if include_image -%}
## Visual Analysis
{{ chart_placeholder }}
{%- endif %}
{% if recommendations -%}
## Recommendations
{%- for rec in recommendations %}
- {{ rec }}
{%- endfor %}
{%- endif %}
""")

# Use with conditional content
performance_chart = Image.from_file(Path("./charts/performance.png"))
chart_slot = perf_template.placeholder(performance_chart)

# Boolean flags drive the template's {% if %} sections.
report_prompt = perf_template.to_prompt(
    report_title="Q3 Performance Analysis",
    include_summary=True,
    topic="sales performance",
    period="Q3 2024",
    findings=[
        "Sales increased by 15% compared to Q2",
        "Customer satisfaction improved to 4.2/5",
        "New product launch exceeded expectations",
    ],
    include_image=True,
    chart_placeholder=chart_slot,
    recommendations=[
        "Expand successful product lines",
        "Increase marketing in high-performing regions",
        "Optimize inventory based on demand patterns",
    ],
)
report_request = CompletionRequest(
    prompt=report_prompt,
    maximum_tokens=300,
    temperature=0.3,
)
report_response = client.complete(report_request, model="luminous-extended")
print("Generated report:")
print(report_response.completions[0].completion)
# Embedding existing prompts in templates
question_with_diagram = Prompt([
    Text.from_text("How does machine learning work?"),
    Image.from_file(Path("./diagrams/ml_workflow.png")),
])
answer_template = PromptTemplate("""
User Question: {{ embedded_question }}
Please provide a comprehensive answer that:
1. Addresses the specific question
2. References the provided diagram
3. Uses {{complexity_level}} language
4. Includes {{num_examples}} practical examples
Answer:
""")

# embed_prompt converts the multimodal prompt to a string whose non-text
# items are represented by placeholders.
rendered_question = answer_template.embed_prompt(question_with_diagram)
qa_prompt = answer_template.to_prompt(
    embedded_question=rendered_question,
    complexity_level="intermediate",
    num_examples=2,
)
qa_request = CompletionRequest(
    prompt=qa_prompt,
    maximum_tokens=400,
    temperature=0.5,
)
qa_response = client.complete(qa_request, model="luminous-extended")
print("Q&A response:")
print(qa_response.completions[0].completion)
# Reusable template for different contexts
conversation_template = PromptTemplate("""
Context: {{context}}
Tone: {{tone}}
Audience: {{audience}}
{%- if previous_context %}
Previous conversation:
{{ previous_context }}
{%- endif %}
Current request: {{user_input}}
Please respond appropriately:
""")

# Use same template for different scenarios
contexts = [
    {
        "context": "Customer support chat",
        "tone": "helpful and professional",
        "audience": "confused customer",
        "user_input": "I can't find my order confirmation",
        "previous_context": None
    },
    {
        "context": "Technical documentation",
        "tone": "precise and informative",
        "audience": "software developers",
        "user_input": "How do I implement rate limiting?",
        "previous_context": "Previous discussion about API design patterns"
    }
]

# Render and complete the same template once per scenario; each dict's keys
# match the template's variable names, so it can be splatted with **ctx.
for ctx in contexts:
    prompt = conversation_template.to_prompt(**ctx)
    request = CompletionRequest(
        prompt=prompt,
        maximum_tokens=150,
        temperature=0.6
    )
    response = client.complete(request, model="luminous-extended")
    print(f"\n{ctx['context']} response:")
    print(response.completions[0].completion)

# Install with Tessl CLI
npx tessl i tessl/pypi-aleph-alpha-client