CLI utility and Python library for interacting with Large Language Models from multiple providers including OpenAI, Anthropic, Google, and Meta, plus locally installed models.
—
Prompt template system with variable substitution, attachment handling, and fragment management for reusable prompt components. This module enables structured prompt creation with dynamic content insertion and modular design patterns.
The Template class provides comprehensive prompt template functionality with variable substitution and configuration management.
class Template:
    """Prompt template with variable substitution support.

    A declarative description of a prompt: the prompt/system text may
    contain $variable placeholders that are filled in by
    evaluate()/interpolate().
    """

    def __init__(
        self,
        name: str,
        prompt: Optional[str] = None,
        system: Optional[str] = None,
        attachments: Optional[List[str]] = None,
        # Quoted forward reference: AttachmentType is declared later in
        # this module, so a bare name here would raise NameError.
        attachment_types: Optional[List["AttachmentType"]] = None,
        model: Optional[str] = None,
        defaults: Optional[Dict[str, Any]] = None,
        options: Optional[Dict[str, Any]] = None,
        extract: Optional[bool] = None,
        extract_last: Optional[bool] = None,
        schema_object: Optional[dict] = None,
        fragments: Optional[List[str]] = None,
        system_fragments: Optional[List[str]] = None,
        tools: Optional[List[str]] = None,
        functions: Optional[str] = None
    ):
        """
        Initialize template.

        Args:
            name: Template name/identifier
            prompt: Main prompt text with variable placeholders
            system: System message template
            attachments: List of attachment specifications
            attachment_types: List of AttachmentType objects with type/value pairs
            model: Default model to use with this template
            defaults: Default values for template variables
            options: Model configuration options
            extract: Extract fenced code blocks from response
            extract_last: Extract only the last fenced code block
            schema_object: JSON schema for structured output
            fragments: List of fragment specifications to include
            system_fragments: List of fragment specifications for system prompt
            tools: List of tool specifications
            functions: Python functions for dynamic template processing
        """

    def evaluate(
        self,
        input: str,
        params: Optional[Dict[str, Any]] = None
    ) -> Tuple[Optional[str], Optional[str]]:
        """
        Evaluate template with input and parameters.

        Args:
            input: Input value for template evaluation
            params: Dictionary of variable values

        Returns:
            Tuple of (evaluated_prompt, evaluated_system)
        """

    def vars(self) -> set:
        """
        Extract all variable names from template.

        Returns:
            Set of variable names found in prompt and system templates
        """

    @staticmethod
    def interpolate(text: str, params: Dict[str, Any]) -> Optional[str]:
        """
        Interpolate variables in text template.

        Args:
            text: Template text with $variable or ${variable} placeholders
            params: Dictionary of variable values

        Returns:
            Interpolated text or None if text is None
        """

    # Declared instance attributes (mirroring the constructor parameters
    # of the same names).
    name: str
    prompt: Optional[str]
    system: Optional[str]
    attachments: Optional[List[str]]
    model: Optional[str]
    defaults: Optional[Dict[str, Any]]
    options: Optional[Dict[str, Any]]
    schema_object: Optional[dict]
    tools: Optional[List[str]]

Plugin-based system for loading templates from various sources.
def get_template_loaders() -> Dict[str, Callable[[str], "Template"]]:
    """
    Get template loaders registered by plugins.

    Returns:
        Dictionary mapping loader prefixes to loader functions
    """
"""Support for specifying attachments within templates.
class AttachmentType(BaseModel):
    """Pydantic model for attachment type specifications."""

    # extra="forbid": unknown keys in an attachment spec raise a
    # validation error instead of being silently ignored.
    model_config = ConfigDict(extra="forbid")

    type: str
    value: str

Fragment objects provide text components with source tracking for modular prompt construction.
class Fragment(str):
    """Text fragment with source tracking for provenance."""

    def __new__(cls, content: str, source: str = ""):
        """
        Create new fragment.

        Args:
            content: Fragment text content
            source: Source information for tracking
        """
        # str is immutable, so the text must be supplied via __new__;
        # the extra `source` attribute is attached to the new instance.
        obj = str.__new__(cls, content)
        obj.source = source
        return obj

    def id(self) -> str:
        """Generate stable hash ID for fragment caching."""

    # Provenance label set in __new__ (empty string when not provided).
    source: str
def get_fragment_loaders() -> Dict[
    str,
    # Quoted forward references: Fragment is defined above in this
    # document and Attachment elsewhere in the llm package.
    Callable[[str], Union["Fragment", "Attachment", List[Union["Fragment", "Attachment"]]]]
]:
    """
    Get fragment loaders registered by plugins.

    Returns:
        Dictionary mapping loader prefixes to loader functions
    """
"""import llm
# Example: basic template creation and evaluation.
# Create simple template
template = llm.Template(
    name="greeting",
    prompt="Hello $name, welcome to $place!",
    system="You are a friendly assistant."
)
# Use template variables: vars() reports the $placeholders found in the
# prompt and system text.
variables = template.vars()
print(f"Template variables: {variables}")  # {'name', 'place'}
# Evaluate template, substituting concrete values for the placeholders.
prompt_text, system_text = template.evaluate(
    input="",  # Not used in this example
    params={"name": "Alice", "place": "Python programming"}
)
print(f"Prompt: {prompt_text}")
print(f"System: {system_text}")import llm
# Example: template with default variable values.
# Template with default values
template = llm.Template(
    name="code_review",
    prompt="""
Please review this $language code and provide feedback on:
- Code quality (scale: $scale)
- Best practices
- Potential improvements
Code:
$code
""",
    defaults={
        "language": "Python",
        "scale": "1-10"
    }
)
# Use with some defaults, override others: values passed in params take
# precedence over the template's defaults.
prompt_text, _ = template.evaluate(
    input="",
    params={
        "code": "def hello():\n print('Hello, World!')",
        "language": "JavaScript"  # Override default
    }
)
print(prompt_text)

import llm
# Example: template that carries a default model and model options.
# Template with model and options specified
template = llm.Template(
    name="creative_writing",
    prompt="Write a $genre story about $topic",
    model="gpt-4",
    options={
        "temperature": 0.8,
        "max_tokens": 500
    },
    defaults={
        "genre": "science fiction"
    }
)
# Use template with model: look up the model the template declares.
model = llm.get_model(template.model)
prompt_text, system_text = template.evaluate(
    input="",
    params={"topic": "time travel"}
)
# Apply template options to model by splatting them as keyword arguments.
response = model.prompt(
    prompt_text,
    system=system_text,
    **template.options
)
print(response.text())

import llm
# Example: template configured for structured (schema-constrained) output.
# Template for structured output
# JSON schema describing the object the model should return.
schema = {
    "type": "object",
    "properties": {
        "summary": {"type": "string"},
        "key_points": {
            "type": "array",
            "items": {"type": "string"}
        },
        "sentiment": {
            "type": "string",
            "enum": ["positive", "negative", "neutral"]
        }
    },
    "required": ["summary", "sentiment"]
}
template = llm.Template(
    name="text_analysis",
    prompt="Analyze this text and extract key information: $text",
    schema_object=schema,
    model="gpt-4"
)
# Use template for structured analysis
model = llm.get_model(template.model)
prompt_text, _ = template.evaluate(
    input="",
    params={"text": "I love this new Python library! It makes development so much easier."}
)
response = model.prompt(prompt_text, schema=template.schema_object)
# NOTE(review): assumes the model's JSON response contains the required
# keys; "key_points" is optional per the schema above.
analysis = response.response_json()
print(f"Summary: {analysis['summary']}")
print(f"Sentiment: {analysis['sentiment']}")import llm
# Example: template that references tools the model may call.
def search_web(query: str) -> str:
    """Search the web for information (stub used by this example)."""
    return f"Search results for: {query}"


def get_current_date() -> str:
    """Get the current date formatted as YYYY-MM-DD."""
    from datetime import datetime
    return datetime.now().strftime("%Y-%m-%d")


# Template that uses tools
template = llm.Template(
    name="research_assistant",
    prompt="""
Research the topic: $topic
Focus on information from $timeframe.
Provide a comprehensive overview with recent developments.
""",
    tools=["search_web", "get_current_date"],
    defaults={
        "timeframe": "the last year"
    }
)
# Use template with tools: wrap the plain functions as llm Tool objects.
search_tool = llm.Tool.function(search_web)
date_tool = llm.Tool.function(get_current_date)
tools = [search_tool, date_tool]
model = llm.get_model("gpt-4")
prompt_text, _ = template.evaluate(
    input="",
    params={"topic": "artificial intelligence trends"}
)
response = model.prompt(prompt_text, tools=tools)
print(response.text())

import llm
# Example: template that declares attachments.
# Template that specifies attachments
template = llm.Template(
    name="image_analysis",
    prompt="Analyze this $image_type image and describe what you see: $description_focus",
    attachments=["$image_path"],  # the attachment spec itself uses a $variable
    defaults={
        "image_type": "photograph",
        "description_focus": "main subjects and activities"
    }
)
# Use template with image attachment
model = llm.get_model("gpt-4-vision")
prompt_text, _ = template.evaluate(
    input="",
    params={
        "image_path": "/path/to/photo.jpg",
        "description_focus": "architectural details"
    }
)
# Create attachment from template specification (resolved by hand here).
attachment = llm.Attachment(path="/path/to/photo.jpg")
response = model.prompt(prompt_text, attachments=[attachment])
print(response.text())

import llm
# Example: composing a prompt from reusable Fragment objects.
# Create reusable fragments; each carries a source label for provenance.
code_review_intro = llm.Fragment(
    "Please review the following code for:",
    source="code_review_templates"
)
quality_criteria = llm.Fragment("""
- Code clarity and readability
- Performance considerations
- Security best practices
- Error handling
""", source="review_criteria")
# Combine fragments in template. Note the f-string: the fragments are
# spliced into the prompt text here, before Template ever sees it.
template = llm.Template(
    name="comprehensive_review",
    prompt=f"""
{code_review_intro}
{quality_criteria}
Language: $language
Code:
$code
Additional focus: $focus
""",
    defaults={
        "focus": "general best practices"
    }
)
# Use fragment-based template
prompt_text, _ = template.evaluate(
    input="",
    params={
        "language": "Python",
        "code": "def calculate(x, y):\n return x / y",
        "focus": "error handling and input validation"
    }
)
model = llm.get_model()
response = model.prompt(prompt_text)
print(response.text())

import llm
# Example: simple variable substitution.
# Template with simple variable substitution
# Note: The actual Template.interpolate method uses simple $variable syntax
template = llm.Template(
    name="document_generator",
    prompt="""
Generate a $document_type about $topic.
Target audience: $audience
Length: $length
Style: $style
Requirements: $requirements
""",
    defaults={
        "document_type": "article",
        "style": "professional",
        "length": "500 words"
    }
)
# Fix: the original example called `simple_template.evaluate(...)`, but the
# template above is bound to `template` -- `simple_template` was never
# defined and would raise NameError.
prompt_text, _ = template.evaluate(
    input="",
    params={
        "topic": "machine learning basics",
        "audience": "beginners",
        "requirements": "include examples, avoid jargon, provide references"
    }
)
model = llm.get_model()
response = model.prompt(prompt_text)
print(response.text())

import llm
# Example: discovering plugin-registered loaders and registering new ones.
# Get available template loaders
loaders = llm.get_template_loaders()
print(f"Available template loaders: {list(loaders.keys())}")

# Fragment loaders for modular content
fragment_loaders = llm.get_fragment_loaders()
print(f"Available fragment loaders: {list(fragment_loaders.keys())}")


# Example of how a plugin might register template loaders
@llm.hookimpl
def register_template_loaders(register):
    """Register custom template loaders."""

    def yaml_template_loader(spec: str) -> llm.Template:
        """Load template from YAML specification."""
        import yaml

        # Parse YAML spec
        config = yaml.safe_load(spec)
        return llm.Template(
            name=config['name'],
            prompt=config.get('prompt'),
            system=config.get('system'),
            model=config.get('model'),
            defaults=config.get('defaults', {}),
            options=config.get('options', {})
        )

    register("yaml", yaml_template_loader)
# Templates would then be loadable via: yaml:template_spec

This comprehensive template system enables reusable, configurable prompt patterns that can be shared across projects and customized for specific use cases. The combination of variable substitution, fragments, and plugin-based loading provides maximum flexibility for prompt engineering workflows.
Install with Tessl CLI
npx tessl i tessl/pypi-llm