Interface between LLMs and your data
—
Flexible prompt templating system supporting chat templates, conditional prompts, and integration with various LLM formats. The prompt system provides powerful abstractions for creating reusable, parameterized prompts with support for different message roles and dynamic content generation.
Foundation interface for all prompt template implementations with standardized formatting and validation.
class BasePromptTemplate:
    """
    Base interface for prompt template implementations.

    This is an interface stub: method bodies are intentionally empty and
    declare only the shared contract that concrete templates implement.

    Parameters:
    - metadata: Optional[dict], metadata about the prompt template
    - template_vars: Optional[List[str]], list of template variable names
    - function_mappings: Optional[dict], mappings for function-based variables
    - **kwargs: implementation-specific extra options
    """

    def __init__(
        self,
        metadata: Optional[dict] = None,
        template_vars: Optional[List[str]] = None,
        function_mappings: Optional[dict] = None,
        **kwargs
    ): ...

    def format(self, **kwargs: Any) -> str:
        """
        Format the prompt template with provided variables.

        Parameters:
        - **kwargs: template variables and their values

        Returns:
        - str, formatted prompt string
        """

    def format_messages(self, **kwargs: Any) -> List[ChatMessage]:
        """
        Format template as list of chat messages.

        Parameters:
        - **kwargs: template variables and their values

        Returns:
        - List[ChatMessage], formatted messages for chat models
        """

    def get_template(self) -> str:
        """Get the raw template string."""

    def partial_format(self, **kwargs: Any) -> "BasePromptTemplate":
        """
        Create partially formatted template with some variables filled.

        Parameters:
        - **kwargs: subset of template variables to fill

        Returns:
        - BasePromptTemplate, partially formatted template
        """

    @property
    def template_vars(self) -> List[str]:
        """Get list of template variable names."""

# Core prompt template implementation for text-based prompts with variable substitution.
class PromptTemplate(BasePromptTemplate):
    """
    Standard prompt template for text-based prompts with variable substitution.

    Interface stub: bodies are intentionally empty.

    Parameters:
    - template: str, the prompt template string with {variable} placeholders
    - template_var_mappings: Optional[dict], mappings for template variable names
    - function_mappings: Optional[dict], function-based variable mappings
    - output_parser: Optional[BaseOutputParser], parser for template output
    """

    def __init__(
        self,
        template: str,
        template_var_mappings: Optional[dict] = None,
        function_mappings: Optional[dict] = None,
        output_parser: Optional[BaseOutputParser] = None,
        **kwargs
    ): ...

    @classmethod
    def from_langchain_prompt(cls, prompt: Any) -> "PromptTemplate":
        """Create PromptTemplate from Langchain prompt."""

# Alias for backward compatibility
Prompt = PromptTemplate

# Specialized templates for chat-based interactions with support for different message roles and conversation flows.
class ChatPromptTemplate(BasePromptTemplate):
    """
    Chat-based prompt template supporting multiple message roles and
    conversation structure.

    Interface stub: bodies are intentionally empty.

    Parameters:
    - message_templates: List[ChatMessage], list of message templates
    - system_template: Optional[ChatMessage], system message template
    - template_var_mappings: Optional[dict], variable name mappings
    - function_mappings: Optional[dict], function-based mappings
    - output_parser: Optional[BaseOutputParser], output parser for responses
    """

    def __init__(
        self,
        message_templates: List[ChatMessage],
        system_template: Optional[ChatMessage] = None,
        template_var_mappings: Optional[dict] = None,
        function_mappings: Optional[dict] = None,
        output_parser: Optional[BaseOutputParser] = None,
        **kwargs
    ): ...

    @classmethod
    def from_messages(cls, messages: List[ChatMessage]) -> "ChatPromptTemplate":
        """Create ChatPromptTemplate from list of messages."""

    def format_messages(self, **kwargs: Any) -> List[ChatMessage]:
        """Format template into chat messages."""

# Advanced templates with conditional logic and dynamic template selection based on runtime conditions.
class SelectorPromptTemplate(BasePromptTemplate):
    """
    Prompt template with conditional logic for dynamic template selection.

    Interface stub: bodies are intentionally empty.

    Parameters:
    - default_template: BasePromptTemplate, default template when no conditions match
    - conditionals: List[Tuple[Callable, BasePromptTemplate]], condition-template pairs
    """

    def __init__(
        self,
        default_template: BasePromptTemplate,
        conditionals: Optional[List[Tuple[Callable, BasePromptTemplate]]] = None,
        **kwargs
    ): ...

    def select(self, **kwargs: Any) -> BasePromptTemplate:
        """
        Select appropriate template based on conditions.

        Parameters:
        - **kwargs: variables for condition evaluation

        Returns:
        - BasePromptTemplate, selected template based on conditions
        """

# Templates for integrating with external prompt systems and frameworks.
class LangchainPromptTemplate(BasePromptTemplate):
    """
    Wrapper for Langchain prompt templates to provide LlamaIndex compatibility.

    Interface stub: body is intentionally empty.

    Parameters:
    - prompt: Any, Langchain prompt template object
    - template_var_mappings: Optional[dict], variable name mappings
    - function_mappings: Optional[dict], function mappings
    - output_parser: Optional[BaseOutputParser], output parser
    """

    def __init__(
        self,
        prompt: Any,
        template_var_mappings: Optional[dict] = None,
        function_mappings: Optional[dict] = None,
        output_parser: Optional[BaseOutputParser] = None,
        **kwargs
    ): ...
class RichPromptTemplate(BasePromptTemplate):
    """
    Rich text prompt template with advanced formatting capabilities.

    Interface stub: body is intentionally empty.

    Parameters:
    - template: str, rich text template with formatting markup
    - template_var_mappings: Optional[dict], variable mappings
    - function_mappings: Optional[dict], function mappings
    """

    def __init__(
        self,
        template: str,
        template_var_mappings: Optional[dict] = None,
        function_mappings: Optional[dict] = None,
        **kwargs
    ): ...

# Structured message types for chat-based interactions with comprehensive role support.
class ChatMessage:
    """
    Individual message in a chat conversation.

    Interface stub: bodies are intentionally empty.

    Parameters:
    - role: MessageRole, role of the message sender
    - content: Union[str, List], message content (text or structured content)
    - additional_kwargs: Optional[dict], additional message metadata
    - tool_calls: Optional[List], tool calls in the message
    - tool_call_id: Optional[str], identifier for tool call responses
    """

    def __init__(
        self,
        role: MessageRole,
        content: Union[str, List] = "",
        additional_kwargs: Optional[dict] = None,
        tool_calls: Optional[List] = None,
        tool_call_id: Optional[str] = None,
        **kwargs
    ): ...

    @classmethod
    def from_str(
        cls,
        content: str,
        # MessageRole mixes in str, so a MessageRole default satisfies the str annotation
        role: str = MessageRole.USER,
        **kwargs
    ) -> "ChatMessage":
        """Create ChatMessage from string content."""
class MessageRole(str, Enum):
    """Enumeration of message roles in chat conversations.

    Mixes in str, so members compare equal to their string values.
    """

    SYSTEM = "system"        # System instructions and context setting
    USER = "user"            # User input and questions
    ASSISTANT = "assistant"  # AI assistant responses
    FUNCTION = "function"    # Function call results (deprecated)
    TOOL = "tool"            # Tool execution results

# Categorization system for different types of prompts and their intended usage patterns.
class PromptType(str, Enum):
    """Enumeration of prompt types for categorization and selection.

    Mixes in str, so members compare equal to their string values.
    """

    QUESTION_ANSWER = "question_answer"
    REFINE = "refine"
    SUMMARY = "summary"
    SIMPLE_INPUT = "simple_input"
    CONDITIONAL_INPUT = "conditional_input"
    KEYWORD_EXTRACT = "keyword_extract"
    QUERY_KEYWORD_EXTRACT = "query_keyword_extract"
    SCHEMA_EXTRACT = "schema_extract"
    TEXT_TO_SQL = "text_to_sql"
    TABLE_CONTEXT = "table_context"
    KNOWLEDGE_TRIPLET_EXTRACT = "knowledge_triplet_extract"
    TREE_SUMMARIZE = "tree_summarize"
    TREE_INSERT = "tree_insert"
    TREE_SELECT = "tree_select"
    TREE_SELECT_MULTIPLE = "tree_select_multiple"
    SUB_QUESTION = "sub_question"
    PANDAS = "pandas"
    JSON_PATH = "json_path"
    CHOICE_SELECT = "choice_select"
    MULTI_SELECT = "multi_select"
    SINGLE_SELECT = "single_select"

# Utilities for displaying and debugging prompt templates and their formatted output.
def display_prompt_dict(prompts_dict: Dict[str, BasePromptTemplate]) -> None:
    """
    Display a dictionary of prompts in a formatted way for debugging.

    Interface stub: body is intentionally empty.

    Parameters:
    - prompts_dict: Dict[str, BasePromptTemplate], dictionary of prompt templates
    """

from llama_index.core.prompts import PromptTemplate
# Create a simple prompt template
template = PromptTemplate(
    template="Explain the concept of {topic} in {style} style for {audience}."
)

# Format the prompt
formatted_prompt = template.format(
    topic="machine learning",
    style="simple",
    audience="beginners"
)
print(formatted_prompt)
# Output: "Explain the concept of machine learning in simple style for beginners."

# Check template variables
print(f"Template variables: {template.template_vars}")
# Output: ['topic', 'style', 'audience']

from llama_index.core.prompts import ChatPromptTemplate
from llama_index.core.prompts.types import ChatMessage, MessageRole

# Create chat messages
messages = [
    ChatMessage(
        role=MessageRole.SYSTEM,
        content="You are a helpful AI assistant specializing in {domain}."
    ),
    ChatMessage(
        role=MessageRole.USER,
        content="I have a question about {topic}. Can you help me understand {specific_question}?"
    )
]

# Create chat prompt template
chat_template = ChatPromptTemplate.from_messages(messages)

# Format chat messages
formatted_messages = chat_template.format_messages(
    domain="machine learning",
    topic="neural networks",
    specific_question="how backpropagation works"
)
for msg in formatted_messages:
    print(f"{msg.role}: {msg.content}")

from llama_index.core.prompts import SelectorPromptTemplate, PromptTemplate
# Define templates for different scenarios
simple_template = PromptTemplate(
    template="Provide a brief explanation of {concept}."
)
detailed_template = PromptTemplate(
    template="Provide a comprehensive explanation of {concept}, including examples, applications, and technical details."
)

# Define condition function
def is_detailed_request(detail_level: str, **kwargs) -> bool:
    return detail_level.lower() in ["detailed", "comprehensive", "advanced"]

# Create selector template
selector = SelectorPromptTemplate(
    default_template=simple_template,
    conditionals=[
        (is_detailed_request, detailed_template)
    ]
)

# Format with different detail levels
simple_prompt = selector.format(concept="neural networks", detail_level="basic")
detailed_prompt = selector.format(concept="neural networks", detail_level="detailed")
print("Simple:", simple_prompt)
print("Detailed:", detailed_prompt)

# Create a conversation template
conversation_template = ChatPromptTemplate.from_messages([
    ChatMessage(
        role=MessageRole.SYSTEM,
        content="You are an expert in {subject}. Answer questions clearly and provide examples when helpful."
    ),
    ChatMessage(
        role=MessageRole.USER,
        content="{user_question}"
    ),
    ChatMessage(
        role=MessageRole.ASSISTANT,
        content="I'll help you understand {subject}. {context_info}"
    ),
    ChatMessage(
        role=MessageRole.USER,
        content="Can you explain {follow_up_topic} in more detail?"
    )
])

# Format the conversation
conversation = conversation_template.format_messages(
    subject="machine learning",
    user_question="What is supervised learning?",
    context_info="Supervised learning uses labeled data to train models.",
    follow_up_topic="the difference between classification and regression"
)
print("Conversation flow:")
for i, msg in enumerate(conversation):
    print(f"{i+1}. {msg.role.upper()}: {msg.content}")

# Create template with multiple variables
analysis_template = PromptTemplate(
    template="Analyze {data_type} data from {source} using {method} approach. Focus on {aspect} and provide {output_format} results."
)

# Partially format with some variables
partial_template = analysis_template.partial_format(
    method="statistical",
    output_format="detailed"
)

# Complete formatting later
final_prompt = partial_template.format(
    data_type="customer behavior",
    source="web analytics",
    aspect="conversion patterns"
)
print(final_prompt)

from datetime import datetime
def get_current_date():
    """Return today's date as a YYYY-MM-DD string."""
    return datetime.now().strftime("%Y-%m-%d")

def get_greeting(time_of_day):
    """Map a time-of-day keyword to a greeting, defaulting to "Hello"."""
    greetings = {
        "morning": "Good morning",
        "afternoon": "Good afternoon",
        "evening": "Good evening"
    }
    return greetings.get(time_of_day, "Hello")

# Template with function mappings: these variables are produced by
# calling the mapped functions at format time rather than passed in.
dynamic_template = PromptTemplate(
    template="{greeting}! Today is {current_date}. Let's discuss {topic}.",
    function_mappings={
        "current_date": get_current_date,
        "greeting": lambda: get_greeting("morning")
    }
)

# Format with dynamic functions
formatted = dynamic_template.format(topic="AI developments")
print(formatted)

# Advanced chat template with tool integration
tool_chat_template = ChatPromptTemplate.from_messages([
    ChatMessage(
        role=MessageRole.SYSTEM,
        content="You are an AI assistant with access to tools. Use the {available_tools} when needed to answer questions about {domain}."
    ),
    ChatMessage(
        role=MessageRole.USER,
        content="{user_query}"
    ),
    ChatMessage(
        role=MessageRole.ASSISTANT,
        content="I'll help you with {user_query}. Let me use the appropriate tools to gather information."
    )
])

# Format with tool information
tool_conversation = tool_chat_template.format_messages(
    available_tools="search, calculator, and code_executor tools",
    domain="data science",
    user_query="How do I calculate the correlation between two datasets?"
)
for msg in tool_conversation:
    print(f"{msg.role}: {msg.content}")

# Validate template variables
template = PromptTemplate(
    template="Generate a {output_type} about {subject} with {requirements}."
)

# Check what variables are needed
required_vars = template.template_vars
print(f"Required variables: {required_vars}")

# Attempt formatting with missing variables
try:
    template.format(subject="climate change")
except KeyError as e:
    print(f"Missing variable: {e}")

# Proper formatting
complete_prompt = template.format(
    output_type="report",
    subject="climate change",
    requirements="scientific citations and data visualizations"
)
print(complete_prompt)

from llama_index.core.prompts import display_prompt_dict
# Collection of prompts for an application
prompts = {
    "question_answer": PromptTemplate(
        template="Answer the following question: {question}"
    ),
    "summarization": PromptTemplate(
        template="Summarize the following text in {length} sentences: {text}"
    ),
    "classification": ChatPromptTemplate.from_messages([
        ChatMessage(role=MessageRole.SYSTEM, content="You are a text classifier."),
        ChatMessage(role=MessageRole.USER, content="Classify this text: {text}")
    ])
}

# Display all prompts for debugging
display_prompt_dict(prompts)

# Common prompt patterns and templates for typical LLM applications:
# Question-Answering Template
QA_TEMPLATE = PromptTemplate(
    template="Context information is below.\n"
             "---------------------\n"
             "{context_str}\n"
             "---------------------\n"
             "Given the context information and not prior knowledge, "
             "answer the question: {query_str}\n"
)

# Refinement Template
REFINE_TEMPLATE = PromptTemplate(
    template="The original question is as follows: {query_str}\n"
             "We have provided an existing answer: {existing_answer}\n"
             "We have the opportunity to refine the existing answer "
             "(only if needed) with some more context below.\n"
             "------------\n"
             "{context_msg}\n"
             "------------\n"
             "Given the new context, refine the original answer to better "
             "answer the question. If the context isn't useful, return "
             "the original answer.\n"
)

# Summarization Template
SUMMARIZE_TEMPLATE = PromptTemplate(
    template="Write a summary of the following. Try to use only the "
             "information provided. Try to include as many key details as possible.\n"
             "\n"
             "{context_str}\n"
             "\n"
             "SUMMARY:"
)

# Type alias for prompt template dictionaries
PromptMixinType = Dict[str, BasePromptTemplate]

# Template variable types
TemplateVarType = Union[str, Callable[[], str]]

# Default template delimiters
DEFAULT_TEMPLATE_VAR_FORMAT = "{{{var}}}"
CHAT_TEMPLATE_VAR_FORMAT = "{var}"

# Validation settings
VALIDATE_TEMPLATE_VARS = True
ALLOW_UNDEFINED_TEMPLATE_VARS = False

# Install with Tessl CLI
npx tessl i tessl/pypi-llama-index-core