Model adapters transform between DSPy signatures and language model formats, while custom types enable multimodal inputs, tool calling, and specialized data structures in signatures.
Base adapter class for formatting prompts and parsing responses.
class Adapter:
"""
Base adapter for prompt formatting and parsing.
Transforms DSPy signatures into LM-specific prompt formats
and parses LM responses back into structured outputs.
"""
def __init__(
self,
callbacks: list = None,
use_native_function_calling: bool = False,
native_response_types: list = None
):
"""
Initialize adapter.
Args:
callbacks (list | None): Callbacks for format/parse events
use_native_function_calling (bool): Enable native function calling (default: False)
native_response_types (list | None): Types handled natively by model
"""
pass
def __call__(self, lm, lm_kwargs: dict, signature, demos: list, inputs: dict):
"""
Format prompt and execute LM call.
Args:
lm: Language model instance
lm_kwargs (dict): LM configuration
signature: Task signature
demos (list): Demonstration examples
inputs (dict): Input field values
Returns:
Parsed prediction
"""
pass
def acall(self, lm, lm_kwargs: dict, signature, demos: list, inputs: dict):
"""
Async version of __call__.
Args:
lm: Language model instance
lm_kwargs (dict): LM configuration
signature: Task signature
demos (list): Demonstrations
inputs (dict): Input values
Returns:
Awaitable resolving to parsed prediction
"""
pass
def format(self, signature, demos: list, inputs: dict):
"""
Format signature into prompt messages.
Args:
signature: Task signature
demos (list): Demonstration examples
inputs (dict): Input field values
Returns:
List of message dictionaries
"""
pass
def parse(self, signature, completion: str, _parse_values: bool = True):
"""
Parse LM completion into structured output.
Args:
signature: Task signature
completion (str): Raw LM response
_parse_values (bool): Parse and validate values (default: True)
Returns:
Dictionary of parsed output fields
"""
pass
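Adapters are normally set globally with dspy.configure, but format and parse can also be called directly, which is useful for inspecting the exact prompt a program will send. A minimal sketch using the ChatAdapter described below (message contents vary by adapter and DSPy version):
import dspy

class QA(dspy.Signature):
    """Answer the question."""
    question: str = dspy.InputField()
    answer: str = dspy.OutputField()

# Build the chat messages without calling any LM
adapter = dspy.ChatAdapter()
messages = adapter.format(QA, demos=[], inputs={"question": "What is 2 + 2?"})
for message in messages:
    print(message["role"], message["content"][:80])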
Standard chat-based adapter for conversation models.

class ChatAdapter:
"""
Chat adapter for conversation models.
Formats prompts as chat messages with system, user, and assistant roles.
Default adapter for most modern LLMs.
"""
def __init__(
self,
callbacks: list = None,
use_native_function_calling: bool = False,
native_response_types: list = None
):
"""
Initialize chat adapter.
Args:
callbacks (list | None): Callbacks for monitoring
use_native_function_calling (bool): Use native function calling
native_response_types (list | None): Types handled natively
"""
pass

Usage:
import dspy
# Chat adapter is default, but can be explicit
adapter = dspy.ChatAdapter()
dspy.configure(
lm=dspy.LM('openai/gpt-4o-mini'),
adapter=adapter
)
# With native function calling
adapter = dspy.ChatAdapter(use_native_function_calling=True)
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'), adapter=adapter)
# Use tools with native function calling
class AgentSignature(dspy.Signature):
question: str = dspy.InputField()
answer: str = dspy.OutputField()
react = dspy.ReAct(AgentSignature, tools=[my_tool])  # my_tool: any callable or dspy.Tool defined elsewhere

Adapter using JSON for structured input/output.
class JSONAdapter:
"""
JSON adapter for structured I/O.
Formats prompts to request JSON responses and parses
JSON output for better structure preservation.
"""
def __init__(
self,
callbacks: list = None,
use_native_function_calling: bool = False,
native_response_types: list = None
):
"""
Initialize JSON adapter.
Args:
callbacks (list | None): Callbacks
use_native_function_calling (bool): Native function calling
native_response_types (list | None): Native types
"""
pass

Usage:
import dspy
from pydantic import BaseModel
# Use JSON adapter
adapter = dspy.JSONAdapter()
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'), adapter=adapter)
# Works well with structured outputs
class Person(BaseModel):
name: str
age: int
occupation: str
class ExtractPerson(dspy.Signature):
text: str = dspy.InputField()
person: Person = dspy.OutputField()
extractor = dspy.Predict(ExtractPerson)
result = extractor(text="John Smith is a 35-year-old engineer.")
print(result.person.name)  # "John Smith"

Adapter using XML formatting for prompts.
class XMLAdapter:
"""
XML adapter for prompt formatting.
Uses XML tags for field delimiters, useful for models
trained on XML-formatted data.
"""
def __init__(
self,
callbacks: list = None,
use_native_function_calling: bool = False,
native_response_types: list = None
):
"""
Initialize XML adapter.
Args:
callbacks (list | None): Callbacks
use_native_function_calling (bool): Native function calling
native_response_types (list | None): Native types
"""
pass
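Usage (a minimal sketch; XMLAdapter is configured exactly like the other adapters, and field values are delimited with tags such as <summary>...</summary>):
import dspy

adapter = dspy.XMLAdapter()
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'), adapter=adapter)

class Summarize(dspy.Signature):
    """Summarize the passage in one sentence."""
    passage: str = dspy.InputField()
    summary: str = dspy.OutputField()

summarizer = dspy.Predict(Summarize)
result = summarizer(passage="DSPy separates program structure from prompt text...")
print(result.summary)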
Two-step adapter for complex reasoning tasks.

class TwoStepAdapter:
"""
Two-step adapter for complex tasks.
Separates reasoning from final output in a two-step process,
similar to chain-of-thought but at the adapter level.
"""
def __init__(
self,
callbacks: list = None,
use_native_function_calling: bool = False,
native_response_types: list = None
):
"""
Initialize two-step adapter.
Args:
callbacks (list | None): Callbacks
use_native_function_calling (bool): Native function calling
native_response_types (list | None): Native types
"""
pass
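Usage (a sketch; note that in released DSPy versions the constructor also takes a smaller extraction LM as its first argument, which performs the second, structured-extraction step):
import dspy

# Step 1: the main model answers in free-form text.
# Step 2: a smaller model extracts the signature's output fields from that text.
main_lm = dspy.LM('openai/gpt-4o')
extraction_lm = dspy.LM('openai/gpt-4o-mini')

adapter = dspy.TwoStepAdapter(extraction_lm)
dspy.configure(lm=main_lm, adapter=adapter)

class MathProblem(dspy.Signature):
    """Solve the problem."""
    problem: str = dspy.InputField()
    answer: float = dspy.OutputField()

solver = dspy.Predict(MathProblem)
result = solver(problem="What is 12.5% of 240?")
print(result.answer)  # 30.0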
Base class for custom types in signatures.

class Type:
"""
Base class for custom types.
Extend this to create custom types with special handling
in adapters and language models.
"""
pass
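A sketch of a custom type, assuming the interface where a dspy.Type subclass is a Pydantic model that overrides format() to control how its value is rendered into the prompt; GeoPoint and its fields are hypothetical:
import dspy

class GeoPoint(dspy.Type):
    """Hypothetical custom type: a latitude/longitude pair."""
    lat: float
    lon: float

    def format(self) -> str:
        # How the value appears in the rendered prompt
        return f"({self.lat:.5f}, {self.lon:.5f})"

class NearestCity(dspy.Signature):
    """Name the nearest major city to the point."""
    point: GeoPoint = dspy.InputField()
    city: str = dspy.OutputField()

dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'))
locator = dspy.Predict(NearestCity)
result = locator(point=GeoPoint(lat=48.8566, lon=2.3522))
print(result.city)  # "Paris"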
Represents image inputs in multimodal models.

class Image(Type):
"""
Image input type for multimodal models.
Supports URLs, base64 data, and file paths.
"""
pass

Usage:
import dspy
class ImageQA(dspy.Signature):
"""Answer questions about images."""
image: dspy.Image = dspy.InputField()
question: str = dspy.InputField()
answer: str = dspy.OutputField()
# Configure with vision model
dspy.configure(lm=dspy.LM('openai/gpt-4o'))
qa = dspy.Predict(ImageQA)
result = qa(
image="https://example.com/image.jpg",
question="What's in this image?"
)
print(result.answer)

Represents audio inputs.
class Audio(Type):
"""
Audio input type.
Supports audio files and URLs for models with audio capabilities.
"""
pass

Usage:
import dspy
class Transcription(dspy.Signature):
"""Transcribe audio to text."""
audio: dspy.Audio = dspy.InputField()
transcript: str = dspy.OutputField()
dspy.configure(lm=dspy.LM('openai/gpt-4o-audio-preview'))  # an audio-capable chat model
transcriber = dspy.Predict(Transcription)
result = transcriber(audio="/path/to/audio.mp3")
print(result.transcript)

Represents file inputs.
class File(Type):
"""
File input type.
Generic file type for document processing and analysis.
"""
pass
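Usage (a sketch mirroring the Image and Audio examples; whether a raw file path or URL is accepted, and which models handle file inputs, depends on the provider and DSPy version):
import dspy

class SummarizeDocument(dspy.Signature):
    """Summarize the attached file."""
    file: dspy.File = dspy.InputField()
    summary: str = dspy.OutputField()

dspy.configure(lm=dspy.LM('openai/gpt-4o'))
summarizer = dspy.Predict(SummarizeDocument)
result = summarizer(file="/path/to/report.pdf")
print(result.summary)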
Represents conversation history in chat applications.

class History(Type):
"""
Conversation history type.
Represents message history in chat and dialogue systems.
Formatted appropriately by adapters.
"""
pass

Usage:
import dspy
class ChatSignature(dspy.Signature):
"""Generate conversational responses."""
history: dspy.History = dspy.InputField()
message: str = dspy.InputField()
response: str = dspy.OutputField()
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'))
chat = dspy.Predict(ChatSignature)
# Conversation history: prior turns wrapped in dspy.History, keyed by the
# signature's own field names
history = dspy.History(messages=[
    {"message": "What's your name?", "response": "I'm Claude, an AI assistant."},
    {"message": "What can you help with?", "response": "All sorts of things, including code."},
])
result = chat(
history=history,
message="Can you write code?"
)
print(result.response)

Represents code snippets in signatures.
class Code(Type):
"""
Code snippet type.
Represents code with potential language-specific formatting
and syntax highlighting support.
"""
pass

Usage:
import dspy
class CodeGeneration(dspy.Signature):
"""Generate code from description."""
description: str = dspy.InputField()
code: dspy.Code = dspy.OutputField()
dspy.configure(lm=dspy.LM('openai/gpt-4o'))
generator = dspy.Predict(CodeGeneration)
result = generator(description="Function to calculate factorial")
print(result.code)

Represents reasoning/thinking text for models with native reasoning support.
class Reasoning(Type):
"""
Reasoning type.
Represents reasoning or thinking content, used with models
that have native reasoning capabilities (e.g., o1 models).
"""
pass

Usage:
import dspy
class ReasoningSignature(dspy.Signature):
"""Solve complex problems with reasoning."""
problem: str = dspy.InputField()
reasoning: dspy.Reasoning = dspy.OutputField()
solution: str = dspy.OutputField()
# Use with reasoning model
dspy.configure(lm=dspy.LM('openai/o1-preview'))
solver = dspy.Predict(ReasoningSignature)
result = solver(problem="Prove Fermat's Last Theorem")
print(result.reasoning) # Detailed reasoning process
print(result.solution)   # Final answer

Represents multiple tool calls in function calling.
class ToolCalls(Type):
"""
Tool calls type.
Represents multiple tool invocations in agents and
function calling scenarios.
"""
pass

Usage:
import dspy
class AgentSignature(dspy.Signature):
"""Agent that calls multiple tools."""
task: str = dspy.InputField()
tool_calls: dspy.ToolCalls = dspy.OutputField()
result: str = dspy.OutputField()
# Define tools
def get_weather(city: str) -> str:
return f"Weather in {city}: Sunny"
def search_web(query: str) -> str:
return f"Results for {query}"
# Create agent with tool calls support
lm = dspy.LM('openai/gpt-4o')
adapter = dspy.ChatAdapter(use_native_function_calling=True)
dspy.configure(lm=lm, adapter=adapter)
agent = dspy.ReAct(AgentSignature, tools=[get_weather, search_web])

Wrapper for functions used as tools in agents.
class Tool:
"""
Tool wrapper for agent functions.
Wraps Python functions with metadata for better tool
use in ReAct, CodeAct, and function calling scenarios.
"""
def __init__(
self,
func: callable,
name: str = None,
desc: str = None,
args: dict = None,
arg_types: dict = None,
arg_desc: dict = None
):
"""
Create tool from function.
Args:
func (callable): Function to wrap
name (str | None): Tool name (auto-inferred from function name)
desc (str | None): Tool description (auto-inferred from docstring)
args (dict | None): Argument schemas (auto-inferred from signature)
arg_types (dict | None): Argument type annotations
arg_desc (dict | None): Argument descriptions
"""
pass
def __call__(self, **kwargs):
"""
Execute tool.
Args:
**kwargs: Tool arguments
Returns:
Tool execution result
"""
pass
def acall(self, **kwargs):
"""
Async execution.
Args:
**kwargs: Tool arguments
Returns:
Awaitable resolving to result
"""
pass

Usage:
import dspy
# Basic tool
def get_current_time() -> str:
"""Get the current time."""
import datetime
return datetime.datetime.now().isoformat()
tool = dspy.Tool(get_current_time)
# Tool with detailed metadata
def calculate_distance(
lat1: float,
lon1: float,
lat2: float,
lon2: float
) -> float:
"""Calculate distance between two GPS coordinates."""
# Implementation...
return 0.0
tool = dspy.Tool(
calculate_distance,
name="distance_calculator",
desc="Calculate great circle distance between two GPS points",
arg_desc={
"lat1": "Latitude of first point (-90 to 90)",
"lon1": "Longitude of first point (-180 to 180)",
"lat2": "Latitude of second point (-90 to 90)",
"lon2": "Longitude of second point (-180 to 180)"
}
)
# Use in agent
class AgentSig(dspy.Signature):
question: str = dspy.InputField()
answer: str = dspy.OutputField()
agent = dspy.ReAct(AgentSig, tools=[tool])
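Tools can also be invoked directly, outside any agent, via __call__ (or acall in async code), continuing the example above:
# Direct invocation with keyword arguments
distance = tool(lat1=48.8566, lon1=2.3522, lat2=51.5074, lon2=-0.1278)
print(distance)  # 0.0 with the placeholder implementation above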
Represents citations in model outputs.

class Citations(Type):
"""
Citations type.
Represents citation information for answers grounded
in source documents. Used with models that support citations.
"""
pass

Usage:
import dspy
class AnswerWithCitations(dspy.Signature):
"""Answer questions with source citations."""
context: str = dspy.InputField(desc="Source documents")
question: str = dspy.InputField()
answer: str = dspy.OutputField()
citations: dspy.Citations = dspy.OutputField(desc="Citation indices")
dspy.configure(lm=dspy.LM('openai/gpt-4o'))
qa = dspy.Predict(AnswerWithCitations)
result = qa(
context="[1] Paris is the capital. [2] It has the Eiffel Tower.",
question="What is the capital of France?"
)
print(result.answer) # "Paris"
print(result.citations)  # [1]

Represents document structures in signatures.
class Document(Type):
"""
Document type.
Represents structured document data with metadata,
useful for document processing and analysis tasks.
"""
pass
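Usage (a sketch; the constructor fields shown here, data and title, are assumptions and may differ across DSPy versions):
import dspy

class DocQA(dspy.Signature):
    """Answer a question about the supplied documents."""
    documents: list[dspy.Document] = dspy.InputField()
    question: str = dspy.InputField()
    answer: str = dspy.OutputField()

doc = dspy.Document(
    data="Paris is the capital of France and home to the Eiffel Tower.",
    title="France facts",  # field names assumed; check your DSPy version
)

dspy.configure(lm=dspy.LM('openai/gpt-4o'))
qa = dspy.Predict(DocQA)
result = qa(documents=[doc], question="What is the capital of France?")
print(result.answer)  # "Paris"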
Create custom adapter for specific needs:

import dspy
class CustomAdapter(dspy.Adapter):
"""Custom adapter with special formatting."""
def format(self, signature, demos, inputs):
"""Custom formatting logic."""
messages = []
# Add system message
messages.append({
"role": "system",
"content": "Custom system prompt"
})
# Add demonstrations
for demo in demos:
# Custom demo formatting
messages.append({
"role": "user",
"content": f"Q: {demo.question}"
})
messages.append({
"role": "assistant",
"content": f"A: {demo.answer}"
})
# Add current inputs
user_msg = "\n".join([
f"{field}: {inputs[field]}"
for field in signature.input_fields
])
messages.append({"role": "user", "content": user_msg})
return messages
def parse(self, signature, completion, _parse_values=True):
"""Custom parsing logic."""
# Extract fields from completion
outputs = {}
for field_name in signature.output_fields:
# Custom extraction logic; _extract_field is a helper you would
# implement on this adapter (e.g. a regex over "field: value" lines)
outputs[field_name] = self._extract_field(completion, field_name)
return outputs
# Use custom adapter
adapter = CustomAdapter()
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'), adapter=adapter)

Combine multiple input types:
import dspy
class MultimodalQA(dspy.Signature):
"""Answer questions about images and audio."""
image: dspy.Image = dspy.InputField()
audio: dspy.Audio = dspy.InputField()
text: str = dspy.InputField()
answer: str = dspy.OutputField()
dspy.configure(lm=dspy.LM('openai/gpt-4o'))
qa = dspy.Predict(MultimodalQA)
result = qa(
image="image.jpg",
audio="audio.mp3",
text="What do you see and hear?"
)

Complex structured outputs with Pydantic:
import dspy
from pydantic import BaseModel
class Address(BaseModel):
street: str
city: str
country: str
postal_code: str
class Contact(BaseModel):
name: str
email: str
phone: str
address: Address
class ExtractContact(dspy.Signature):
"""Extract contact information from text."""
text: str = dspy.InputField()
contact: Contact = dspy.OutputField()
# JSON adapter works well with Pydantic
adapter = dspy.JSONAdapter()
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'), adapter=adapter)
extractor = dspy.Predict(ExtractContact)
result = extractor(text="John Smith, john@example.com, ...")
print(result.contact.name)
print(result.contact.address.city)