Structured outputs for LLMs with type safety, validation, and automatic retries
—
The instructor package provides advanced validation capabilities including LLM-powered validation and content moderation. These validators can be applied to Pydantic model fields to ensure data quality and safety.
Use Large Language Models to validate field values with custom logic and natural language instructions.
def llm_validator(
    statement: str,
    client: Optional[Any] = None,
    model: Optional[str] = None,
    temperature: float = 0.0,
    max_retries: int = 3,
    **kwargs: Any
) -> Callable[[Any], Any]:
    """
    Create an LLM-based field validator.

    Args:
        statement: Natural language validation instruction
        client: Optional LLM client (uses global default if None)
        model: Optional model name (uses client default if None)
        temperature: Sampling temperature for validation
        max_retries: Maximum number of validation attempts
        **kwargs: Additional arguments for the LLM call

    Returns:
        Validator function for use in a Pydantic validator annotation
    """

# Example: attach LLM validators to Pydantic fields via Annotated
from typing import Annotated, List

from instructor import llm_validator
from pydantic import BaseModel, BeforeValidator, Field
class Product(BaseModel):
    name: Annotated[str, BeforeValidator(llm_validator(
        "Check if this is a valid product name that makes sense"
    ))] = Field(..., description="Product name")
    price: Annotated[float, BeforeValidator(llm_validator(
        "Verify this is a reasonable price for the given product name",
        temperature=0.1
    ))] = Field(..., description="Product price in USD")
    description: Annotated[str, BeforeValidator(llm_validator(
        "Ensure the description accurately matches the product name and is marketing-appropriate"
    ))] = Field(..., description="Product description")
    category: Annotated[str, BeforeValidator(llm_validator(
        "Validate that the category is appropriate for this type of product"
    ))] = Field(..., description="Product category")
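Because the validators are part of the model, they also run on direct construction, not just during LLM extraction. A minimal failure sketch, assuming a configured default client (the exact error text depends on the validating model's response):

from pydantic import ValidationError

try:
    # Each annotated field triggers one validation call; implausible values should fail
    Product(name="???", price=-10.0, description="n/a", category="unknown")
except ValidationError as exc:
    print(exc)  # carries the validator's natural-language reason per field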
# Usage with custom client and model
class ReviewedArticle(BaseModel):
    title: Annotated[str, BeforeValidator(llm_validator(
        "Check if this title is engaging and grammatically correct",
        model="gpt-4",
        max_retries=2
    ))] = Field(..., description="Article title")
    content: Annotated[str, BeforeValidator(llm_validator(
        "Verify the content is well-structured, informative, and free of factual errors",
        model="gpt-4",
        temperature=0.2
    ))] = Field(..., description="Article content")
    tags: Annotated[List[str], BeforeValidator(llm_validator(
        "Ensure all tags are relevant to the article content and properly formatted"
    ))] = Field(..., description="Article tags")
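The extraction calls below assume an instructor-patched client bound to the name client. A minimal setup sketch, assuming the OpenAI SDK is installed and OPENAI_API_KEY is set:

import instructor
from openai import OpenAI

# Patch the OpenAI client so create() accepts a response_model argument
client = instructor.from_openai(OpenAI())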
# Extract with validation
product = client.create(
    model="gpt-4",
    messages=[{
        "role": "user",
        "content": "Extract product: Premium Wireless Headphones, $299, High-quality audio experience"
    }],
    response_model=Product
)
# All fields are automatically validated by the LLM before returning

Use OpenAI's moderation API to check content safety and compliance.
def openai_moderation(
    client: Optional[Any] = None,
    model: str = "text-moderation-latest",
    **kwargs: Any
) -> Callable[[Any], Any]:
    """
    Create an OpenAI moderation validator.

    Args:
        client: Optional OpenAI client (uses global default if None)
        model: Moderation model to use
        **kwargs: Additional arguments for the moderation call

    Returns:
        Validator function that checks content safety

    Raises:
        ValidationError: If content fails the moderation check
    """

from typing import Annotated

from instructor import openai_moderation
from pydantic import AfterValidator, BaseModel, Field
class UserContent(BaseModel):
    username: Annotated[str, AfterValidator(
        openai_moderation()  # Check for inappropriate usernames
    )] = Field(..., description="User's chosen username")
    bio: Annotated[str, AfterValidator(
        openai_moderation()  # Check bio content
    )] = Field(..., description="User biography")
    post_content: Annotated[str, AfterValidator(
        openai_moderation(model="text-moderation-stable")
    )] = Field(..., description="User's post content")
class SafeComment(BaseModel):
    author: str = Field(..., description="Comment author")
    text: Annotated[str, AfterValidator(
        openai_moderation()
    )] = Field(..., description="Comment text content")
    is_public: bool = Field(..., description="Whether comment should be public")
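Moderation failures surface as ordinary Pydantic errors, so flagged content can be rejected at construction time. A sketch; the placeholder stands in for text the moderation endpoint would actually flag:

from pydantic import ValidationError

try:
    SafeComment(
        author="anon",
        text="<text that violates the content policy>",  # placeholder
        is_public=True,
    )
except ValidationError as exc:
    print("Comment rejected:", exc.errors()[0]["msg"])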
# Extract user content with safety checking
user_data = client.create(
    model="gpt-4",
    messages=[{
        "role": "user",
        "content": "Extract user info: JohnDoe, 'Love hiking and photography', 'Check out my latest mountain photos!'"
    }],
    response_model=UserContent
)
# Content automatically checked for safety violations

You can combine multiple validators on the same field for comprehensive validation.
from typing import Annotated

from pydantic import AfterValidator, BaseModel, Field

class QualityContent(BaseModel):
    title: Annotated[
        str,
        AfterValidator(llm_validator("Check if title is engaging and appropriate")),
        AfterValidator(openai_moderation()),  # Also check for safety
    ] = Field(..., description="Content title")
    body: Annotated[
        str,
        AfterValidator(openai_moderation()),  # Safety first
        AfterValidator(llm_validator("Verify content is well-written and informative")),  # Quality second
    ] = Field(..., description="Content body")
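Pydantic applies AfterValidator annotations left to right, which is what makes the "safety first, quality second" ordering above work. A quick self-contained check with plain functions (the stamp_* helpers are illustrative, no LLM calls involved):

from typing import Annotated

from pydantic import AfterValidator, BaseModel

def stamp_first(v: str) -> str:
    return v + ":first"

def stamp_second(v: str) -> str:
    return v + ":second"

class Ordered(BaseModel):
    value: Annotated[str, AfterValidator(stamp_first), AfterValidator(stamp_second)]

print(Ordered(value="x").value)  # -> x:first:second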
# Custom validation with both LLM and traditional validation
from typing import Annotated

from pydantic import BaseModel, BeforeValidator, Field, ValidationInfo, field_validator

class ValidatedEmail(BaseModel):
    email: Annotated[str, BeforeValidator(
        llm_validator("Verify this looks like a valid email address")
    )] = Field(..., description="Email address")

    @field_validator('email')
    @classmethod
    def validate_email_format(cls, v: str) -> str:
        """Traditional regex validation."""
        import re
        pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
        if not re.match(pattern, v):
            raise ValueError('Invalid email format')
        return v

Validators can also draw on other fields for context-aware checks:

class ContextualProduct(BaseModel):
category: str = Field(..., description="Product category")
name: str = Field(..., description="Product name")
price: float = Field(..., description="Price in USD")
@validator('price')
def validate_price_for_category(cls, v, values):
"""Use LLM to validate price based on category context."""
if 'category' in values:
category = values['category']
# Dynamic LLM validation based on context
validator_func = llm_validator(
f"Check if ${v} is a reasonable price for a {category} product"
)
return validator_func(v)
return v
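Note that info.data only contains fields declared before the one being validated, so category must be listed above price in the model. A hypothetical usage sketch:

# category is validated first, then price is checked in that context
cable = ContextualProduct(category="electronics", name="USB-C cable", price=9.99)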
class CompanyInfo(BaseModel):
    name: str = Field(..., description="Company name")
    industry: str = Field(..., description="Industry sector")
    description: Annotated[str, BeforeValidator(llm_validator(
        "Verify the description matches the company name and industry"
    ))] = Field(..., description="Company description")

Cross-field coherence can be checked the same way:

class CoherentResponse(BaseModel):
question: str = Field(..., description="The original question")
answer: str = Field(..., description="The answer to the question")
confidence: float = Field(..., description="Confidence score 0-1")
@validator('answer')
def validate_answer_coherence(cls, v, values):
"""Validate answer coherence with question."""
if 'question' in values:
question = values['question']
validator_func = llm_validator(
f"Check if this answer '{v}' properly addresses the question '{question}'"
)
return validator_func(v)
return v
@validator('confidence')
def validate_confidence_matches_answer(cls, v, values):
"""Validate confidence score matches answer quality."""
if 'answer' in values and 'question' in values:
answer = values['answer']
question = values['question']
validator_func = llm_validator(
f"Check if confidence score {v} is appropriate for this answer quality: '{answer}' to question '{question}'"
)
return validator_func(v)
return vdef create_domain_validator(domain: str, rules: List[str]) -> Callable:
"""Create domain-specific validator with custom rules."""
rule_text = "; ".join(rules)
statement = f"Validate this {domain} data according to these rules: {rule_text}"
return llm_validator(statement, temperature=0.1)
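The factory simply assembles the rule list into a single instruction string; a quick sketch of what it produces:

pii_check = create_domain_validator("medical", ["Must be properly anonymized"])
# Equivalent to:
# llm_validator(
#     "Validate this medical data according to these rules: Must be properly anonymized",
#     temperature=0.1,
# )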
class MedicalRecord(BaseModel):
    patient_id: Annotated[str, BeforeValidator(create_domain_validator("medical", [
        "Must be properly anonymized",
        "Should not contain personally identifiable information",
        "Must follow HIPAA guidelines"
    ]))] = Field(..., description="Patient identifier")
    diagnosis: Annotated[str, BeforeValidator(create_domain_validator("medical", [
        "Must use proper medical terminology",
        "Should be specific and accurate",
        "Must be a valid medical condition"
    ]))] = Field(..., description="Medical diagnosis")
    treatment: Annotated[str, BeforeValidator(llm_validator(
        "Verify this treatment is appropriate for the given diagnosis",
        model="gpt-4",
        max_retries=1
    ))] = Field(..., description="Prescribed treatment")

When a validator rejects a value, debugging is easiest with logging enabled and the ValidationError inspected field by field.

from pydantic import ValidationError
import logging

# Set up logging to debug validation issues
logging.basicConfig(level=logging.DEBUG)

class DebugValidatedModel(BaseModel):
    content: Annotated[str, BeforeValidator(llm_validator(
        "Check if content is appropriate and well-written",
        max_retries=2
    ))] = Field(..., description="Content to validate")

try:
    result = client.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Extract: Some problematic content"}],
        response_model=DebugValidatedModel
    )
except ValidationError as e:
    print(f"Validation failed: {e}")
    for error in e.errors():
        print(f"Field: {error['loc']}")
        print(f"Error: {error['msg']}")
        print(f"Type: {error['type']}")
# Custom error handling for moderation failures
class SafeUserInput(BaseModel):
    message: Annotated[str, AfterValidator(
        openai_moderation()
    )] = Field(..., description="User message")

def safe_extract(user_input: str) -> SafeUserInput | None:
    """Safely extract user input with moderation."""
    try:
        return client.create(
            model="gpt-4",
            messages=[{"role": "user", "content": f"Extract: {user_input}"}],
            response_model=SafeUserInput
        )
    except ValidationError as e:
        # Check if it's a moderation failure
        moderation_errors = [
            error for error in e.errors()
            if 'moderation' in str(error.get('type', ''))
        ]
        if moderation_errors:
            logging.warning(f"Content failed moderation: {user_input}")
            return None
        # Re-raise other validation errors
        raise

# Validation caching for repeated patterns
from functools import lru_cache

@lru_cache(maxsize=1000)
def cached_llm_validator(statement: str, value: str) -> bool:
    """Cached validation to avoid repeated LLM calls."""
    validator_func = llm_validator(statement)
    try:
        validator_func(value)
        return True
    except (ValueError, ValidationError):
        return False

class OptimizedModel(BaseModel):
    """Model with performance-optimized validation."""
    email: str = Field(..., description="Email address")

    @field_validator('email')
    @classmethod
    def validate_email_cached(cls, v: str) -> str:
        """Use cached validation for common patterns."""
        if cached_llm_validator("Check if this is a valid email", v):
            return v
        raise ValueError("Email validation failed")
# Batch validation for multiple items
class BatchValidationResult(BaseModel):
    results: List[bool]

def validate_batch_with_llm(items: List[str], validation_rule: str) -> List[bool]:
    """Validate multiple items in a single LLM call."""
    item_lines = "\n".join(f"{i + 1}. {item}" for i, item in enumerate(items))
    batch_prompt = f"""
Validate each of these items according to the rule: {validation_rule}
Items:
{item_lines}
Return True or False for each item, in order.
"""
    # One possible implementation: a single structured call is more efficient
    # than one validation call per item
    response = client.create(
        model="gpt-4",
        messages=[{"role": "user", "content": batch_prompt}],
        response_model=BatchValidationResult
    )
    return response.results

Install with Tessl CLI
npx tessl i tessl/pypi-instructor