CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-anthropic

The official Python library for the anthropic API

Pending
Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Security by Snyk

Pending

The risk profile of this skill

Overview
Eval results
Files

docs/models.md

Models API

The Models API provides access to information about available Claude models, including their capabilities, context limits, and metadata. This helps developers select appropriate models for different use cases and understand model-specific features.

Capabilities

Model Listing

Retrieve a list of all available Claude models with their specifications and capabilities.

# Synchronous client (Anthropic)
def list(**kwargs) -> List[Model]

# Asynchronous client (AsyncAnthropic)
async def list(**kwargs) -> List[Model]

Core Types

Model Types

class Model(TypedDict):
    """Basic metadata for a Claude model as returned by the Models API.

    NOTE(review): the usage examples in this document access fields as
    attributes (``model.id``); a TypedDict only supports ``model["id"]`` —
    presumably the real SDK returns object-like models. Confirm upstream.
    """
    id: str  # canonical model identifier, e.g. "claude-sonnet-4-20250514"
    type: Literal["model"]  # object discriminator; always "model"
    display_name: str  # human-readable model name
    created_at: str  # creation timestamp (string; exact format not shown here)

class ModelInfo(TypedDict):
    """Model metadata record.

    NOTE(review): duplicates ``Model`` field-for-field — confirm whether the
    SDK really declares both shapes, or one should alias the other.
    """
    id: str
    type: Literal["model"]
    display_name: str
    created_at: str

class ModelParam(TypedDict):
    """Parameter shape for supplying a model name to request methods."""
    model: str  # model identifier accepted by e.g. client.messages.create

class ModelListParams(TypedDict, total=False):
    """Query parameters for models.list(); none are currently defined."""
    pass

Usage Examples

List Available Models

from anthropic import Anthropic

# Reads credentials from the environment (ANTHROPIC_API_KEY by convention).
client = Anthropic()

# Get all available models
models = client.models.list()

# NOTE(review): attribute access here assumes object-like models, not the
# TypedDicts declared above — confirm against the SDK's actual return type.
for model in models:
    print(f"Model ID: {model.id}")
    print(f"Display Name: {model.display_name}")
    print(f"Created: {model.created_at}")
    print("---")

Model Selection Helper

def select_model_by_capability(models: List[Model], capability: str) -> Optional[str]:
    """Return the ID of the first model in *models* that offers *capability*.

    Args:
        models: Models to search, in preference order.
        capability: One of the keys in the mapping below.

    Returns:
        The matching model ID, or ``None`` when nothing qualifies.
    """
    # Static capability-to-model mapping (example data only).
    capability_map = {
        "vision": ["claude-sonnet-4-20250514", "claude-haiku-3-20241022"],
        "function_calling": ["claude-sonnet-4-20250514", "claude-haiku-3-20241022"],
        "long_context": ["claude-sonnet-4-20250514"],
        "speed": ["claude-haiku-3-20241022"],
    }
    eligible = capability_map.get(capability, [])
    # First listed model that appears in the eligible set wins.
    return next((m.id for m in models if m.id in eligible), None)

# Usage: pick a vision-capable model, then send it a multimodal message.
models = client.models.list()
vision_model = select_model_by_capability(models, "vision")

if vision_model:
    print(f"Selected model for vision: {vision_model}")
    
    # Use the selected model
    message = client.messages.create(
        model=vision_model,
        max_tokens=1024,
        messages=[
            {
                "role": "user", 
                "content": [
                    {"type": "text", "text": "What's in this image?"},
                    {"type": "image", "source": {"type": "url", "url": "https://example.com/image.jpg"}}
                ]
            }
        ]
    )

Model Comparison

def compare_models(models: List[Model]) -> None:
    """Print a comparison of the given models, newest first.

    For each model the ID, display name, and creation date are shown,
    followed by capability hints inferred from the model family name.
    """
    print("Available Claude Models:")
    print("=" * 50)

    # Hint lines keyed by the family marker found in the model ID.
    hint_table = [
        ("haiku", ["• Optimized for: Speed and efficiency",
                   "• Best for: Quick responses, simple tasks"]),
        ("sonnet", ["• Optimized for: Balance of capability and speed",
                    "• Best for: Most general use cases"]),
        ("opus", ["• Optimized for: Maximum capability",
                  "• Best for: Complex reasoning, creative tasks"]),
    ]

    # Newest models first.
    for entry in sorted(models, key=lambda item: item.created_at, reverse=True):
        print(f"ID: {entry.id}")
        print(f"Name: {entry.display_name}")
        print(f"Created: {entry.created_at}")

        lowered = entry.id.lower()
        for marker, hints in hint_table:
            if marker in lowered:
                for hint in hints:
                    print(hint)
                break  # first matching family wins, like the original elif chain

        print("-" * 30)

# Usage: fetch the live model list and print the comparison.
models = client.models.list()
compare_models(models)

Async Model Listing

import asyncio
from anthropic import AsyncAnthropic

async def list_models_async():
    """Fetch and print the available models using the async client."""
    client = AsyncAnthropic()
    
    models = await client.models.list()
    
    print(f"Found {len(models)} available models:")
    for model in models:
        print(f"- {model.display_name} ({model.id})")
    
    return models

# Run async
models = asyncio.run(list_models_async())

Model Validation

def validate_model_for_task(model_id: str, task_type: str) -> tuple[bool, str]:
    """Check whether *model_id* exists and suits *task_type*.

    Returns:
        ``(ok, message)`` — ``ok`` is False when the model is unavailable or
        a heuristic suggests it is a poor fit for the task.

    NOTE: relies on the module-level ``client`` and performs a live API call.
    """
    # Every currently available model ID, for the existence check.
    available = {m.id for m in client.models.list()}
    if model_id not in available:
        return False, f"Model {model_id} is not available"

    # Task-specific heuristics based on the model family name.
    if task_type == "vision":
        if "haiku" in model_id:
            return False, "This model version may have limited vision capabilities"
        return True, "Model supports vision tasks"
    if task_type == "long_document" and "opus" not in model_id and "sonnet" not in model_id:
        return False, "Consider using a higher-capability model for long documents"

    return True, "Model is suitable for this task"

# Usage: validate the model first, then issue a request only if it passed.
is_valid, message = validate_model_for_task("claude-sonnet-4-20250514", "vision")
print(f"Valid: {is_valid}, Message: {message}")

if is_valid:
    # Proceed with using the model
    response = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[{"role": "user", "content": "Hello!"}]
    )

Model Metadata Tracking

from datetime import datetime
from typing import Dict, Any

class ModelTracker:
    """Track per-model usage counts and expose enriched model metadata."""

    def __init__(self, client: Anthropic):
        self.client = client
        # model_id -> {"count": int, "last_used": ISO timestamp or None}
        self.usage_stats: Dict[str, Dict[str, Any]] = {}

    def get_models_with_metadata(self) -> Dict[str, Dict[str, Any]]:
        """Return a dict of model_id -> metadata, merged with usage stats.

        Performs a live ``models.list()`` call on the stored client.
        """
        summary: Dict[str, Dict[str, Any]] = {}
        for model in self.client.models.list():
            stats = self.usage_stats.get(model.id, {})
            summary[model.id] = {
                "display_name": model.display_name,
                "created_at": model.created_at,
                "usage_count": stats.get("count", 0),
                "last_used": stats.get("last_used"),
                "estimated_cost_tier": self._get_cost_tier(model.id),
                "capabilities": self._get_capabilities(model.id),
            }
        return summary

    def track_usage(self, model_id: str):
        """Record one use of *model_id* with the current timestamp."""
        record = self.usage_stats.setdefault(model_id, {"count": 0, "last_used": None})
        record["count"] += 1
        record["last_used"] = datetime.now().isoformat()

    def _get_cost_tier(self, model_id: str) -> str:
        """Estimate a cost tier ("low"/"medium"/"high") from the family name."""
        lowered = model_id.lower()
        for marker, tier in (("haiku", "low"), ("sonnet", "medium"), ("opus", "high")):
            if marker in lowered:
                return tier
        return "unknown"

    def _get_capabilities(self, model_id: str) -> List[str]:
        """Infer a capability list from the model ID (heuristic only)."""
        caps = ["text"]
        if "sonnet" in model_id or "opus" in model_id:
            caps.extend(["vision", "function_calling", "long_context"])
        elif "haiku" in model_id:
            caps.extend(["vision", "function_calling"])
        return caps

# Usage: enumerate models with tracked metadata, then record one request.
tracker = ModelTracker(client)
models_metadata = tracker.get_models_with_metadata()

for model_id, metadata in models_metadata.items():
    print(f"Model: {metadata['display_name']}")
    print(f"Capabilities: {', '.join(metadata['capabilities'])}")
    # BUG FIX: get_models_with_metadata stores this value under the key
    # "estimated_cost_tier"; the previous lookup of "cost_tier" raised
    # KeyError on the first iteration.
    print(f"Cost Tier: {metadata['estimated_cost_tier']}")
    print(f"Usage Count: {metadata['usage_count']}")
    print("---")

# Track usage when making requests
model_to_use = "claude-sonnet-4-20250514"
tracker.track_usage(model_to_use)

response = client.messages.create(
    model=model_to_use,
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello!"}]
)

Model Selection Strategy

def select_optimal_model(
    task_description: str,
    priority: str = "balanced",  # "speed", "quality", "cost", "balanced"
    require_vision: bool = False,
    require_function_calling: bool = False
) -> str:
    """Select the optimal model based on requirements.

    Args:
        task_description: Free-text description of the task (informational).
        priority: Selection strategy — "speed", "quality", "cost", or "balanced".
        require_vision: Exclude models assumed to lack vision support.
        require_function_calling: Exclude models assumed to lack tool use.

    Returns:
        The ID of the best matching model.

    Raises:
        ValueError: If no available model satisfies the requirements.

    NOTE: relies on the module-level ``client`` and performs a live API call.
    """
    # Heuristic: "claude-instant" models are treated as lacking both vision
    # and function calling, so either requirement filters them out.
    candidates = []
    for candidate in client.models.list():
        if (require_vision or require_function_calling) and "claude-instant" in candidate.id.lower():
            continue
        candidates.append(candidate)

    if not candidates:
        raise ValueError("No models meet the specified requirements")

    # Each priority maps to a preferred model family marker.
    family_by_priority = {
        "speed": "haiku",
        "quality": "opus",
        "cost": "haiku",
        "balanced": "sonnet",
    }
    preferred = family_by_priority.get(priority)
    if preferred is not None:
        for candidate in candidates:
            if preferred in candidate.id.lower():
                return candidate.id

    # Fallback to the first model that met the requirements.
    return candidates[0].id

# Usage examples — each call hits the live Models API and may raise
# ValueError if no available model satisfies the stated requirements.
speed_model = select_optimal_model(
    "Quick text generation", 
    priority="speed"
)

vision_model = select_optimal_model(
    "Analyze an image",
    priority="quality",
    require_vision=True
)

function_model = select_optimal_model(
    "Call external APIs",
    priority="balanced", 
    require_function_calling=True
)

print(f"Speed model: {speed_model}")
print(f"Vision model: {vision_model}")
print(f"Function calling model: {function_model}")

docs

batching.md

bedrock.md

beta.md

completions.md

configuration.md

errors.md

index.md

messages.md

models.md

streaming.md

tools.md

vertex.md

tile.json