CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-anthropic

The official Python library for the anthropic API

Pending
Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Security by Snyk

Pending

The risk profile of this skill

Overview
Eval results
Files

docs/messages.md

Messages API

The Messages API is the primary interface for conversational interactions with Claude models. It supports multi-turn conversations, system prompts, tool use, streaming responses, and message batching with comprehensive type safety.

Capabilities

Message Creation

Create conversational messages with Claude models, supporting various content types, system prompts, and tool integration.

def create(
    max_tokens: int,
    messages: List[MessageParam],
    model: str,
    *,
    metadata: Optional[MetadataParam] = None,
    service_tier: Optional[Literal["auto", "standard_only"]] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = None,
    system: Optional[str] = None,
    temperature: Optional[float] = None,
    thinking: Optional[ThinkingConfigParam] = None,
    tool_choice: Optional[ToolChoiceParam] = None,
    tools: Optional[List[ToolParam]] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    **kwargs
) -> Message

async def create(
    max_tokens: int,
    messages: List[MessageParam],
    model: str,
    *,
    metadata: Optional[MetadataParam] = None,
    service_tier: Optional[Literal["auto", "standard_only"]] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = None,
    system: Optional[str] = None,
    temperature: Optional[float] = None,
    thinking: Optional[ThinkingConfigParam] = None,
    tool_choice: Optional[ToolChoiceParam] = None,
    tools: Optional[List[ToolParam]] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    **kwargs
) -> Message

Token Counting

Count tokens in messages before sending to estimate costs and ensure messages fit within model limits.

def count_tokens(
    messages: List[MessageParam],
    model: str,
    *,
    system: Optional[str] = None,
    tool_choice: Optional[ToolChoiceParam] = None,
    tools: Optional[List[ToolParam]] = None,
    **kwargs
) -> MessageTokensCount

async def count_tokens(
    messages: List[MessageParam],
    model: str,
    *,
    system: Optional[str] = None,
    tool_choice: Optional[ToolChoiceParam] = None,
    tools: Optional[List[ToolParam]] = None,
    **kwargs
) -> MessageTokensCount

Core Types

Message Types

Note: response objects such as Message are returned as model objects that support attribute access (e.g. message.content[0].text, token_count.input_tokens, as shown in the examples below); the TypedDict-style definitions here describe their fields and types.

class Message(TypedDict):
    id: str
    type: Literal["message"]
    role: Literal["assistant"]
    content: List[ContentBlock]
    model: str
    stop_reason: Optional[StopReason]
    stop_sequence: Optional[str]
    usage: Usage

class MessageParam(TypedDict):
    role: Literal["user", "assistant"]
    content: Union[str, List[ContentBlockParam]]

class MessageTokensCount(TypedDict):
    input_tokens: int
    cache_creation_input_tokens: Optional[int]
    cache_read_input_tokens: Optional[int]

Content Block Types

class ContentBlock(TypedDict):
    type: str

class TextBlock(ContentBlock):
    type: Literal["text"]
    text: str

class ToolUseBlock(ContentBlock):
    type: Literal["tool_use"]
    id: str
    name: str
    input: Dict[str, Any]

class ContentBlockParam(TypedDict):
    type: str

class TextBlockParam(ContentBlockParam):
    type: Literal["text"]
    text: str
    cache_control: Optional[CacheControlEphemeralParam]

class ImageBlockParam(ContentBlockParam):
    type: Literal["image"]
    source: Union[Base64ImageSourceParam, URLImageSourceParam]
    cache_control: Optional[CacheControlEphemeralParam]

class DocumentBlockParam(ContentBlockParam):
    type: Literal["document"]
    source: Union[Base64PDFSourceParam, URLPDFSourceParam]
    cache_control: Optional[CacheControlEphemeralParam]

class ToolUseBlockParam(ContentBlockParam):
    type: Literal["tool_use"]
    id: str
    name: str
    input: Dict[str, Any]
    cache_control: Optional[CacheControlEphemeralParam]

class ToolResultBlockParam(ContentBlockParam):
    type: Literal["tool_result"]
    tool_use_id: str
    content: Union[str, List[ContentBlockParam]]
    is_error: Optional[bool]
    cache_control: Optional[CacheControlEphemeralParam]

Image and Document Sources

class Base64ImageSourceParam(TypedDict):
    type: Literal["base64"]
    media_type: Literal["image/jpeg", "image/png", "image/gif", "image/webp"]
    data: str

class URLImageSourceParam(TypedDict):
    type: Literal["url"]
    url: str

class Base64PDFSourceParam(TypedDict):
    type: Literal["base64"]
    media_type: Literal["application/pdf"]
    data: str

class URLPDFSourceParam(TypedDict):
    type: Literal["url"]
    url: str

class PlainTextSourceParam(TypedDict):
    type: Literal["text"]
    text: str

Usage and Metadata

class Usage(TypedDict):
    input_tokens: int
    output_tokens: int
    cache_creation_input_tokens: Optional[int]
    cache_read_input_tokens: Optional[int]

class MetadataParam(TypedDict, total=False):
    user_id: Optional[str]

class CacheControlEphemeralParam(TypedDict):
    type: Literal["ephemeral"]

class StopReason(TypedDict):
    type: Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]

Extended Thinking Configuration

class ThinkingConfigParam(TypedDict, total=False):
    type: Literal["enabled", "disabled"]
    budget_tokens: Optional[int]  # Required when type="enabled", must be ≥1024 and < max_tokens

class ThinkingConfigEnabledParam(TypedDict):
    type: Literal["enabled"]
    budget_tokens: int  # Determines how many tokens Claude can use for internal reasoning

class ThinkingConfigDisabledParam(TypedDict):
    type: Literal["disabled"]

Usage Examples

Basic Text Message

from anthropic import Anthropic

client = Anthropic()

message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": "What is the capital of France?"}
    ]
)

print(message.content[0].text)

Multi-Turn Conversation

messages = [
    {"role": "user", "content": "Hello, can you help me with Python?"},
    {"role": "assistant", "content": "Of course! I'd be happy to help you with Python. What specific topic or problem would you like assistance with?"},
    {"role": "user", "content": "How do I read a CSV file?"}
]

message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=messages
)

System Prompt

message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    system="You are a helpful coding assistant. Always provide code examples when relevant.",
    messages=[
        {"role": "user", "content": "How do I sort a list in Python?"}
    ]
)

Image Input

import base64

# Read and encode image
with open("image.jpg", "rb") as img_file:
    img_data = base64.b64encode(img_file.read()).decode()

message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": "image/jpeg",
                        "data": img_data
                    }
                },
                {
                    "type": "text",
                    "text": "What do you see in this image?"
                }
            ]
        }
    ]
)

PDF Document Input

import base64

# Read and encode PDF
with open("document.pdf", "rb") as pdf_file:
    pdf_data = base64.b64encode(pdf_file.read()).decode()

message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "document",
                    "source": {
                        "type": "base64",
                        "media_type": "application/pdf",
                        "data": pdf_data
                    }
                },
                {
                    "type": "text",
                    "text": "Summarize this document"
                }
            ]
        }
    ]
)

Token Counting Example

# Count tokens before sending
token_count = client.messages.count_tokens(
    model="claude-sonnet-4-20250514",
    messages=[
        {"role": "user", "content": "What is the capital of France?"}
    ]
)

print(f"Input tokens: {token_count.input_tokens}")

if token_count.input_tokens < 4000:  # Example application-defined budget threshold
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[
            {"role": "user", "content": "What is the capital of France?"}
        ]
    )

Streaming Messages

with client.messages.stream(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": "Write a short story"}
    ]
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)

Async Usage

import asyncio
from anthropic import AsyncAnthropic

async def chat():
    client = AsyncAnthropic()
    
    message = await client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        messages=[
            {"role": "user", "content": "Hello!"}
        ]
    )
    
    return message.content[0].text

result = asyncio.run(chat())

Extended Thinking Example

# Enable extended thinking for complex analysis
message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=4000,
    thinking={
        "type": "enabled",
        "budget_tokens": 2000  # Allow Claude 2000 tokens for internal reasoning
    },
    messages=[
        {"role": "user", "content": "Analyze this complex business problem and provide a detailed solution..."}
    ]
)

Service Tier Example

# Use priority capacity when available
message = client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    service_tier="auto",  # Use priority capacity if available, fallback to standard
    messages=[
        {"role": "user", "content": "Urgent request requiring priority processing"}
    ]
)

docs

batching.md

bedrock.md

beta.md

completions.md

configuration.md

errors.md

index.md

messages.md

models.md

streaming.md

tools.md

vertex.md

tile.json