CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-anthropic

The official Python library for the anthropic API

Pending
Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Security by Snyk

Pending

The risk profile of this skill

Overview
Eval results
Files

docs/completions.md

Text Completions API

The Text Completions API provides direct text completion functionality using Claude models with a prompt-based approach. This API is primarily used for specific use cases requiring the legacy completion format or when working with prompts that don't fit the conversational message format.

Capabilities

Completion Creation

Generate text completions from prompts with configurable parameters for controlling output generation.

def create(
    max_tokens_to_sample: int,
    model: str,
    prompt: str,
    *,
    metadata: Optional[MetadataParam] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = None,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    **kwargs
) -> Completion

async def create(
    max_tokens_to_sample: int,
    model: str,
    prompt: str,
    *,
    metadata: Optional[MetadataParam] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = None,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    **kwargs
) -> Completion

Core Types

Completion Types

class Completion(TypedDict):
    id: str
    type: Literal["completion"]
    completion: str
    stop_reason: Optional[StopReason]
    model: str

class CompletionCreateParams(TypedDict):
    max_tokens_to_sample: int
    model: str
    prompt: str
    metadata: Optional[MetadataParam]
    stop_sequences: Optional[List[str]]
    stream: Optional[bool]
    temperature: Optional[float]
    top_k: Optional[int]
    top_p: Optional[float]

class StopReason(TypedDict):
    type: Literal["stop_sequence", "max_tokens"]

Parameter Types

class MetadataParam(TypedDict, total=False):
    user_id: Optional[str]

Usage Examples

Basic Text Completion

from anthropic import Anthropic

client = Anthropic()

completion = client.completions.create(
    model="claude-2.1",
    prompt="Human: What is the capital of France?\n\nAssistant:",
    max_tokens_to_sample=100
)

print(completion.completion)
# Output: " The capital of France is Paris."

Completion with Stop Sequences

completion = client.completions.create(
    model="claude-2.1",
    prompt="List three fruits:\n1.",
    max_tokens_to_sample=50,
    stop_sequences=["\n4."]
)

print(completion.completion)
# Output: " Apple\n2. Banana\n3. Orange"

Temperature Control

# Lower temperature for more focused, deterministic output
focused_completion = client.completions.create(
    model="claude-2.1",
    prompt="The scientific name for water is",
    max_tokens_to_sample=20,
    temperature=0.1
)

# Higher temperature for more creative, varied output
creative_completion = client.completions.create(
    model="claude-2.1", 
    prompt="Write a creative opening line for a story:",
    max_tokens_to_sample=50,
    temperature=0.9
)

Top-k and Top-p Sampling

# Top-k sampling: limit to top 10 most likely tokens
completion = client.completions.create(
    model="claude-2.1",
    prompt="The weather today is",
    max_tokens_to_sample=30,
    top_k=10
)

# Top-p (nucleus) sampling: limit to tokens comprising top 90% probability mass
completion = client.completions.create(
    model="claude-2.1",
    prompt="The weather today is",
    max_tokens_to_sample=30,
    top_p=0.9
)

Streaming Completions

stream = client.completions.create(
    model="claude-2.1",
    prompt="Write a short poem about mountains:",
    max_tokens_to_sample=200,
    stream=True
)

for completion in stream:
    print(completion.completion, end="", flush=True)

Multiple Stop Sequences

completion = client.completions.create(
    model="claude-2.1",
    prompt="Q: What is 2+2?\nA:",
    max_tokens_to_sample=100,
    stop_sequences=["\n", "Q:", "Human:"]
)

print(completion.completion.strip())
# Output: "4"

Legacy Prompt Format

# Using the legacy Human/Assistant format
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

client = Anthropic()

prompt = f"{HUMAN_PROMPT} Can you explain photosynthesis in simple terms?{AI_PROMPT}"

completion = client.completions.create(
    model="claude-2.1",
    prompt=prompt,
    max_tokens_to_sample=200
)

print(completion.completion)

Async Completions

import asyncio
from anthropic import AsyncAnthropic

async def async_completion_example():
    client = AsyncAnthropic()
    
    completion = await client.completions.create(
        model="claude-2.1",
        prompt="The future of artificial intelligence is",
        max_tokens_to_sample=100,
        temperature=0.7
    )
    
    return completion.completion

result = asyncio.run(async_completion_example())
print(result)

Error Handling with Completions

from anthropic import Anthropic, RateLimitError, APITimeoutError

client = Anthropic()

try:
    completion = client.completions.create(
        model="claude-2.1",
        prompt="Write a haiku about programming",
        max_tokens_to_sample=50
    )
    
    print(completion.completion)
    
except RateLimitError as e:
    print(f"Rate limited: {e}")
    
except APITimeoutError as e:
    print(f"Request timed out: {e}")
    
except Exception as e:
    print(f"Unexpected error: {e}")

docs

batching.md

bedrock.md

beta.md

completions.md

configuration.md

errors.md

index.md

messages.md

models.md

streaming.md

tools.md

vertex.md

tile.json