The official Python library for the Anthropic API
Status: pending review — best-practices conformance, impact, and risk profile have not yet been assessed, and no eval scenarios have been run.
The Text Completions API provides direct text completion functionality using Claude models with a prompt-based approach. This API is primarily used for specific use cases requiring the legacy completion format or when working with prompts that don't fit the conversational message format.
Generate text completions from prompts with configurable parameters for controlling output generation.
def create(
    max_tokens_to_sample: int,
    model: str,
    prompt: str,
    *,
    metadata: Optional[MetadataParam] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = None,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    **kwargs
) -> Completion:
    """Create a text completion for the given prompt (synchronous).

    Args:
        max_tokens_to_sample: Maximum number of tokens to generate before stopping.
        model: Model identifier to use (e.g. "claude-2.1").
        prompt: Raw prompt text; expected to use the Human/Assistant turn format.
        metadata: Optional request metadata (e.g. an end-user identifier).
        stop_sequences: Custom sequences that cause generation to stop.
        stream: When True, the call returns an iterable of incremental completions.
        temperature: Sampling temperature; lower values are more deterministic.
        top_k: Restrict sampling to the k most likely tokens.
        top_p: Nucleus sampling — restrict to the smallest set of tokens whose
            cumulative probability reaches this threshold.
        **kwargs: Additional request options passed through to the API client.

    Returns:
        A Completion object containing the generated text and stop reason.
    """
    ...
async def create(
    max_tokens_to_sample: int,
    model: str,
    prompt: str,
    *,
    metadata: Optional[MetadataParam] = None,
    stop_sequences: Optional[List[str]] = None,
    stream: Optional[bool] = None,
    temperature: Optional[float] = None,
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    **kwargs
) -> Completion:
    """Async variant of completions.create; must be awaited.

    Parameters and return value are identical to the synchronous version:
    generates a text completion for *prompt* using *model*, producing at most
    *max_tokens_to_sample* tokens.
    """
    ...


class Completion(TypedDict):
    """Response object returned by the Text Completions API."""

    # Unique identifier for this completion.
    id: str
    # Object discriminator; always the literal string "completion".
    type: Literal["completion"]
    # The generated text.
    completion: str
    # Why generation stopped, or None — see StopReason.
    stop_reason: Optional[StopReason]
    # The model that produced this completion.
    model: str
class CompletionCreateParams(TypedDict):
    """Request parameters accepted by completions.create."""

    # Maximum number of tokens to generate before stopping.
    max_tokens_to_sample: int
    # Model identifier, e.g. "claude-2.1".
    model: str
    # Raw prompt text in the Human/Assistant turn format.
    prompt: str
    # Optional request metadata (e.g. an end-user identifier).
    metadata: Optional[MetadataParam]
    # Custom sequences that cause generation to stop.
    stop_sequences: Optional[List[str]]
    # When True, responses are streamed incrementally.
    stream: Optional[bool]
    # Sampling temperature; lower values are more deterministic.
    temperature: Optional[float]
    # Restrict sampling to the k most likely tokens.
    top_k: Optional[int]
    # Nucleus-sampling probability-mass cutoff.
    top_p: Optional[float]
class StopReason(TypedDict):
    """Why generation ended."""

    # "stop_sequence": a configured stop sequence was produced;
    # "max_tokens": the max_tokens_to_sample limit was reached.
    type: Literal["stop_sequence", "max_tokens"]


class MetadataParam(TypedDict, total=False):
    """Optional request metadata; all keys are optional (total=False)."""

    # Opaque identifier for the end user on whose behalf the request is made.
    user_id: Optional[str]


from anthropic import Anthropic
# --- Basic completion ---
client = Anthropic()

completion = client.completions.create(
    model="claude-2.1",
    prompt="Human: What is the capital of France?\n\nAssistant:",
    max_tokens_to_sample=100
)
print(completion.completion)
# Output: " The capital of France is Paris."

# --- Custom stop sequences: generation halts before "\n4." is emitted ---
completion = client.completions.create(
    model="claude-2.1",
    prompt="List three fruits:\n1.",
    max_tokens_to_sample=50,
    stop_sequences=["\n4."]
)
print(completion.completion)
# Output: " Apple\n2. Banana\n3. Orange"

# Lower temperature for more focused, deterministic output
focused_completion = client.completions.create(
    model="claude-2.1",
    prompt="The scientific name for water is",
    max_tokens_to_sample=20,
    temperature=0.1
)

# Higher temperature for more creative, varied output
creative_completion = client.completions.create(
    model="claude-2.1",
    prompt="Write a creative opening line for a story:",
    max_tokens_to_sample=50,
    temperature=0.9
)

# Top-k sampling: limit to top 10 most likely tokens
completion = client.completions.create(
    model="claude-2.1",
    prompt="The weather today is",
    max_tokens_to_sample=30,
    top_k=10
)

# Top-p (nucleus) sampling: limit to tokens comprising top 90% probability mass
completion = client.completions.create(
    model="claude-2.1",
    prompt="The weather today is",
    max_tokens_to_sample=30,
    top_p=0.9
)

# --- Streaming: stream=True yields incremental completion chunks ---
stream = client.completions.create(
    model="claude-2.1",
    prompt="Write a short poem about mountains:",
    max_tokens_to_sample=200,
    stream=True
)
for completion in stream:
    print(completion.completion, end="", flush=True)

# --- Multiple stop sequences: stop at a newline or a new turn marker ---
completion = client.completions.create(
    model="claude-2.1",
    prompt="Q: What is 2+2?\nA:",
    max_tokens_to_sample=100,
    stop_sequences=["\n", "Q:", "Human:"]
)
print(completion.completion.strip())
# Output: " 4"

# Using the legacy Human/Assistant format
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

client = Anthropic()

# HUMAN_PROMPT / AI_PROMPT are the SDK-provided turn markers for the
# legacy prompt format.
prompt = f"{HUMAN_PROMPT} Can you explain photosynthesis in simple terms?{AI_PROMPT}"
completion = client.completions.create(
    model="claude-2.1",
    prompt=prompt,
    max_tokens_to_sample=200
)
print(completion.completion)

# --- Async usage: AsyncAnthropic exposes the same interface, awaited ---
import asyncio
from anthropic import AsyncAnthropic


async def async_completion_example():
    client = AsyncAnthropic()
    completion = await client.completions.create(
        model="claude-2.1",
        prompt="The future of artificial intelligence is",
        max_tokens_to_sample=100,
        temperature=0.7
    )
    return completion.completion


result = asyncio.run(async_completion_example())
print(result)

# --- Error handling: catch the SDK's specific exception types first ---
from anthropic import Anthropic, RateLimitError, APITimeoutError

client = Anthropic()
try:
    completion = client.completions.create(
        model="claude-2.1",
        prompt="Write a haiku about programming",
        max_tokens_to_sample=50
    )
    print(completion.completion)
except RateLimitError as e:
    print(f"Rate limited: {e}")
except APITimeoutError as e:
    print(f"Request timed out: {e}")
except Exception as e:
    # Broad catch is acceptable here: this is a top-level example boundary.
    print(f"Unexpected error: {e}")