pypi-anthropic

Description
The official Python library for the Anthropic API
Author
tessl
Last updated

How to use

npx @tessl/cli registry install tessl/pypi-anthropic@0.66.0

completions.md docs/

1
# Text Completions API
2
3
The Text Completions API provides direct text completion functionality using Claude models with a prompt-based approach. This is the legacy API, superseded by the Messages API; use it only for specific use cases requiring the legacy completion format or when working with prompts that don't fit the conversational message format.
4
5
## Capabilities
6
7
### Completion Creation
8
9
Generate text completions from prompts with configurable parameters for controlling output generation.
10
11
```python { .api }
12
def create(
13
max_tokens_to_sample: int,
14
model: str,
15
prompt: str,
16
*,
17
metadata: Optional[MetadataParam] = None,
18
stop_sequences: Optional[List[str]] = None,
19
stream: Optional[bool] = None,
20
temperature: Optional[float] = None,
21
top_k: Optional[int] = None,
22
top_p: Optional[float] = None,
23
**kwargs
24
) -> Completion
25
26
async def create(
27
max_tokens_to_sample: int,
28
model: str,
29
prompt: str,
30
*,
31
metadata: Optional[MetadataParam] = None,
32
stop_sequences: Optional[List[str]] = None,
33
stream: Optional[bool] = None,
34
temperature: Optional[float] = None,
35
top_k: Optional[int] = None,
36
top_p: Optional[float] = None,
37
**kwargs
38
) -> Completion
39
```
40
41
## Core Types
42
43
### Completion Types
44
45
```python { .api }
46
class Completion(BaseModel):  # pydantic model — fields are accessed as attributes (e.g. completion.completion)
47
id: str
48
type: Literal["completion"]
49
completion: str
50
stop_reason: Optional[StopReason]
51
model: str
52
53
class CompletionCreateParams(TypedDict):
54
max_tokens_to_sample: int
55
model: str
56
prompt: str
57
metadata: Optional[MetadataParam]
58
stop_sequences: Optional[List[str]]
59
stream: Optional[bool]
60
temperature: Optional[float]
61
top_k: Optional[int]
62
top_p: Optional[float]
63
64
class StopReason(TypedDict):
65
type: Literal["stop_sequence", "max_tokens"]
66
```
67
68
### Parameter Types
69
70
```python { .api }
71
class MetadataParam(TypedDict, total=False):
72
user_id: Optional[str]
73
```
74
75
## Usage Examples
76
77
### Basic Text Completion
78
79
```python
80
from anthropic import Anthropic
81
82
client = Anthropic()
83
84
completion = client.completions.create(
85
model="claude-2.1",
86
prompt="Human: What is the capital of France?\n\nAssistant:",
87
max_tokens_to_sample=100
88
)
89
90
print(completion.completion)
91
# Output: " The capital of France is Paris."
92
```
93
94
### Completion with Stop Sequences
95
96
```python
97
completion = client.completions.create(
98
model="claude-2.1",
99
prompt="List three fruits:\n1.",
100
max_tokens_to_sample=50,
101
stop_sequences=["\n4."]
102
)
103
104
print(completion.completion)
105
# Output: " Apple\n2. Banana\n3. Orange"
106
```
107
108
### Temperature Control
109
110
```python
111
# Lower temperature for more focused, deterministic output
112
focused_completion = client.completions.create(
113
model="claude-2.1",
114
prompt="The scientific name for water is",
115
max_tokens_to_sample=20,
116
temperature=0.1
117
)
118
119
# Higher temperature for more creative, varied output
120
creative_completion = client.completions.create(
121
model="claude-2.1",
122
prompt="Write a creative opening line for a story:",
123
max_tokens_to_sample=50,
124
temperature=0.9
125
)
126
```
127
128
### Top-k and Top-p Sampling
129
130
```python
131
# Top-k sampling: limit to top 10 most likely tokens
132
completion = client.completions.create(
133
model="claude-2.1",
134
prompt="The weather today is",
135
max_tokens_to_sample=30,
136
top_k=10
137
)
138
139
# Top-p (nucleus) sampling: limit to tokens comprising top 90% probability mass
140
completion = client.completions.create(
141
model="claude-2.1",
142
prompt="The weather today is",
143
max_tokens_to_sample=30,
144
top_p=0.9
145
)
146
```
147
148
### Streaming Completions
149
150
```python
151
stream = client.completions.create(
152
model="claude-2.1",
153
prompt="Write a short poem about mountains:",
154
max_tokens_to_sample=200,
155
stream=True
156
)
157
158
for completion in stream:
159
print(completion.completion, end="", flush=True)
160
```
161
162
### Multiple Stop Sequences
163
164
```python
165
completion = client.completions.create(
166
model="claude-2.1",
167
prompt="Q: What is 2+2?\nA:",
168
max_tokens_to_sample=100,
169
stop_sequences=["\n", "Q:", "Human:"]
170
)
171
172
print(completion.completion.strip())
173
# Output: " 4"
174
```
175
176
### Legacy Prompt Format
177
178
```python
179
# Using the legacy Human/Assistant format
180
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
181
182
client = Anthropic()
183
184
prompt = f"{HUMAN_PROMPT} Can you explain photosynthesis in simple terms?{AI_PROMPT}"
185
186
completion = client.completions.create(
187
model="claude-2.1",
188
prompt=prompt,
189
max_tokens_to_sample=200
190
)
191
192
print(completion.completion)
193
```
194
195
### Async Completions
196
197
```python
198
import asyncio
199
from anthropic import AsyncAnthropic
200
201
async def async_completion_example():
202
client = AsyncAnthropic()
203
204
completion = await client.completions.create(
205
model="claude-2.1",
206
prompt="The future of artificial intelligence is",
207
max_tokens_to_sample=100,
208
temperature=0.7
209
)
210
211
return completion.completion
212
213
result = asyncio.run(async_completion_example())
214
print(result)
215
```
216
217
### Error Handling with Completions
218
219
```python
220
from anthropic import Anthropic, RateLimitError, APITimeoutError
221
222
client = Anthropic()
223
224
try:
225
completion = client.completions.create(
226
model="claude-2.1",
227
prompt="Write a haiku about programming",
228
max_tokens_to_sample=50
229
)
230
231
print(completion.completion)
232
233
except RateLimitError as e:
234
print(f"Rate limited: {e}")
235
236
except APITimeoutError as e:
237
print(f"Request timed out: {e}")
238
239
except Exception as e:
240
print(f"Unexpected error: {e}")
241
```