pypi-openai

Description
Official Python library for the OpenAI API providing chat completions, embeddings, audio, images, and more
Author
tessl
Last updated

How to use

npx @tessl/cli registry install tessl/pypi-openai@1.106.0

docs/text-completions.md

# Text Completions

Legacy text completion interface for older models like GPT-3.5 Turbo Instruct, providing direct text generation capabilities.

## Capabilities

### Basic Text Completions

Generate text completions using legacy completion models with direct prompt-based interaction.

```python { .api }
def create(
    self,
    *,
    model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
    prompt: Union[str, List[str], List[int], List[List[int]], None],
    best_of: Optional[int] | NotGiven = NOT_GIVEN,
    echo: Optional[bool] | NotGiven = NOT_GIVEN,
    frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
    logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
    logprobs: Optional[int] | NotGiven = NOT_GIVEN,
    max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
    seed: Optional[int] | NotGiven = NOT_GIVEN,
    stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
    stream: Optional[bool] | NotGiven = NOT_GIVEN,
    stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
    suffix: Optional[str] | NotGiven = NOT_GIVEN,
    temperature: Optional[float] | NotGiven = NOT_GIVEN,
    top_p: Optional[float] | NotGiven = NOT_GIVEN,
    user: str | NotGiven = NOT_GIVEN
) -> Completion | Stream[Completion]: ...
```

Usage examples:

```python
from openai import OpenAI

client = OpenAI()

# Simple text completion
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Once upon a time, in a land far away,",
    max_tokens=100,
    temperature=0.7
)

print(response.choices[0].text)

# Multiple completions
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="The benefits of renewable energy include:",
    max_tokens=150,
    n=3,  # Generate 3 different completions
    temperature=0.8
)

for i, choice in enumerate(response.choices):
    print(f"Completion {i+1}: {choice.text}")

# Code completion
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="def fibonacci(n):",
    max_tokens=100,
    temperature=0.1,  # Lower temperature for code
    stop=["\n\n"]  # Stop at double newline
)

print("Generated code:")
print("def fibonacci(n):" + response.choices[0].text)
```

### Text Completion with Context

Use prompts with context and examples for better completion quality and specific formatting.

Usage examples:

```python
# Few-shot learning with examples
prompt = """Translate English to French:

English: Hello, how are you?
French: Salut, comment allez-vous?

English: What time is it?
French: Quelle heure est-il?

English: I love programming.
French:"""

response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt=prompt,
    max_tokens=50,
    temperature=0.3,
    stop=["\n"]
)

print(f"Translation: {response.choices[0].text.strip()}")

# Text classification
prompt = """Classify the sentiment of these reviews as positive, negative, or neutral:

Review: "This product is amazing! I love it."
Sentiment: positive

Review: "It's okay, nothing special."
Sentiment: neutral

Review: "Terrible quality, waste of money."
Sentiment: negative

Review: "Best purchase I've made this year!"
Sentiment:"""

response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt=prompt,
    max_tokens=10,
    temperature=0.0
)

print(f"Sentiment: {response.choices[0].text.strip()}")
```

### Streaming Text Completions

Stream text completions in real time for responsive applications and long-form content generation.

```python { .api }
def create(
    self,
    *,
    model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
    prompt: Union[str, List[str], List[int], List[List[int]], None],
    stream: Literal[True],
    stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
    # ... other parameters
) -> Stream[Completion]: ...
```

Usage examples:

```python
# Streaming response
prompt = "Write a short story about a space explorer:"

stream = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt=prompt,
    max_tokens=200,
    temperature=0.8,
    stream=True
)

print("Story: ", end="")
for chunk in stream:
    if chunk.choices[0].text:
        print(chunk.choices[0].text, end="", flush=True)
print()

# Stream with usage tracking
stream = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Explain quantum computing:",
    max_tokens=150,
    stream=True,
    stream_options={"include_usage": True}
)

for chunk in stream:
    # With include_usage, the final chunk carries usage and an empty
    # choices list, so guard before indexing
    if chunk.choices and chunk.choices[0].text:
        print(chunk.choices[0].text, end="")
    if chunk.usage:  # Final chunk
        print(f"\n\nTokens used: {chunk.usage.total_tokens}")
```

### Advanced Text Completion Parameters

Fine-tune completion behavior with advanced parameters for specific use cases and output control.

Usage examples:

```python
# Logprobs for token probability analysis
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="The capital of France is",
    max_tokens=10,
    logprobs=5,  # Return top 5 token probabilities
    echo=True  # Include the prompt in the response
)

print("Full text:", response.choices[0].text)
print("Token logprobs:", response.choices[0].logprobs.tokens)

# Best of multiple generations
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Write a creative product name for a new smartphone:",
    max_tokens=20,
    temperature=0.9,
    best_of=5,  # Generate 5, return the best 1
    n=1
)

# Suffix completion (fill in the middle)
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="The weather today is",
    suffix="and tomorrow will be even better.",
    max_tokens=10,
    temperature=0.5
)

print("Complete text:", response.choices[0].text)

# Frequency and presence penalties
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="List the benefits of exercise:",
    max_tokens=150,
    frequency_penalty=0.5,  # Reduce repetition
    presence_penalty=0.3,  # Encourage new topics
    temperature=0.7
)
```

### Token-based Input

Use tokenized input for precise control over model input and fine-grained prompt engineering.

Usage examples:

```python
import tiktoken

# Get tokenizer for the model
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo-instruct")

# Tokenize input
text = "Hello, world! How are you today?"
tokens = encoding.encode(text)
print(f"Tokens: {tokens}")

# Use tokens as input
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt=tokens,  # List of token IDs
    max_tokens=50,
    temperature=0.5
)

print("Response:", response.choices[0].text)

# Multiple prompts with different tokenizations
prompts = [
    encoding.encode("Complete this sentence: The future of AI is"),
    encoding.encode("Write a haiku about technology:")
]

response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt=prompts,  # List of token lists
    max_tokens=50,
    n=1,
    temperature=0.7
)

for i, choice in enumerate(response.choices):
    print(f"Prompt {i+1} completion: {choice.text}")
```

## Types

### Core Response Types

```python { .api }
class Completion(BaseModel):
    id: str
    choices: List[CompletionChoice]
    created: int
    model: str
    object: Literal["text_completion"]
    system_fingerprint: Optional[str]
    usage: Optional[CompletionUsage]

class CompletionChoice(BaseModel):
    finish_reason: Literal["stop", "length", "content_filter"]
    index: int
    logprobs: Optional[CompletionLogprobs]
    text: str

class CompletionLogprobs(BaseModel):
    text_offset: List[int]
    token_logprobs: List[Optional[float]]
    tokens: List[str]
    top_logprobs: List[Optional[Dict[str, float]]]
```
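
A minimal sketch of reading these fields from a non-streaming response, reusing the `client` from the earlier examples:

```python
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say hello:",
    max_tokens=5
)

# Each choice carries its generated text and the reason generation stopped
for choice in response.choices:
    print(choice.index, repr(choice.text), choice.finish_reason)

# usage is populated on non-streaming responses
if response.usage:
    print("Total tokens:", response.usage.total_tokens)
```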

### Parameter Types

```python { .api }
CompletionCreateParams = TypedDict('CompletionCreateParams', {
    'model': Required[Union[str, Literal['gpt-3.5-turbo-instruct', 'davinci-002', 'babbage-002']]],
    'prompt': Required[Union[str, List[str], List[int], List[List[int]], None]],
    'best_of': NotRequired[Optional[int]],
    'echo': NotRequired[Optional[bool]],
    'frequency_penalty': NotRequired[Optional[float]],
    'logit_bias': NotRequired[Optional[Dict[str, int]]],
    'logprobs': NotRequired[Optional[int]],
    'max_tokens': NotRequired[Optional[int]],
    'n': NotRequired[Optional[int]],
    'presence_penalty': NotRequired[Optional[float]],
    'seed': NotRequired[Optional[int]],
    'stop': NotRequired[Union[Optional[str], List[str], None]],
    'stream': NotRequired[Optional[bool]],
    'suffix': NotRequired[Optional[str]],
    'temperature': NotRequired[Optional[float]],
    'top_p': NotRequired[Optional[float]],
    'user': NotRequired[str],
}, total=False)

# Prompt can be various formats
PromptParam = Union[
    str,              # Simple text prompt
    List[str],        # Multiple text prompts
    List[int],        # Token IDs
    List[List[int]],  # Multiple token sequences
    None              # No prompt (for suffix-only completion)
]
```
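
A small sketch of using this TypedDict to assemble a request ahead of time and unpack it into `create()`. The import path is an assumption here; check the package's actual module layout:

```python
from openai import OpenAI
# Import path is an assumption; adjust to the installed package's layout
from openai.types.completion_create_params import CompletionCreateParams

client = OpenAI()

# Build the request as a typed dict so a type checker can validate the keys
params: CompletionCreateParams = {
    "model": "gpt-3.5-turbo-instruct",
    "prompt": "Name three primary colors:",
    "max_tokens": 30,
    "temperature": 0.2,
}

response = client.completions.create(**params)
print(response.choices[0].text)
```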

### Model Types

```python { .api }
# Supported legacy completion models
CompletionModel = Literal[
    "gpt-3.5-turbo-instruct",
    "davinci-002",
    "babbage-002"
]
```

### Usage and Configuration

```python { .api }
class CompletionUsage(BaseModel):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int

# Parameter ranges and defaults
class CompletionParams:
    temperature: float = 1.0        # 0.0 to 2.0
    top_p: float = 1.0              # 0.0 to 1.0
    max_tokens: int = 16            # 1 to model limit
    frequency_penalty: float = 0.0  # -2.0 to 2.0
    presence_penalty: float = 0.0   # -2.0 to 2.0
    n: int = 1                      # 1 to 128
    best_of: int = 1                # 1 to 20
    logprobs: Optional[int] = None  # 0 to 5
```
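
To illustrate the ranges above, here is a hypothetical helper (not part of the openai package) that clamps the sampling parameters to their documented bounds before a request:

```python
# Hypothetical helper, not part of the openai package: clamps sampling
# parameters to the documented ranges listed above
def clamp_sampling_params(temperature: float, top_p: float) -> dict:
    return {
        "temperature": min(max(temperature, 0.0), 2.0),  # 0.0 to 2.0
        "top_p": min(max(top_p, 0.0), 1.0),              # 0.0 to 1.0
    }

response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Write a tagline for a coffee shop:",
    max_tokens=20,
    **clamp_sampling_params(temperature=2.7, top_p=1.2)  # clamped to 2.0, 1.0
)
```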

## Migration Notes

The completions API is a legacy interface. For new applications, consider the chat completions API, which offers:

- Better model performance (GPT-4, GPT-3.5 Turbo)
- Structured conversation format
- Function calling capabilities
- Improved safety and reliability

To migrate from completions to chat completions:

```python
# Old completions approach
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Translate 'Hello' to French:",
    max_tokens=50
)

# New chat completions approach
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "Translate 'Hello' to French:"}
    ],
    max_tokens=50
)
```