pypi-openai

Description
Official Python library for the OpenAI API, providing chat completions, embeddings, audio, images, and more.
Author
tessl

How to use

npx @tessl/cli registry install tessl/pypi-openai@1.106.0

docs/chat-completions.md

# Chat Completions

Primary interface for conversational AI using GPT models. Supports streaming responses, function calling, structured outputs, and advanced features like reasoning models.

## Capabilities

### Basic Chat Completions

Generate conversational responses using GPT models with message-based interaction patterns.

```python { .api }
def create(
    self,
    *,
    messages: Iterable[ChatCompletionMessageParam],
    model: Union[str, ChatModel],
    audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
    frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
    function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
    functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
    logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
    logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
    max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
    max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
    metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
    modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
    n: Optional[int] | NotGiven = NOT_GIVEN,
    parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
    presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
    prompt_cache_key: str | NotGiven = NOT_GIVEN,
    reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
    response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
    safety_identifier: str | NotGiven = NOT_GIVEN,
    seed: Optional[int] | NotGiven = NOT_GIVEN,
    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
    stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN,
    store: Optional[bool] | NotGiven = NOT_GIVEN,
    stream: Optional[bool] | NotGiven = NOT_GIVEN,
    stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
    temperature: Optional[float] | NotGiven = NOT_GIVEN,
    tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
    tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
    top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
    top_p: Optional[float] | NotGiven = NOT_GIVEN,
    user: str | NotGiven = NOT_GIVEN,
    verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
    web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN
) -> ChatCompletion | Stream[ChatCompletionChunk]: ...
```

Usage example:

```python
from openai import OpenAI

client = OpenAI()

# Simple chat completion
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"}
    ]
)

print(response.choices[0].message.content)

# With additional parameters
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a creative writer."},
        {"role": "user", "content": "Write a short story about a robot."}
    ],
    max_tokens=150,
    temperature=0.8,
    presence_penalty=0.1,
    frequency_penalty=0.1
)
```

### Streaming Chat Completions

Stream responses in real time for a better user experience with longer generations.

```python { .api }
def create(
    self,
    *,
    messages: Iterable[ChatCompletionMessageParam],
    model: Union[str, ChatModel],
    stream: Literal[True],
    stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
    # ... other parameters
) -> Stream[ChatCompletionChunk]: ...
```

Usage example:

```python
# Streaming response
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Tell me a long story"}],
    stream=True
)

print("Response: ", end="")
for chunk in stream:
    # Guard: some chunks (e.g. usage-only chunks) carry an empty choices list
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
print()

# With stream options for usage tracking
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
    stream_options={"include_usage": True}
)

for chunk in stream:
    if chunk.usage:  # Final chunk contains usage info
        print(f"Tokens used: {chunk.usage.total_tokens}")
```

### Function Calling

Enable models to call external functions and tools for enhanced capabilities and structured interactions.

```python { .api }
def create(
    self,
    *,
    messages: Iterable[ChatCompletionMessageParam],
    model: Union[str, ChatModel],
    tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
    tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
    parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
    # ... other parameters
) -> ChatCompletion: ...
```

Usage example:

```python
import json

# Define available functions
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "Temperature unit"
                    }
                },
                "required": ["location"]
            }
        }
    }
]

# Function calling conversation
messages = [
    {"role": "user", "content": "What's the weather like in Paris?"}
]

response = client.chat.completions.create(
    model="gpt-4",
    messages=messages,
    tools=tools,
    tool_choice="auto"
)

# Check if the model wants to call a function
message = response.choices[0].message
if message.tool_calls:
    # Add the assistant's response to messages
    messages.append(message)

    # Call the function and add the result
    for tool_call in message.tool_calls:
        function_args = json.loads(tool_call.function.arguments)

        # Your function implementation (see the sketch after this example)
        weather_result = get_weather(
            location=function_args["location"],
            unit=function_args.get("unit", "celsius")
        )

        # Tool messages take only tool_call_id, role, and content
        messages.append({
            "tool_call_id": tool_call.id,
            "role": "tool",
            "content": json.dumps(weather_result)
        })

    # Get the final response
    final_response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        tools=tools
    )

    print(final_response.choices[0].message.content)
```
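
The example above leaves `get_weather` to you. A minimal, purely hypothetical stub that satisfies the call site might look like:

```python
def get_weather(location: str, unit: str = "celsius") -> dict:
    # Hypothetical stand-in; a real implementation would query a weather service.
    return {"location": location, "temperature": 22, "unit": unit}
```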

### Structured Outputs

Generate responses in specific JSON formats using a response format specification for reliable data extraction.

```python { .api }
def create(
    self,
    *,
    messages: Iterable[ChatCompletionMessageParam],
    model: Union[str, ChatModel],
    response_format: ResponseFormatParam | NotGiven = NOT_GIVEN,
    # ... other parameters
) -> ChatCompletion: ...

def parse(
    self,
    *,
    messages: Iterable[ChatCompletionMessageParam],
    model: Union[str, ChatModel],
    response_format: type[ResponseFormatT],
    # ... other parameters
) -> ParsedChatCompletion[ResponseFormatT]: ...
```

Usage examples:

```python
from pydantic import BaseModel
from typing import List

# JSON Schema response format (requires a structured-outputs-capable model such as gpt-4o)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "List 3 colors and their hex codes"}
    ],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "colors",
            "schema": {
                "type": "object",
                "properties": {
                    "colors": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "name": {"type": "string"},
                                "hex": {"type": "string"}
                            },
                            "required": ["name", "hex"],
                            "additionalProperties": False
                        }
                    }
                },
                "required": ["colors"],
                "additionalProperties": False
            }
        }
    }
)

# Parse with a Pydantic model
class Color(BaseModel):
    name: str
    hex: str

class ColorList(BaseModel):
    colors: List[Color]

parsed_response = client.chat.completions.parse(
    model="gpt-4o",
    messages=[
        {"role": "user", "content": "List 3 colors and their hex codes"}
    ],
    response_format=ColorList
)

colors = parsed_response.choices[0].message.parsed
print(f"First color: {colors.colors[0].name} - {colors.colors[0].hex}")
```

### Advanced Model Features

Access advanced capabilities like reasoning, audio modalities, and prediction optimization.

```python { .api }
def create(
    self,
    *,
    messages: Iterable[ChatCompletionMessageParam],
    model: Union[str, ChatModel],
    reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
    audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
    modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
    prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
    # ... other parameters
) -> ChatCompletion: ...
```

Usage examples:

```python
import base64

# Reasoning with effort control (reasoning_effort is supported on o-series models such as o1)
response = client.chat.completions.create(
    model="o1",
    messages=[
        {"role": "user", "content": "Solve this complex math problem step by step: ..."}
    ],
    reasoning_effort="high"
)

# Audio input and output
with open("input.wav", "rb") as f:
    audio_data_base64 = base64.b64encode(f.read()).decode("utf-8")

response = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "alloy", "format": "wav"},
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Please respond with audio"},
                {
                    "type": "input_audio",
                    "input_audio": {
                        "data": audio_data_base64,
                        "format": "wav"
                    }
                }
            ]
        }
    ]
)

# Predicted outputs for faster responses (supported on gpt-4o-family models)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Complete this code: def fibonacci("}],
    prediction={
        "type": "content",
        "content": "n):\n    if n <= 1:\n        return n\n    return fibonacci(n-1) + fibonacci(n-2)"
    }
)
```

### Message Management

Access conversation history and message management for multi-turn conversations.

```python { .api }
# Messages sub-resource
class Messages:
    def list(
        self,
        thread_id: str,
        *,
        after: str | NotGiven = NOT_GIVEN,
        before: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
        run_id: str | NotGiven = NOT_GIVEN
    ) -> SyncCursorPage[Message]: ...

    def create(
        self,
        thread_id: str,
        *,
        content: Union[str, Iterable[MessageContentPartParam]],
        role: Literal["user", "assistant"],
        attachments: Optional[Iterable[AttachmentParam]] | NotGiven = NOT_GIVEN,
        metadata: Optional[object] | NotGiven = NOT_GIVEN
    ) -> Message: ...
```
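
These thread-scoped methods come from the Assistants API rather than `client.chat.completions`. A minimal usage sketch, assuming they are exposed at `client.beta.threads.messages` as in recent 1.x releases:

```python
# Sketch only: create a thread, add a message, then list messages oldest-first.
thread = client.beta.threads.create()

client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Summarize our conversation so far."
)

for message in client.beta.threads.messages.list(thread_id=thread.id, order="asc"):
    print(message.id, message.role)
```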

## Types

### Core Response Types

```python { .api }
class ChatCompletion(BaseModel):
    id: str
    choices: List[ChatCompletionChoice]
    created: int
    model: str
    object: Literal["chat.completion"]
    service_tier: Optional[Literal["scale", "default"]]
    system_fingerprint: Optional[str]
    usage: Optional[CompletionUsage]

class ChatCompletionChoice(BaseModel):
    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
    index: int
    logprobs: Optional[ChoiceLogprobs]
    message: ChatCompletionMessage

class ChatCompletionMessage(BaseModel):
    content: Optional[str]
    role: Literal["assistant"]
    function_call: Optional[FunctionCall]
    tool_calls: Optional[List[ChatCompletionMessageToolCall]]
    audio: Optional[ChatCompletionMessageAudio]

class ChatCompletionChunk(BaseModel):
    id: str
    choices: List[ChatCompletionChunkChoice]
    created: int
    model: str
    object: Literal["chat.completion.chunk"]
    service_tier: Optional[Literal["scale", "default"]]
    system_fingerprint: Optional[str]
    usage: Optional[CompletionUsage]

class ParsedChatCompletion(BaseModel, Generic[ResponseFormatT]):
    choices: List[ParsedChoice[ResponseFormatT]]
    created: int
    id: str
    model: str
    object: Literal["chat.completion"]
    service_tier: Optional[Literal["scale", "default"]]
    system_fingerprint: Optional[str]
    usage: Optional[CompletionUsage]
```
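
A short sketch of reading these fields from a non-streaming response (assumes `response` is the `ChatCompletion` returned by one of the `create` calls above):

```python
for choice in response.choices:
    if choice.finish_reason == "length":
        # Generation hit max_tokens / max_completion_tokens
        print("Truncated output:", choice.message.content)
    elif choice.finish_reason == "tool_calls":
        print("Model requested tools:", choice.message.tool_calls)
    else:
        print(choice.message.content)
```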

### Message Parameter Types

```python { .api }
ChatCompletionMessageParam = Union[
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
    ChatCompletionAssistantMessageParam,
    ChatCompletionToolMessageParam,
    ChatCompletionFunctionMessageParam
]

class ChatCompletionSystemMessageParam(TypedDict, total=False):
    content: Required[str]
    role: Required[Literal["system"]]
    name: str

class ChatCompletionUserMessageParam(TypedDict, total=False):
    content: Required[Union[str, List[ChatCompletionContentPartParam]]]
    role: Required[Literal["user"]]
    name: str

class ChatCompletionAssistantMessageParam(TypedDict, total=False):
    role: Required[Literal["assistant"]]
    content: Optional[str]
    function_call: FunctionCall
    name: str
    tool_calls: Iterable[ChatCompletionMessageToolCallParam]
    audio: ChatCompletionMessageAudioParam

class ChatCompletionToolMessageParam(TypedDict, total=False):
    content: Required[Union[str, List[ChatCompletionContentPartParam]]]
    role: Required[Literal["tool"]]
    tool_call_id: Required[str]

ChatCompletionContentPartParam = Union[
    ChatCompletionContentPartTextParam,
    ChatCompletionContentPartImageParam,
    ChatCompletionContentPartAudioParam,
    ChatCompletionContentPartRefusalParam
]
```
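
For illustration, a user message assembled from content parts, mirroring the TypedDict shapes above (the image URL is a placeholder, and the model choice assumes vision support):

```python
multimodal_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is shown in this image?"},
        {"type": "image_url", "image_url": {"url": "https://example.com/photo.png"}}
    ]
}

response = client.chat.completions.create(
    model="gpt-4o",  # assumed vision-capable model
    messages=[multimodal_message]
)
print(response.choices[0].message.content)
```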

### Tool and Function Types

```python { .api }
ChatCompletionToolUnionParam = Union[
    ChatCompletionToolParam,
    ChatCompletionNamedToolChoiceParam
]

class ChatCompletionToolParam(TypedDict, total=False):
    function: Required[FunctionDefinition]
    type: Required[Literal["function"]]

class FunctionDefinition(TypedDict, total=False):
    name: Required[str]
    description: str
    parameters: FunctionParameters
    strict: Optional[bool]

class ChatCompletionMessageToolCall(BaseModel):
    id: str
    function: Function
    type: Literal["function"]

ChatCompletionToolChoiceOptionParam = Union[
    Literal["none", "auto", "required"],
    ChatCompletionNamedToolChoiceParam
]
```
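
A brief sketch of the named `tool_choice` form, which forces a specific tool instead of letting the model decide (reuses the hypothetical `get_weather` tool defined earlier):

```python
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "What's the weather in Tokyo?"}],
    tools=tools,
    tool_choice={"type": "function", "function": {"name": "get_weather"}}
)
print(response.choices[0].message.tool_calls)
```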

### Response Format Types

```python { .api }
ResponseFormatParam = Union[
    ResponseFormatText,
    ResponseFormatJSONObject,
    ResponseFormatJSONSchema
]

class ResponseFormatText(TypedDict, total=False):
    type: Required[Literal["text"]]

class ResponseFormatJSONObject(TypedDict, total=False):
    type: Required[Literal["json_object"]]

class ResponseFormatJSONSchema(TypedDict, total=False):
    json_schema: Required[JSONSchema]
    type: Required[Literal["json_schema"]]

class JSONSchema(TypedDict, total=False):
    description: str
    name: Required[str]
    schema: Optional[Dict[str, object]]
    strict: Optional[bool]
```
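
For comparison with the `json_schema` example earlier, a minimal sketch of the looser `json_object` mode; note that the prompt itself must mention JSON for this mode to be accepted:

```python
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": "Reply only with a JSON object."},
        {"role": "user", "content": "List three colors with their hex codes."}
    ],
    response_format={"type": "json_object"}
)
print(response.choices[0].message.content)  # a JSON string; parse with json.loads
```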

### Audio and Multimodal Types

```python { .api }
class ChatCompletionAudioParam(TypedDict, total=False):
    voice: Required[AudioVoice]
    format: Required[AudioFormat]

AudioVoice = Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]
AudioFormat = Literal["wav", "mp3", "flac", "opus"]

class ChatCompletionContentPartAudioParam(TypedDict, total=False):
    input_audio: Required[InputAudio]
    type: Required[Literal["input_audio"]]

class InputAudio(TypedDict, total=False):
    data: Required[str]  # Base64-encoded audio
    format: Required[Literal["wav", "mp3"]]
```

### Usage and Metadata Types

```python { .api }
class CompletionUsage(BaseModel):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int
    completion_tokens_details: Optional[CompletionTokensDetails]
    prompt_tokens_details: Optional[PromptTokensDetails]

class ChatCompletionStreamOptionsParam(TypedDict, total=False):
    include_usage: bool

ReasoningEffort = Literal["low", "medium", "high"]

class ChatCompletionPredictionContentParam(TypedDict, total=False):
    type: Required[Literal["content"]]
    content: Required[Union[str, List[ChatCompletionContentPartParam]]]
```
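
Finally, a quick sketch of reading `CompletionUsage` from a non-streaming call:

```python
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}]
)

if response.usage is not None:
    u = response.usage
    print(f"prompt={u.prompt_tokens} completion={u.completion_tokens} total={u.total_tokens}")
```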