pypi-openai

Description
Official Python library for the OpenAI API, providing chat completions, embeddings, audio, images, and more.
Author
tessl
Last updated

How to use

npx @tessl/cli registry install tessl/pypi-openai@1.106.0

docs/client-setup.md

# Client Setup and Configuration

Client initialization, authentication, configuration options, and Azure integration for both synchronous and asynchronous usage patterns.

## Capabilities

### Basic Client Initialization

Create OpenAI client instances with API key authentication and basic configuration options.

```python { .api }
class OpenAI:
    def __init__(
        self,
        *,
        api_key: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: Union[float, Timeout, None] = None,
        max_retries: int = 2,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.Client | None = None
    ): ...
```

Usage example:

```python
import openai
from openai import OpenAI

# Basic initialization with API key
client = OpenAI(api_key="your-api-key-here")

# With organization and project
client = OpenAI(
    api_key="your-api-key-here",
    organization="org-your-org-id",
    project="proj-your-project-id"
)

# With custom configuration
client = OpenAI(
    api_key="your-api-key-here",
    base_url="https://api.openai.com/v1",
    timeout=30.0,
    max_retries=3,
    default_headers={"User-Agent": "MyApp/1.0"}
)
```
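
The client holds an `httpx` connection pool, so long-lived scripts may want to close it explicitly. As a minimal sketch (assuming the installed version supports the context-manager protocol, as current 1.x releases do), the client can also be scoped with `with`:

```python
from openai import OpenAI

# The context manager closes the client's HTTP connection pool on exit;
# calling client.close() manually is equivalent.
with OpenAI(api_key="your-api-key-here") as client:
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)
```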

### Async Client Initialization

Create asynchronous OpenAI client instances for concurrent operations and async/await usage patterns.

```python { .api }
class AsyncOpenAI:
    def __init__(
        self,
        *,
        api_key: str | Callable[[], Awaitable[str]] | None = None,
        organization: str | None = None,
        project: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: Union[float, Timeout, None] = None,
        max_retries: int = 2,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.AsyncClient | None = None
    ): ...
```

Usage example:

```python
import asyncio
from openai import AsyncOpenAI

async def main():
    # Basic async client
    client = AsyncOpenAI(api_key="your-api-key-here")

    # Multiple concurrent requests
    tasks = []
    for i in range(5):
        task = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": f"Hello {i}"}]
        )
        tasks.append(task)

    responses = await asyncio.gather(*tasks)

    await client.close()  # Clean up resources

asyncio.run(main())
```
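
Similarly, the async client can be scoped with `async with`, which releases its HTTP resources automatically instead of requiring an explicit `await client.close()` (a minimal sketch under the same assumption about context-manager support):

```python
import asyncio
from openai import AsyncOpenAI

async def main():
    # `async with` closes the connection pool when the block exits.
    async with AsyncOpenAI(api_key="your-api-key-here") as client:
        response = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)

asyncio.run(main())
```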

### Azure OpenAI Integration

Azure-specific client configuration for OpenAI services hosted on Microsoft Azure with Azure Active Directory authentication.

```python { .api }
class AzureOpenAI:
    def __init__(
        self,
        *,
        azure_endpoint: str | None = None,
        azure_deployment: str | None = None,
        api_version: str | None = None,
        api_key: str | Callable[[], str] | None = None,
        azure_ad_token: str | None = None,
        azure_ad_token_provider: AzureADTokenProvider | None = None,
        organization: str | None = None,
        project: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: Union[float, Timeout, None] = None,
        max_retries: int = 2,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.Client | None = None
    ): ...

class AsyncAzureOpenAI:
    def __init__(
        self,
        *,
        azure_endpoint: str | None = None,
        azure_deployment: str | None = None,
        api_version: str | None = None,
        api_key: str | Callable[[], Awaitable[str]] | None = None,
        azure_ad_token: str | None = None,
        azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
        organization: str | None = None,
        project: str | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: Union[float, Timeout, None] = None,
        max_retries: int = 2,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        http_client: httpx.AsyncClient | None = None
    ): ...
```

Usage examples:

```python
from openai import AzureOpenAI

# Basic Azure configuration with API key
client = AzureOpenAI(
    azure_endpoint="https://your-endpoint.openai.azure.com/",
    api_key="your-azure-api-key",
    api_version="2024-02-15-preview"
)

# With Azure AD token
client = AzureOpenAI(
    azure_endpoint="https://your-endpoint.openai.azure.com/",
    azure_ad_token="your-azure-ad-token",
    api_version="2024-02-15-preview"
)

# With Azure AD token provider
def get_azure_ad_token():
    # Your token acquisition logic here
    return "token-from-azure-ad"

client = AzureOpenAI(
    azure_endpoint="https://your-endpoint.openai.azure.com/",
    azure_ad_token_provider=get_azure_ad_token,
    api_version="2024-02-15-preview"
)

# With specific deployment
client = AzureOpenAI(
    azure_endpoint="https://your-endpoint.openai.azure.com/",
    azure_deployment="your-deployment-name",
    api_key="your-azure-api-key",
    api_version="2024-02-15-preview"
)
```
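
In practice the token provider is often built with the separate `azure-identity` package rather than written by hand; the sketch below assumes that package (its `DefaultAzureCredential` and `get_bearer_token_provider` are not part of this library):

```python
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

# get_bearer_token_provider returns a callable that fetches a fresh
# Azure AD bearer token for the given scope on each invocation.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(),
    "https://cognitiveservices.azure.com/.default"
)

client = AzureOpenAI(
    azure_endpoint="https://your-endpoint.openai.azure.com/",
    azure_ad_token_provider=token_provider,
    api_version="2024-02-15-preview"
)
```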

### Environment-Based Configuration

Automatic configuration using environment variables for convenient deployment and credential management.

The library automatically reads configuration from environment variables:

- `OPENAI_API_KEY` → `api_key`
- `OPENAI_ORG_ID` → `organization`
- `OPENAI_PROJECT_ID` → `project`
- `OPENAI_BASE_URL` → `base_url`
- `AZURE_OPENAI_ENDPOINT` → `azure_endpoint`
- `AZURE_OPENAI_API_KEY` → Azure API key
- `OPENAI_API_VERSION` → `api_version`

Usage example:

```python
import os
from openai import OpenAI, AzureOpenAI

# Set environment variables
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["OPENAI_ORG_ID"] = "your-org-id"

# Client automatically uses environment variables
client = OpenAI()  # No need to pass api_key explicitly

# Azure with environment variables
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://your-endpoint.openai.azure.com/"
os.environ["AZURE_OPENAI_API_KEY"] = "your-azure-key"
os.environ["OPENAI_API_VERSION"] = "2024-02-15-preview"  # AzureOpenAI also requires an API version

azure_client = AzureOpenAI()  # Automatically configured
```
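
Explicitly passed constructor arguments should take precedence over the corresponding environment variables (the constructor only falls back to the environment when an argument is omitted), which is useful when one process needs to talk to more than one account:

```python
import os
from openai import OpenAI

os.environ["OPENAI_API_KEY"] = "key-from-environment"

default_client = OpenAI()                                    # uses OPENAI_API_KEY
secondary_client = OpenAI(api_key="key-passed-explicitly")   # explicit value wins
```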

### Module-Level Configuration

Direct API access through module-level configuration for simple usage patterns without explicit client instantiation.

```python { .api }
# Module-level configuration variables
api_key: str | None = None
organization: str | None = None
project: str | None = None
base_url: str | None = None
timeout: float | None = None
max_retries: int = 2
default_headers: dict[str, str] | None = None
```

Usage example:

```python
import openai

# Configure at module level
openai.api_key = "your-api-key"
openai.organization = "your-org-id"

# Direct API access without client instantiation
response = openai.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}]
)

# Other APIs work similarly
embeddings = openai.embeddings.create(
    model="text-embedding-ada-002",
    input="Text to embed"
)
```

### Client Configuration Options

Advanced configuration options for timeout, retries, HTTP behavior, and custom headers.

```python { .api }
class Timeout:
    def __init__(
        self,
        total: float | None = None,
        connect: float | None = None,
        read: float | None = None,
        write: float | None = None,
        pool: float | None = None
    ): ...
```

Usage examples:

```python
from openai import OpenAI, Timeout
import httpx

# Custom timeout configuration
timeout = Timeout(
    total=60.0,    # Total timeout for the request
    connect=10.0,  # Connection timeout
    read=30.0,     # Read timeout
    write=10.0     # Write timeout
)

client = OpenAI(
    api_key="your-api-key",
    timeout=timeout,
    max_retries=5,
    default_headers={
        "User-Agent": "MyApplication/1.0",
        "X-Custom-Header": "custom-value"
    }
)

# Custom HTTP client configuration
http_client = httpx.Client(
    limits=httpx.Limits(
        max_connections=100,
        max_keepalive_connections=20
    ),
    proxies="http://proxy.example.com:8080"
)

client = OpenAI(
    api_key="your-api-key",
    http_client=http_client
)

# Custom base URL for OpenAI-compatible APIs
client = OpenAI(
    api_key="your-api-key",
    base_url="https://api.example.com/v1"
)
```
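
Because the client delegates HTTP handling to `httpx`, the timeout can also be given directly as an `httpx.Timeout` instance; a brief sketch (note that `httpx.Timeout` takes the overall default as its first positional argument rather than a `total` keyword):

```python
import httpx
from openai import OpenAI

# The first argument is the default for any phase not overridden
# by the connect/read/write/pool keywords.
client = OpenAI(
    api_key="your-api-key",
    timeout=httpx.Timeout(60.0, connect=10.0, read=30.0)
)
```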

### Client Methods and Properties

Client manipulation methods and response wrapper access for advanced usage patterns.

```python { .api }
class OpenAI:
    def copy(self, **kwargs) -> "OpenAI": ...
    def with_options(self, **kwargs) -> "OpenAI": ...

    @property
    def with_raw_response(self) -> "OpenAIWithRawResponse": ...

    @property
    def with_streaming_response(self) -> "OpenAIWithStreamedResponse": ...
```

Usage examples:

```python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

# Create client copy with different configuration
client_with_timeout = client.copy(timeout=30.0)

# Access raw HTTP responses
raw_client = client.with_raw_response
raw_response = raw_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}]
)

print(f"Status: {raw_response.status_code}")
print(f"Headers: {raw_response.headers}")
parsed_response = raw_response.parse()

# Access streaming responses (the wrapper returns a context manager
# over the raw HTTP body)
streaming_client = client.with_streaming_response
with streaming_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}]
) as stream_response:
    for line in stream_response.iter_lines():
        print(line)
```
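
`with_options` is the usual way to override configuration for a single call, since it returns a derived client rather than mutating the original; a brief sketch:

```python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

# Tighten the timeout and disable retries for one request only;
# `client` itself keeps its original configuration.
response = client.with_options(timeout=10.0, max_retries=0).chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}]
)
```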

## Types

### Configuration Types

```python { .api }
AzureADTokenProvider = Callable[[], str]
AsyncAzureADTokenProvider = Callable[[], str | Awaitable[str]]

class Timeout:
    total: float | None
    connect: float | None
    read: float | None
    write: float | None
    pool: float | None

RequestOptions = TypedDict('RequestOptions', {
    'extra_headers': NotRequired[Headers],
    'extra_query': NotRequired[Query],
    'extra_body': NotRequired[Body],
    'timeout': NotRequired[float | httpx.Timeout | None],
}, total=False)
```
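
These request options are accepted as extra keyword arguments on individual resource methods; the header and body values below are placeholders for illustration:

```python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello!"}],
    extra_headers={"X-Request-Source": "docs-example"},  # added to the HTTP request
    extra_body={"custom_field": "value"},                # merged into the JSON body
    timeout=15.0                                         # overrides the client-level timeout
)
```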

### Exception Types

```python { .api }
class MutuallyExclusiveAuthError(OpenAIError):
    """Raised when multiple Azure authentication methods are provided"""
```

### Constants

```python { .api }
DEFAULT_TIMEOUT: float = 600.0
DEFAULT_MAX_RETRIES: int = 2
DEFAULT_CONNECTION_LIMITS: dict = {
    "max_connections": 1000,
    "max_keepalive_connections": 100
}
```