# Client Usage

This document covers the core client functionality and methods for the instructor package, including client creation, core methods, hooks, and multimodal support.

## Type Imports

```python { .api }
from typing import List, Dict, Any, Type, Tuple, Generator, AsyncGenerator, Callable
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel
from tenacity import Retrying, AsyncRetrying
from instructor.core.hooks import Hooks
```

## Core Client Classes

### Instructor

The primary synchronous client for structured output extraction.

```python { .api }
class Instructor:
    def __init__(
        self,
        client: Any | None,
        create: Callable[..., Any],
        mode: instructor.Mode = instructor.Mode.TOOLS,
        provider: Provider = Provider.OPENAI,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> None:
        """
        Initialize Instructor client.

        Args:
            client: The underlying LLM client (OpenAI, Anthropic, etc.)
            create: The create function to patch
            mode: The extraction mode to use
            provider: The LLM provider type
            hooks: Event hooks for lifecycle management
            **kwargs: Additional configuration options
        """

    def create(
        self,
        response_model: Type[BaseModel] | None,
        messages: List[ChatCompletionMessageParam],
        max_retries: int | Retrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> BaseModel:
        """
        Generate structured output from LLM.

        Args:
            response_model: Pydantic model to extract (can be None)
            messages: Chat messages for the LLM
            max_retries: Number of retry attempts on validation failure
            validation_context: Context dict for validation (deprecated, use context)
            context: Context dict for validation and processing
            strict: Whether to use strict mode for schema validation
            hooks: Per-call hooks to override client hooks
            **kwargs: Additional model parameters (model, temperature, etc.)

        Returns:
            Instance of response_model with extracted data, or Any if response_model is None
        """

    def create_partial(
        self,
        response_model: Type[BaseModel],
        messages: List[ChatCompletionMessageParam],
        max_retries: int | Retrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> Generator[BaseModel, None, None]:
        """
        Stream partial results during extraction.

        Args:
            response_model: Pydantic model to extract
            messages: Chat messages for the LLM
            max_retries: Number of retry attempts on validation failure
            validation_context: Context dict for validation (deprecated, use context)
            context: Context dict for validation and processing
            strict: Whether to use strict mode for schema validation
            hooks: Per-call hooks to override client hooks
            **kwargs: Additional model parameters (model, temperature, etc.)

        Yields:
            Partial instances of response_model as they're built
        """

    def create_iterable(
        self,
        messages: List[ChatCompletionMessageParam],
        response_model: Type[BaseModel],
        max_retries: int | Retrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> Generator[BaseModel, None, None]:
        """
        Generate multiple structured outputs.

        Args:
            messages: Chat messages for the LLM
            response_model: Pydantic model to extract
            max_retries: Number of retry attempts on validation failure
            validation_context: Context dict for validation (deprecated, use context)
            context: Context dict for validation and processing
            strict: Whether to use strict mode for schema validation
            hooks: Per-call hooks to override client hooks
            **kwargs: Additional model parameters (model, temperature, etc.)

        Yields:
            Generator of response_model instances
        """

    def create_with_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        response_model: Type[BaseModel],
        max_retries: int | Retrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> Tuple[BaseModel, Any]:
        """
        Return both structured result and raw completion.

        Args:
            messages: Chat messages for the LLM
            response_model: Pydantic model to extract
            max_retries: Number of retry attempts on validation failure
            validation_context: Context dict for validation (deprecated, use context)
            context: Context dict for validation and processing
            strict: Whether to use strict mode for schema validation
            hooks: Per-call hooks to override client hooks
            **kwargs: Additional model parameters (model, temperature, etc.)

        Returns:
            Tuple of (extracted_model, raw_completion)
        """
```

### AsyncInstructor

The asynchronous client variant with identical API patterns.

```python { .api }
class AsyncInstructor:
    def __init__(
        self,
        client: Any | None,
        create: Callable[..., Any],
        mode: instructor.Mode = instructor.Mode.TOOLS,
        provider: Provider = Provider.OPENAI,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> None:
        """
        Initialize AsyncInstructor client.

        Args:
            client: The underlying async LLM client
            create: The async create function to patch
            mode: The extraction mode to use
            provider: The LLM provider type
            hooks: Event hooks for lifecycle management
            **kwargs: Additional configuration options
        """

    async def create(
        self,
        response_model: Type[BaseModel] | None,
        messages: List[ChatCompletionMessageParam],
        max_retries: int | AsyncRetrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> BaseModel:
        """Async version of create method."""

    async def create_partial(
        self,
        response_model: Type[BaseModel],
        messages: List[ChatCompletionMessageParam],
        max_retries: int | AsyncRetrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> AsyncGenerator[BaseModel, None]:
        """Async version of create_partial method."""

    async def create_iterable(
        self,
        messages: List[ChatCompletionMessageParam],
        response_model: Type[BaseModel],
        max_retries: int | AsyncRetrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> AsyncGenerator[BaseModel, None]:
        """Async version of create_iterable method."""

    async def create_with_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        response_model: Type[BaseModel],
        max_retries: int | AsyncRetrying = 3,
        validation_context: dict[str, Any] | None = None,
        context: dict[str, Any] | None = None,
        strict: bool = True,
        hooks: Hooks | None = None,
        **kwargs: Any
    ) -> Tuple[BaseModel, Any]:
        """Async version of create_with_completion method."""
```

## Hook Management

Both client classes support event hooks for lifecycle management:

```python { .api }
from instructor.core.hooks import HookName
from typing import Callable, Literal

class Instructor:
    def on(
        self,
        hook_name: (
            HookName
            | Literal[
                "completion:kwargs",
                "completion:response",
                "completion:error",
                "completion:last_attempt",
                "parse:error",
            ]
        ),
        handler: Callable[[Any], None],
    ) -> None:
        """
        Register event hook.

        Args:
            hook_name: Hook name to listen for (specific hook names)
            handler: Function to call when event occurs
        """

    def off(
        self,
        hook_name: str,
        handler: Callable[[Any], None] | None = None
    ) -> None:
        """
        Unregister event hook.

        Args:
            hook_name: Hook name to stop listening for
            handler: Specific handler to remove (None for all)
        """

    def clear(self) -> None:
        """Remove all registered hooks."""
```

## Usage Examples

### Basic Extraction

```python { .api }
import instructor
from openai import OpenAI
from pydantic import BaseModel

class Person(BaseModel):
    name: str
    age: int
    occupation: str

client = instructor.from_openai(OpenAI())

person = client.create(
    model="gpt-4",
    messages=[{
        "role": "user",
        "content": "Extract info: Sarah Chen, 28, software engineer"
    }],
    response_model=Person
)

print(f"{person.name} is {person.age} years old")
```

### Streaming Partial Results

```python { .api }
from instructor import Partial

class LongResponse(BaseModel):
    title: str
    summary: str
    key_points: List[str]
    conclusion: str

client = instructor.from_openai(OpenAI())

for partial in client.create_partial(
    model="gpt-4",
    messages=[{
        "role": "user",
        "content": "Write a detailed analysis of climate change impacts"
    }],
    response_model=Partial[LongResponse]
):
    if partial.title:
        print(f"Title: {partial.title}")
    if partial.summary:
        print(f"Summary: {partial.summary}")
    if partial.key_points:
        print(f"Points so far: {len(partial.key_points)}")
```

### Iterable Extraction

```python { .api }
class Task(BaseModel):
    name: str
    priority: str
    estimated_hours: int

tasks = client.create_iterable(
    model="gpt-4",
    messages=[{
        "role": "user",
        "content": "Extract all tasks from: Fix bugs (high, 4h), Write docs (medium, 2h), Review code (low, 1h)"
    }],
    response_model=Task
)

for task in tasks:
    print(f"{task.name}: {task.priority} priority, {task.estimated_hours}h")
```

### Access to Raw Completion

```python { .api }
result, completion = client.create_with_completion(
    model="gpt-4",
    messages=[{"role": "user", "content": "Extract user data: John, 30, teacher"}],
    response_model=Person
)

print(f"Extracted: {result}")
print(f"Raw completion: {completion}")
print(f"Token usage: {completion.usage}")
```

## Multimodal Support

### Image Processing

```python { .api }
from instructor import Image

class ImageDescription(BaseModel):
    objects: List[str]
    scene: str
    mood: str
    colors: List[str]

# From URL
image = Image.from_url("https://example.com/photo.jpg")

# From local path
image = Image.from_path("/path/to/image.png")

# From base64 string
image = Image.from_base64("iVBORw0KGgoAAAANSUhEUgAAAA...")

# Auto-detect source
image = Image.autodetect("https://example.com/photo.jpg")

description = client.create(
    model="gpt-4-vision-preview",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image in detail"},
            {"type": "image_url", "image_url": {"url": image.to_openai()}}
        ]
    }],
    response_model=ImageDescription
)
```

### Audio Processing

```python { .api }
from instructor import Audio

class AudioTranscription(BaseModel):
    transcript: str
    language: str
    confidence: float
    speaker_count: int

# From local audio file
audio = Audio.from_path("/path/to/audio.wav")

# Convert for different providers
openai_audio = audio.to_openai()
anthropic_audio = audio.to_anthropic()

transcription = client.create(
    model="gpt-4o-audio-preview",
    messages=[{
        "role": "user",
        "content": f"Transcribe and analyze: {openai_audio}"
    }],
    response_model=AudioTranscription
)
```

### Provider-Specific Image Conversion

```python { .api }
from instructor import Image

image = Image.from_path("/path/to/image.jpg")

# OpenAI format
openai_format = image.to_openai()  # Returns base64 data URL

# Anthropic format
anthropic_format = image.to_anthropic()  # Returns Anthropic image format

# Google GenAI format
genai_format = image.to_genai()  # Returns GenAI image part
```

## Error Handling

```python { .api }
from instructor.exceptions import InstructorError
from pydantic import ValidationError

try:
    result = client.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Invalid data"}],
        response_model=Person
    )
except ValidationError as e:
    print(f"Validation failed: {e}")
except InstructorError as e:
    print(f"Instructor error: {e}")
except Exception as e:
    print(f"Unexpected error: {e}")
```

## Hook Usage

```python { .api }
def on_completion(completion):
    print(f"Completion received: {completion.usage}")

def on_error(error):
    print(f"Error occurred: {error}")

client.on("completion:response", on_completion)
client.on("completion:error", on_error)

# Use client normally - hooks will be called
result = client.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}],
    response_model=Person
)

# Clean up hooks
client.off("completion:response", on_completion)
client.clear()  # Remove all hooks
```