# Batch Processing

Process multiple Messages API requests efficiently using the Message Batches API. Batch processing allows you to submit up to 10,000 message creation requests at once, with results delivered within 24 hours at 50% reduced cost compared to standard API requests.

## Core Imports

```python
from anthropic import Anthropic
```

For async usage:

```python
from anthropic import AsyncAnthropic
```

## Basic Usage

```python
from anthropic import Anthropic

client = Anthropic()

# Create a batch with multiple message requests
batch = client.messages.batches.create(
    requests=[
        {
            "custom_id": "request-1",
            "params": {
                "model": "claude-3-5-sonnet-20241022",
                "max_tokens": 1024,
                "messages": [
                    {"role": "user", "content": "What is the capital of France?"}
                ]
            }
        },
        {
            "custom_id": "request-2",
            "params": {
                "model": "claude-3-5-sonnet-20241022",
                "max_tokens": 1024,
                "messages": [
                    {"role": "user", "content": "What is the capital of Spain?"}
                ]
            }
        }
    ]
)

print(f"Batch ID: {batch.id}")
print(f"Status: {batch.processing_status}")

# Poll for completion
import time
while batch.processing_status == "in_progress":
    time.sleep(60)  # Check every minute
    batch = client.messages.batches.retrieve(batch.id)
    print(f"Status: {batch.processing_status}, Counts: {batch.request_counts}")

# Retrieve results once processing ends
if batch.results_url:
    results = client.messages.batches.results(batch.id)
    for result in results:
        print(f"Request {result.custom_id}:")
        if result.result.type == "succeeded":
            print(f"  Response: {result.result.message.content}")
        elif result.result.type == "errored":
            print(f"  Error: {result.result.error}")
```

## Capabilities

### Create Batch

Submit a batch of message creation requests for asynchronous processing.

```python { .api }
def create(
    *,
    requests: Iterable[Request],
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
    """
    Send a batch of Message creation requests.

    The Message Batches API can be used to process multiple Messages API
    requests at once. Once a Message Batch is created, it begins processing
    immediately. Batches can take up to 24 hours to complete.

    Args:
        requests: List of requests for prompt completion. Each is an
            individual request to create a Message.
        extra_headers: Send extra headers
        extra_query: Add additional query parameters to the request
        extra_body: Add additional JSON properties to the request
        timeout: Override the client-level default timeout for this request,
            in seconds

    Returns:
        MessageBatch: The created batch object with processing status
    """
```

Async version:

```python { .api }
async def create(
    *,
    requests: Iterable[Request],
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
    """Async version of create()"""
```

Example:

```python
from anthropic import Anthropic

client = Anthropic()

batch = client.messages.batches.create(
    requests=[
        {
            "custom_id": "analysis-1",
            "params": {
                "model": "claude-3-5-sonnet-20241022",
                "max_tokens": 2048,
                "messages": [
                    {
                        "role": "user",
                        "content": "Analyze the sentiment of: 'I love this product!'"
                    }
                ]
            }
        },
        {
            "custom_id": "analysis-2",
            "params": {
                "model": "claude-3-5-sonnet-20241022",
                "max_tokens": 2048,
                "messages": [
                    {
                        "role": "user",
                        "content": "Analyze the sentiment of: 'This is disappointing.'"
                    }
                ]
            }
        }
    ]
)

print(f"Created batch: {batch.id}")
print(f"Status: {batch.processing_status}")
print(f"Expires at: {batch.expires_at}")
```
162
163
### Retrieve Batch Status
164
165
Get the current status and metadata of a batch. This endpoint is idempotent and can be used to poll for completion.
166
167
```python { .api }
168
def retrieve(
169
message_batch_id: str,
170
*,
171
extra_headers: Headers | None = None,
172
extra_query: Query | None = None,
173
extra_body: Body | None = None,
174
timeout: float | httpx.Timeout | None | NotGiven = not_given,
175
) -> MessageBatch:
176
"""
177
This endpoint is idempotent and can be used to poll for Message Batch
178
completion.
179
180
To access the results of a Message Batch, make a request to the
181
`results_url` field in the response.
182
183
Args:
184
message_batch_id: ID of the Message Batch
185
extra_headers: Send extra headers
186
extra_query: Add additional query parameters to the request
187
extra_body: Add additional JSON properties to the request
188
timeout: Override the client-level default timeout for this request,
189
in seconds
190
191
Returns:
192
MessageBatch: The batch object with current status
193
"""
194
```
195
196
Async version:
197
198
```python { .api }
199
async def retrieve(
200
message_batch_id: str,
201
*,
202
extra_headers: Headers | None = None,
203
extra_query: Query | None = None,
204
extra_body: Body | None = None,
205
timeout: float | httpx.Timeout | None | NotGiven = not_given,
206
) -> MessageBatch:
207
"""Async version of retrieve()"""
208
```
209
210
Example:
211
212
```python
213
import time
214
from anthropic import Anthropic
215
216
client = Anthropic()
217
batch_id = "msgbatch_01ABC123"
218
219
# Poll until processing completes
220
while True:
221
batch = client.messages.batches.retrieve(batch_id)
222
223
print(f"Status: {batch.processing_status}")
224
print(f"Request counts: {batch.request_counts}")
225
226
if batch.processing_status == "ended":
227
print(f"Batch completed!")
228
print(f"Succeeded: {batch.request_counts.succeeded}")
229
print(f"Errored: {batch.request_counts.errored}")
230
print(f"Canceled: {batch.request_counts.canceled}")
231
print(f"Expired: {batch.request_counts.expired}")
232
break
233
234
if batch.processing_status == "canceling":
235
print("Batch is being canceled...")
236
237
time.sleep(60) # Wait 1 minute before checking again
238
```

### List Batches

List all message batches in a workspace, with most recently created batches returned first.

```python { .api }
def list(
    *,
    after_id: str | Omit = omit,
    before_id: str | Omit = omit,
    limit: int | Omit = omit,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPage[MessageBatch]:
    """
    List all Message Batches within a Workspace. Most recently created
    batches are returned first.

    Args:
        after_id: ID of the object to use as a cursor for pagination. When
            provided, returns the page of results immediately after this
            object.
        before_id: ID of the object to use as a cursor for pagination. When
            provided, returns the page of results immediately before this
            object.
        limit: Number of items to return per page. Defaults to 20. Ranges
            from 1 to 1000.
        extra_headers: Send extra headers
        extra_query: Add additional query parameters to the request
        extra_body: Add additional JSON properties to the request
        timeout: Override the client-level default timeout for this request,
            in seconds

    Returns:
        SyncPage[MessageBatch]: Paginated list of batch objects
    """
```

Async version:

```python { .api }
def list(
    *,
    after_id: str | Omit = omit,
    before_id: str | Omit = omit,
    limit: int | Omit = omit,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[MessageBatch, AsyncPage[MessageBatch]]:
    """Async version of list()"""
```

Example:

```python
from anthropic import Anthropic

client = Anthropic()

# List all batches (paginated)
page = client.messages.batches.list(limit=20)

for batch in page.data:
    print(f"Batch {batch.id}:")
    print(f"  Status: {batch.processing_status}")
    print(f"  Created: {batch.created_at}")
    print(f"  Requests: {batch.request_counts}")

# Paginate through results
if page.has_next_page():
    next_page = client.messages.batches.list(
        limit=20,
        after_id=page.data[-1].id
    )
```

### Cancel Batch

Cancel a batch that is currently processing. Batches may be canceled any time before processing ends.

```python { .api }
def cancel(
    message_batch_id: str,
    *,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
    """
    Batches may be canceled any time before processing ends. Once
    cancellation is initiated, the batch enters a `canceling` state, at
    which time the system may complete any in-progress, non-interruptible
    requests before finalizing cancellation.

    The number of canceled requests is specified in `request_counts`. To
    determine which requests were canceled, check the individual results
    within the batch. Note that cancellation may not result in any canceled
    requests if they were non-interruptible.

    Args:
        message_batch_id: ID of the Message Batch
        extra_headers: Send extra headers
        extra_query: Add additional query parameters to the request
        extra_body: Add additional JSON properties to the request
        timeout: Override the client-level default timeout for this request,
            in seconds

    Returns:
        MessageBatch: The batch object with updated status
    """
```

Async version:

```python { .api }
async def cancel(
    message_batch_id: str,
    *,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageBatch:
    """Async version of cancel()"""
```

Example:

```python
from anthropic import Anthropic

client = Anthropic()

# Cancel a batch
batch_id = "msgbatch_01ABC123"
batch = client.messages.batches.cancel(batch_id)

print(f"Cancellation initiated: {batch.cancel_initiated_at}")
print(f"Status: {batch.processing_status}")

# Continue polling to see final cancellation results
import time
while batch.processing_status == "canceling":
    time.sleep(10)
    batch = client.messages.batches.retrieve(batch_id)

print(f"Final counts: {batch.request_counts}")
print(f"Canceled requests: {batch.request_counts.canceled}")
```

### Delete Batch

Delete a completed batch. Batches can only be deleted once they have finished processing.

```python { .api }
def delete(
    message_batch_id: str,
    *,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedMessageBatch:
    """
    Delete a Message Batch.

    Message Batches can only be deleted once they've finished processing.
    If you'd like to delete an in-progress batch, you must first cancel it.

    Args:
        message_batch_id: ID of the Message Batch
        extra_headers: Send extra headers
        extra_query: Add additional query parameters to the request
        extra_body: Add additional JSON properties to the request
        timeout: Override the client-level default timeout for this request,
            in seconds

    Returns:
        DeletedMessageBatch: Confirmation of deletion
    """
```

Async version:

```python { .api }
async def delete(
    message_batch_id: str,
    *,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedMessageBatch:
    """Async version of delete()"""
```

Example:

```python
from anthropic import Anthropic

client = Anthropic()

batch_id = "msgbatch_01ABC123"

# Ensure batch is finished before deleting
batch = client.messages.batches.retrieve(batch_id)
if batch.processing_status != "ended":
    print("Batch must finish processing before deletion")
    # Optionally cancel first
    client.messages.batches.cancel(batch_id)
else:
    # Delete the batch
    result = client.messages.batches.delete(batch_id)
    print(f"Deleted batch: {result.id}")
```

### Retrieve Batch Results

Stream the results of a completed batch as a JSONL file. Each line contains the result of a single request.

```python { .api }
def results(
    message_batch_id: str,
    *,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> JSONLDecoder[MessageBatchIndividualResponse]:
    """
    Streams the results of a Message Batch as a `.jsonl` file.

    Each line in the file is a JSON object containing the result of a single
    request in the Message Batch. Results are not guaranteed to be in the
    same order as requests. Use the `custom_id` field to match results to
    requests.

    Args:
        message_batch_id: ID of the Message Batch
        extra_headers: Send extra headers
        extra_query: Add additional query parameters to the request
        extra_body: Add additional JSON properties to the request
        timeout: Override the client-level default timeout for this request,
            in seconds

    Returns:
        JSONLDecoder[MessageBatchIndividualResponse]: Iterator of batch
            result objects

    Raises:
        AnthropicError: If the batch has no results_url (not yet finished
            processing)
    """
```

Async version:

```python { .api }
async def results(
    message_batch_id: str,
    *,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncJSONLDecoder[MessageBatchIndividualResponse]:
    """Async version of results()"""
```

Example:

```python
from anthropic import Anthropic

client = Anthropic()

batch_id = "msgbatch_01ABC123"

# Check if batch is complete
batch = client.messages.batches.retrieve(batch_id)
if batch.processing_status != "ended":
    print("Batch is still processing...")
else:
    # Stream and process results
    results = client.messages.batches.results(batch_id)

    succeeded = []
    failed = []

    for result in results:
        custom_id = result.custom_id

        if result.result.type == "succeeded":
            message = result.result.message
            succeeded.append({
                "id": custom_id,
                "content": message.content,
                "usage": message.usage
            })
        elif result.result.type == "errored":
            error = result.result.error
            failed.append({
                "id": custom_id,
                "error_type": error.type,
                "message": error.message
            })
        elif result.result.type == "canceled":
            print(f"Request {custom_id} was canceled")
        elif result.result.type == "expired":
            print(f"Request {custom_id} expired")

    print(f"Successful: {len(succeeded)}")
    print(f"Failed: {len(failed)}")
```

## Types

### MessageBatch

Represents a batch processing job containing multiple message requests.

```python { .api }
class MessageBatch(BaseModel):
    """
    A batch processing job for multiple message creation requests.

    Attributes:
        id: Unique object identifier. The format and length of IDs may
            change over time.
        type: Object type. For Message Batches, this is always
            "message_batch".
        processing_status: Processing status of the Message Batch. Values:
            - "in_progress": Batch is currently processing
            - "canceling": Batch cancellation has been initiated
            - "ended": Processing has completed
        request_counts: Tallies requests within the Message Batch,
            categorized by their status. Requests start as "processing" and
            move to one of the other statuses only once processing of the
            entire batch ends.
        created_at: RFC 3339 datetime string representing the time at which
            the Message Batch was created.
        expires_at: RFC 3339 datetime string representing the time at which
            the Message Batch will expire and end processing, which is 24
            hours after creation.
        archived_at: RFC 3339 datetime string representing the time at which
            the Message Batch was archived and its results became
            unavailable. None if not yet archived.
        cancel_initiated_at: RFC 3339 datetime string representing the time
            at which cancellation was initiated for the Message Batch. None
            if cancellation was not initiated.
        ended_at: RFC 3339 datetime string representing the time at which
            processing for the Message Batch ended. None if processing has
            not yet ended.
        results_url: URL to a `.jsonl` file containing the results of the
            Message Batch requests. None until processing ends. Results in
            the file are not guaranteed to be in the same order as requests.
    """
    id: str
    type: Literal["message_batch"]
    processing_status: Literal["in_progress", "canceling", "ended"]
    request_counts: MessageBatchRequestCounts
    created_at: datetime
    expires_at: datetime
    archived_at: Optional[datetime] = None
    cancel_initiated_at: Optional[datetime] = None
    ended_at: Optional[datetime] = None
    results_url: Optional[str] = None
```

### MessageBatchRequestCounts

Tallies of requests within a batch, categorized by their status.

```python { .api }
class MessageBatchRequestCounts(BaseModel):
    """
    Request count tallies for a message batch.

    Attributes:
        processing: Number of requests in the Message Batch that are
            processing.
        succeeded: Number of requests in the Message Batch that have
            completed successfully. This is zero until processing of the
            entire Message Batch has ended.
        errored: Number of requests in the Message Batch that encountered an
            error. This is zero until processing of the entire Message Batch
            has ended.
        canceled: Number of requests in the Message Batch that have been
            canceled. This is zero until processing of the entire Message
            Batch has ended.
        expired: Number of requests in the Message Batch that have expired.
            This is zero until processing of the entire Message Batch has
            ended.
    """
    processing: int
    succeeded: int
    errored: int
    canceled: int
    expired: int
```

### MessageBatchIndividualResponse

Represents the result of a single request within a batch.

```python { .api }
class MessageBatchIndividualResponse(BaseModel):
    """
    Result of a single request within a message batch.

    Attributes:
        custom_id: Developer-provided ID created for each request in a
            Message Batch. Useful for matching results to requests, as
            results may be given out of request order. Must be unique for
            each request within the Message Batch.
        result: Processing result for this request. Contains a Message
            output if processing was successful, an error response if
            processing failed, or the reason why processing was not
            attempted, such as cancellation or expiration.
    """
    custom_id: str
    result: MessageBatchResult
```

### MessageBatchResult

Union type representing the possible outcomes of a batch request.

```python { .api }
MessageBatchResult = Union[
    MessageBatchSucceededResult,
    MessageBatchErroredResult,
    MessageBatchCanceledResult,
    MessageBatchExpiredResult
]
```

### MessageBatchSucceededResult

Result when a batch request completed successfully.

```python { .api }
class MessageBatchSucceededResult(BaseModel):
    """
    Successful batch request result.

    Attributes:
        type: Result type, always "succeeded"
        message: The successful Message response
    """
    type: Literal["succeeded"]
    message: Message
```

### MessageBatchErroredResult

Result when a batch request encountered an error.

```python { .api }
class MessageBatchErroredResult(BaseModel):
    """
    Failed batch request result.

    Attributes:
        type: Result type, always "errored"
        error: The error response with details about what went wrong
    """
    type: Literal["errored"]
    error: ErrorResponse
```

### MessageBatchCanceledResult

Result when a batch request was canceled.

```python { .api }
class MessageBatchCanceledResult(BaseModel):
    """
    Canceled batch request result.

    Attributes:
        type: Result type, always "canceled"
    """
    type: Literal["canceled"]
```

### MessageBatchExpiredResult

Result when a batch request expired before processing.

```python { .api }
class MessageBatchExpiredResult(BaseModel):
    """
    Expired batch request result.

    Attributes:
        type: Result type, always "expired"
    """
    type: Literal["expired"]
```

### DeletedMessageBatch

Confirmation that a batch was deleted.

```python { .api }
class DeletedMessageBatch(BaseModel):
    """
    Deletion confirmation for a message batch.

    Attributes:
        id: ID of the Message Batch that was deleted
        type: Deleted object type. For Message Batches, this is always
            "message_batch_deleted".
    """
    id: str
    type: Literal["message_batch_deleted"]
```

### Request (Parameter Type)

Request object used when creating a batch.

```python { .api }
class Request(TypedDict):
    """
    Individual request within a batch creation request.

    Attributes:
        custom_id: Developer-provided ID created for each request in a
            Message Batch. Useful for matching results to requests, as
            results may be given out of request order. Must be unique for
            each request within the Message Batch.
        params: Messages API creation parameters for the individual request.
            See the Messages API reference for full documentation on
            available parameters. Note: streaming is not supported in
            batches.
    """
    custom_id: str  # Required
    params: MessageCreateParamsNonStreaming  # Required
```

## Usage Patterns

### Complete Batch Workflow

```python
from anthropic import Anthropic
import time

client = Anthropic()

# Step 1: Create a batch
print("Creating batch...")
batch = client.messages.batches.create(
    requests=[
        {
            "custom_id": f"req-{i}",
            "params": {
                "model": "claude-3-5-sonnet-20241022",
                "max_tokens": 1024,
                "messages": [
                    {"role": "user", "content": f"What is {i} + {i}?"}
                ]
            }
        }
        for i in range(10)
    ]
)

print(f"Batch created: {batch.id}")

# Step 2: Poll for completion
print("Waiting for batch to complete...")
while batch.processing_status == "in_progress":
    time.sleep(60)
    batch = client.messages.batches.retrieve(batch.id)
    print(f"  Processing: {batch.request_counts.processing} remaining")

# Step 3: Process results
print("Processing results...")
if batch.processing_status == "ended" and batch.results_url:
    results = client.messages.batches.results(batch.id)

    for result in results:
        if result.result.type == "succeeded":
            message = result.result.message
            content = message.content[0].text
            print(f"{result.custom_id}: {content}")
        else:
            print(f"{result.custom_id}: {result.result.type}")

# Step 4: Clean up
print("Deleting batch...")
client.messages.batches.delete(batch.id)
print("Done!")
```

### Error Handling in Batches

```python
from anthropic import Anthropic, AnthropicError

client = Anthropic()
batch_id = "msgbatch_01ABC123"

try:
    # Try to get results
    results = client.messages.batches.results(batch_id)

    for result in results:
        custom_id = result.custom_id

        if result.result.type == "succeeded":
            # Process successful result
            message = result.result.message
            print(f"Success for {custom_id}")

        elif result.result.type == "errored":
            # Handle error result
            error = result.result.error
            print(f"Error for {custom_id}:")
            print(f"  Type: {error.type}")
            print(f"  Message: {error.message}")

            # Take action based on error type
            if error.type == "rate_limit_error":
                print("  -> Rate limited, request not retried in batch")
            elif error.type == "invalid_request_error":
                print("  -> Invalid request parameters")

        elif result.result.type == "canceled":
            print(f"Canceled: {custom_id}")

        elif result.result.type == "expired":
            print(f"Expired: {custom_id}")

except AnthropicError as e:
    # Batch may not be finished yet
    print(f"Error retrieving results: {e}")
    batch = client.messages.batches.retrieve(batch_id)
    print(f"Batch status: {batch.processing_status}")
```

### Async Batch Processing

```python
import asyncio
from anthropic import AsyncAnthropic

async def process_batch():
    client = AsyncAnthropic()

    # Create batch
    batch = await client.messages.batches.create(
        requests=[
            {
                "custom_id": f"async-req-{i}",
                "params": {
                    "model": "claude-3-5-sonnet-20241022",
                    "max_tokens": 1024,
                    "messages": [
                        {"role": "user", "content": f"Question {i}"}
                    ]
                }
            }
            for i in range(5)
        ]
    )

    print(f"Created batch: {batch.id}")

    # Poll for completion
    while batch.processing_status == "in_progress":
        await asyncio.sleep(60)
        batch = await client.messages.batches.retrieve(batch.id)
        print(f"Status: {batch.processing_status}")

    # Get results
    if batch.results_url:
        results = await client.messages.batches.results(batch.id)

        async for result in results:
            if result.result.type == "succeeded":
                print(f"{result.custom_id}: Success")

    # Clean up
    await client.messages.batches.delete(batch.id)

# Run the async function
asyncio.run(process_batch())
```

### Managing Large Batches

```python
from anthropic import Anthropic
from typing import List, Dict, Any

def create_large_batch(
    client: Anthropic,
    requests_data: List[Dict[str, Any]],
    batch_size: int = 10000
) -> List[str]:
    """
    Create multiple batches if request count exceeds batch_size.
    Returns list of batch IDs.
    """
    batch_ids = []

    for i in range(0, len(requests_data), batch_size):
        chunk = requests_data[i:i + batch_size]

        batch = client.messages.batches.create(
            requests=[
                {
                    "custom_id": f"req-{i + j}",
                    "params": data
                }
                for j, data in enumerate(chunk)
            ]
        )

        batch_ids.append(batch.id)
        print(f"Created batch {batch.id} with {len(chunk)} requests")

    return batch_ids

# Example usage
client = Anthropic()

# Prepare 15,000 requests (will be split into 2 batches)
large_request_list = [
    {
        "model": "claude-3-5-sonnet-20241022",
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": f"Process item {i}"}]
    }
    for i in range(15000)
]

batch_ids = create_large_batch(client, large_request_list)
print(f"Created {len(batch_ids)} batches")
```

### Retrieving Specific Results

```python
from anthropic import Anthropic
from typing import Dict, Optional

def get_result_by_custom_id(
    client: Anthropic,
    batch_id: str,
    target_custom_id: str
) -> Optional[Dict]:
    """
    Find a specific result by custom_id in batch results.
    """
    results = client.messages.batches.results(batch_id)

    for result in results:
        if result.custom_id == target_custom_id:
            if result.result.type == "succeeded":
                return {
                    "success": True,
                    "message": result.result.message
                }
            else:
                return {
                    "success": False,
                    "type": result.result.type,
                    "error": getattr(result.result, "error", None)
                }

    return None

# Example usage
client = Anthropic()
batch_id = "msgbatch_01ABC123"

result = get_result_by_custom_id(client, batch_id, "important-request-42")
if result:
    if result["success"]:
        print(f"Found result: {result['message'].content}")
    else:
        print(f"Request failed: {result['type']}")
else:
    print("Result not found in batch")
```
1035