pypi-langgraph-sdk

Description
Python SDK for interacting with the LangGraph Platform REST API to build and manage AI assistants and conversational workflows
Author
tessl
Last updated

How to use

npx @tessl/cli registry install tessl/pypi-langgraph-sdk@0.2.0

run-execution.md docs/

1
# Run Execution
2
3
Execute assistant workflows on threads with support for streaming, interrupts, configuration, and completion handling. Runs represent individual executions of an assistant on a thread.
4
5
## Capabilities
6
7
### Streaming Execution
8
9
Execute runs with real-time streaming of execution events, state changes, and outputs.
10
11
```python { .api }
12
from collections.abc import AsyncIterator, Mapping, Sequence
13
from typing import Any
14
from langgraph_sdk.schema import (
15
StreamPart, StreamMode, Config, Context, Checkpoint,
16
Command, QueryParamTypes
17
)
18
19
# Via client.runs
20
def stream(
21
thread_id: str | None,
22
assistant_id: str,
23
*,
24
input: Mapping[str, Any] | None = None,
25
command: Command | None = None,
26
stream_mode: StreamMode | Sequence[StreamMode] = "values",
27
stream_subgraphs: bool = False,
28
stream_resumable: bool = False,
29
metadata: Mapping[str, Any] | None = None,
30
config: Config | None = None,
31
context: Context | None = None,
32
checkpoint: Checkpoint | None = None,
33
checkpoint_id: str | None = None, # deprecated
34
webhook: str | None = None,
35
webhook_mode: str | None = None,
36
headers: Mapping[str, str] | None = None,
37
params: QueryParamTypes | None = None,
38
) -> AsyncIterator[StreamPart]:
39
"""
40
Stream the results of a run.
41
42
Args:
43
thread_id: The thread ID to stream the run on.
44
assistant_id: The assistant ID or graph name to stream the run on.
45
input: The input to the run.
46
command: The command to run instead of input.
47
stream_mode: The mode(s) to stream the run. Default is "values".
48
stream_subgraphs: Whether to stream subgraphs.
49
stream_resumable: Whether the stream is resumable.
50
metadata: The metadata to add to the run.
51
config: The config to use for the run.
52
context: The context to add to the run.
53
checkpoint: The checkpoint to resume from.
54
checkpoint_id: Checkpoint to resume from. Deprecated, use checkpoint instead.
55
webhook: Webhook to call after the run is done.
56
webhook_mode: Mode to call the webhook. Options are "GET" and "POST".
57
headers: Optional custom headers to include with the request.
58
params: Optional query parameters to include with the request.
59
60
Returns:
61
AsyncIterator[StreamPart]: The stream of the run.
62
"""
63
```
64
65
### Async Execution
66
67
Execute runs asynchronously and retrieve results when complete.
68
69
```python { .api }
70
from langgraph_sdk.schema import Run, QueryParamTypes
71
72
async def create(
73
thread_id: str | None,
74
assistant_id: str,
75
*,
76
input: Mapping[str, Any] | None = None,
77
command: Command | None = None,
78
stream_mode: StreamMode | Sequence[StreamMode] = "values",
79
stream_subgraphs: bool = False,
80
stream_resumable: bool = False,
81
metadata: Mapping[str, Any] | None = None,
82
config: Config | None = None,
83
context: Context | None = None,
84
checkpoint: Checkpoint | None = None,
85
checkpoint_id: str | None = None, # deprecated
86
webhook: str | None = None,
87
webhook_mode: str | None = None,
88
headers: Mapping[str, str] | None = None,
89
params: QueryParamTypes | None = None,
90
) -> Run:
91
"""
92
Create a background run.
93
94
Args:
95
thread_id: The thread ID to create the run on.
96
assistant_id: The assistant ID or graph name to create the run on.
97
input: The input to the run.
98
command: The command to run instead of input.
99
stream_mode: The mode(s) to stream the run. Default is "values".
100
stream_subgraphs: Whether to stream subgraphs.
101
stream_resumable: Whether the stream is resumable.
102
metadata: The metadata to add to the run.
103
config: The config to use for the run.
104
context: The context to add to the run.
105
checkpoint: The checkpoint to resume from.
106
checkpoint_id: Checkpoint to resume from. Deprecated, use checkpoint instead.
107
webhook: Webhook to call after the run is done.
108
webhook_mode: Mode to call the webhook. Options are "GET" and "POST".
109
headers: Optional custom headers to include with the request.
110
params: Optional query parameters to include with the request.
111
112
Returns:
113
Run: The created run.
114
"""
115
```
116
117
### Synchronous Execution
118
119
Execute runs synchronously and wait for completion.
120
121
```python { .api }
122
async def wait(
123
thread_id: str | None,
124
assistant_id: str,
125
*,
126
input: Mapping[str, Any] | None = None,
127
command: Command | None = None,
128
metadata: Mapping[str, Any] | None = None,
129
config: Config | None = None,
130
context: Context | None = None,
131
checkpoint: Checkpoint | None = None,
132
checkpoint_id: str | None = None, # deprecated
133
webhook: str | None = None,
134
webhook_mode: str | None = None,
135
checkpoint_during: bool | None = None,
136
headers: Mapping[str, str] | None = None,
137
params: QueryParamTypes | None = None,
138
) -> Run:
139
"""
140
Create a run, wait for it to finish and return the final state.
141
142
Args:
143
thread_id: The thread ID to create the run on.
144
assistant_id: The assistant ID or graph name to create the run on.
145
input: The input to the run.
146
command: The command to run instead of input.
147
metadata: The metadata to add to the run.
148
config: The config to use for the run.
149
context: The context to add to the run.
150
checkpoint: The checkpoint to resume from.
151
checkpoint_id: Checkpoint to resume from. Deprecated, use checkpoint instead.
152
webhook: Webhook to call after the run is done.
153
webhook_mode: Mode to call the webhook. Options are "GET" and "POST".
154
checkpoint_during: Whether to checkpoint during the run.
155
headers: Optional custom headers to include with the request.
156
params: Optional query parameters to include with the request.
157
158
Returns:
159
Run: The completed run.
160
"""
161
```
162
163
### Batch Execution
164
165
Execute multiple runs concurrently with batch operations.
166
167
```python { .api }
168
from langgraph_sdk.schema import RunCreate
169
170
async def create_batch(
171
payloads: list[RunCreate],
172
*,
173
headers: Mapping[str, str] | None = None,
174
params: QueryParamTypes | None = None,
175
) -> list[Run]:
176
"""
177
Create a batch of stateless background runs.
178
179
Args:
180
payloads: The payloads for the runs.
181
headers: Optional custom headers to include with the request.
182
params: Optional query parameters to include with the request.
183
184
Returns:
185
list[Run]: The created runs.
186
"""
187
```
188
189
### Run Management
190
191
Manage active and completed runs with listing, retrieval, and cancellation capabilities.
192
193
```python { .api }
194
from langgraph_sdk.schema import RunSelectField, RunStatus, CancelAction
195
196
async def list(
197
thread_id: str,
198
*,
199
limit: int = 10,
200
offset: int = 0,
201
status: RunStatus | None = None,
202
select: list[RunSelectField] | None = None,
203
headers: Mapping[str, str] | None = None,
204
params: QueryParamTypes | None = None,
205
) -> list[Run]:
206
"""
207
Get all runs for a thread.
208
209
Args:
210
thread_id: The thread ID to get runs for.
211
limit: The maximum number of runs to return.
212
offset: The number of runs to skip.
213
status: The status to filter by.
214
select: Fields to include in the response.
215
headers: Optional custom headers to include with the request.
216
params: Optional query parameters to include with the request.
217
218
Returns:
219
list[Run]: The runs for the thread.
220
"""
221
222
async def get(
223
thread_id: str,
224
run_id: str,
225
*,
226
headers: Mapping[str, str] | None = None,
227
params: QueryParamTypes | None = None,
228
) -> Run:
229
"""
230
Get a run.
231
232
Args:
233
thread_id: The thread ID to get the run from.
234
run_id: The run ID to get.
235
headers: Optional custom headers to include with the request.
236
params: Optional query parameters to include with the request.
237
238
Returns:
239
Run: Run object.
240
"""
241
242
async def cancel(
243
thread_id: str,
244
run_id: str,
245
*,
246
wait: bool = False,
247
action: CancelAction = "interrupt",
248
headers: Mapping[str, str] | None = None,
249
params: QueryParamTypes | None = None,
250
) -> None:
251
"""
252
Cancel a run.
253
254
Args:
255
thread_id: The thread ID to cancel the run on.
256
run_id: The run ID to cancel.
257
wait: Whether to wait for the run to be cancelled.
258
action: The type of cancellation. Options are "interrupt" or "rollback".
259
headers: Optional custom headers to include with the request.
260
params: Optional query parameters to include with the request.
261
"""
262
263
async def delete(
264
thread_id: str,
265
run_id: str,
266
*,
267
headers: Mapping[str, str] | None = None,
268
params: QueryParamTypes | None = None,
269
) -> None:
270
"""
271
Delete a run.
272
273
Args:
274
thread_id: The thread ID to delete the run from.
275
run_id: The run ID to delete.
276
headers: Optional custom headers to include with the request.
277
params: Optional query parameters to include with the request.
278
"""
279
```
280
281
### Run Streaming & Joining
282
283
Join ongoing runs and stream their execution events.
284
285
```python { .api }
286
async def join(
287
thread_id: str,
288
run_id: str,
289
*,
290
headers: Mapping[str, str] | None = None,
291
params: QueryParamTypes | None = None,
292
) -> dict:
293
"""
294
Block until a run is done. Returns the final state of the thread.
295
296
Args:
297
thread_id: The thread ID to join the run on.
298
run_id: The run ID to join.
299
headers: Optional custom headers to include with the request.
300
params: Optional query parameters to include with the request.
301
302
Returns:
303
dict: The final state of the thread.
304
"""
305
306
def join_stream(
307
thread_id: str,
308
run_id: str,
309
*,
310
cancel_on_disconnect: bool = False,
311
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
312
headers: Mapping[str, str] | None = None,
313
params: QueryParamTypes | None = None,
314
last_event_id: str | None = None,
315
) -> AsyncIterator[StreamPart]:
316
"""
317
Stream output from a run in real-time, until the run is done.
318
Output is not buffered, so any output produced before this call will
319
not be received here.
320
321
Args:
322
thread_id: The thread ID to stream the run on.
323
run_id: The run ID to stream.
324
cancel_on_disconnect: Whether to cancel the run if the stream is disconnected.
325
stream_mode: The mode(s) to stream the run.
326
headers: Optional custom headers to include with the request.
327
params: Optional query parameters to include with the request.
328
last_event_id: The last event ID to start streaming from.
329
330
Returns:
331
AsyncIterator[StreamPart]: A stream of the run.
332
"""
333
```
334
335
## Types
336
337
```python { .api }
338
class Run(TypedDict):
339
"""Run execution details."""
340
run_id: str
341
thread_id: str
342
assistant_id: str
343
created_at: str
344
updated_at: str
345
status: RunStatus
346
kwargs: dict
347
metadata: dict
348
349
class RunCreate(TypedDict):
350
"""Run creation parameters."""
351
thread_id: str
352
assistant_id: str
353
input: dict
354
config: Config
355
metadata: dict
356
multitask_strategy: MultitaskStrategy
357
358
class StreamPart(NamedTuple):
359
"""Stream event part."""
360
event: str
361
data: dict
362
363
RunStatus = Literal["pending", "running", "error", "success", "timeout", "interrupted"]
364
365
StreamMode = Literal[
366
"values", "messages", "updates", "events",
367
"tasks", "checkpoints", "debug", "custom", "messages-tuple"
368
]
369
370
MultitaskStrategy = Literal["reject", "interrupt", "rollback", "enqueue"]
371
372
DisconnectMode = Literal["cancel", "continue"]
373
374
OnCompletionBehavior = Literal["delete", "keep"]
375
376
CancelAction = Literal["interrupt", "rollback"]
377
378
RunSelectField = Literal[
379
"run_id", "thread_id", "assistant_id", "created_at",
380
"updated_at", "status", "kwargs", "metadata"
381
]
382
```
383
384
## Usage Examples
385
386
### Streaming Execution
387
388
```python
389
# Basic streaming run
390
async for chunk in client.runs.stream(
391
thread_id="thread-123",
392
assistant_id="assistant-456",
393
input={"messages": [{"role": "human", "content": "Hello!"}]}
394
):
395
if chunk.event == "messages":
396
print(f"Message: {chunk.data}")
397
elif chunk.event == "events":
398
print(f"Event: {chunk.data}")
399
400
# Advanced streaming with configuration
401
async for chunk in client.runs.stream(
402
thread_id="thread-123",
403
assistant_id="assistant-456",
404
input={"query": "Explain AI"},
405
config={"temperature": 0.7, "max_tokens": 1000},
406
stream_mode="events",
407
stream_subgraphs=True,
408
stream_resumable=True
409
):
410
print(f"{chunk.event}: {chunk.data}")
411
```
412
413
### Asynchronous Execution
414
415
```python
416
# Start run asynchronously
417
run = await client.runs.create(
418
thread_id="thread-123",
419
assistant_id="assistant-456",
420
input={"task": "analyze_document", "doc_id": "doc-789"},
421
metadata={"priority": "high"},
422
webhook="https://myapp.com/webhooks/run-complete"
423
)
424
425
print(f"Started run {run['run_id']} with status {run['status']}")
426
427
# Check run status later
428
updated_run = await client.runs.get("thread-123", run["run_id"])
429
if updated_run["status"] == "success":
430
print("Run completed successfully")
431
```
432
433
### Synchronous Execution
434
435
```python
436
# Execute and wait for completion
437
completed_run = await client.runs.wait(
438
thread_id="thread-123",
439
assistant_id="assistant-456",
440
input={"calculation": "fibonacci", "n": 100},
441
config={"timeout": 300}
442
)
443
444
print(f"Final status: {completed_run['status']}")
445
print(f"Result: {completed_run['kwargs'].get('result')}")
446
```
447
448
### Batch Operations
449
450
```python
451
# Create multiple runs
452
payloads = [
453
{
454
"thread_id": "thread-1",
455
"assistant_id": "assistant-456",
456
"input": {"task": f"process_item_{i}"}
457
}
458
for i in range(10)
459
]
460
461
batch_runs = await client.runs.create_batch(payloads)
462
print(f"Created {len(batch_runs)} runs")
463
```
464
465
### Run Management
466
467
```python
468
# List thread runs
469
runs = await client.runs.list("thread-123", limit=50)
470
active_runs = [r for r in runs if r["status"] in ["pending", "running"]]
471
472
# Cancel a run
473
if active_runs:
474
await client.runs.cancel(
475
"thread-123",
476
active_runs[0]["run_id"],
477
action="interrupt"
478
)
479
480
# Join an ongoing run
481
result = await client.runs.join("thread-123", "run-789")
482
483
# Stream events from ongoing run
484
async for event in client.runs.join_stream("thread-123", "run-789"):
485
print(f"Event: {event}")
486
```
487
488
### Error Handling
489
490
```python
491
try:
492
async for chunk in client.runs.stream(
493
thread_id="thread-123",
494
assistant_id="assistant-456",
495
input={"query": "test"}
496
):
497
if chunk.event == "error":
498
print(f"Execution error: {chunk.data}")
499
break
500
elif chunk.event == "interrupt":
501
print(f"Execution interrupted: {chunk.data}")
502
# Handle interrupt, possibly resume or cancel
503
except Exception as e:
504
print(f"Stream error: {e}")
505
```