# Async Operations

Full async/await support for all Tavily API operations, enabling high-performance concurrent requests and seamless integration with async frameworks like FastAPI, aiohttp, and asyncio-based applications.

## Capabilities

### Async Client

The AsyncTavilyClient provides async versions of all synchronous operations with identical functionality and parameters.

```python { .api }
class AsyncTavilyClient:
    def __init__(
        self,
        api_key: Optional[str] = None,
        company_info_tags: Sequence[str] = ("news", "general", "finance"),
        proxies: Optional[dict[str, str]] = None,
        api_base_url: Optional[str] = None
    ): ...
```
### Async Search Operations

All search operations support async/await patterns for non-blocking execution.

```python { .api }
async def search(
    self,
    query: str,
    search_depth: Literal["basic", "advanced"] = None,
    topic: Literal["general", "news", "finance"] = None,
    time_range: Literal["day", "week", "month", "year"] = None,
    max_results: int = None,
    **kwargs
) -> dict: ...

async def get_search_context(
    self,
    query: str,
    max_tokens: int = 4000,
    **kwargs
) -> str: ...

async def qna_search(
    self,
    query: str,
    search_depth: Literal["basic", "advanced"] = "advanced",
    **kwargs
) -> str: ...

async def get_company_info(
    self,
    query: str,
    search_depth: Literal["basic", "advanced"] = "advanced",
    max_results: int = 5,
    **kwargs
) -> Sequence[dict]: ...
```
### Async Content Operations

Async versions of extract and crawl operations for efficient content processing.

```python { .api }
async def extract(
    self,
    urls: Union[List[str], str],
    extract_depth: Literal["basic", "advanced"] = None,
    format: Literal["markdown", "text"] = None,
    **kwargs
) -> dict: ...

async def crawl(
    self,
    url: str,
    max_depth: int = None,
    max_breadth: int = None,
    instructions: str = None,
    **kwargs
) -> dict: ...

async def map(
    self,
    url: str,
    max_depth: int = None,
    max_breadth: int = None,
    instructions: str = None,
    **kwargs
) -> dict: ...
```
## Basic Async Usage

### Simple Async Operations

```python
import asyncio
from tavily import AsyncTavilyClient

async def basic_async_search():
    client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    # Async search
    result = await client.search("What is machine learning?")
    print(result)

    # Async content extraction
    content = await client.extract(["https://example.com/article"])
    print(content)

# Run the async function
asyncio.run(basic_async_search())
```

### Concurrent Operations

Leverage async for concurrent API calls to improve performance:

```python
import asyncio
from tavily import AsyncTavilyClient

async def concurrent_searches():
    client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    queries = [
        "artificial intelligence trends",
        "climate change solutions",
        "quantum computing advances",
        "renewable energy technology"
    ]

    # Execute searches concurrently
    tasks = [client.search(query) for query in queries]
    results = await asyncio.gather(*tasks)

    # Process results
    for i, result in enumerate(results):
        print(f"Query: {queries[i]}")
        print(f"Results: {len(result.get('results', []))}")
        print("---")

asyncio.run(concurrent_searches())
```
## Advanced Async Patterns

### Batch Processing with Rate Limiting

Process large batches of requests with controlled concurrency:

```python
import asyncio
from tavily import AsyncTavilyClient

async def batch_extract_with_limit():
    client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3",
        # ... many more URLs
    ]

    # Limit concurrent requests to avoid overwhelming the API
    semaphore = asyncio.Semaphore(5)  # Max 5 concurrent requests

    async def extract_with_limit(url):
        async with semaphore:
            try:
                return await client.extract(url)
            except Exception as e:
                return {"error": str(e), "url": url}

    # Process URLs in batches
    tasks = [extract_with_limit(url) for url in urls]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    # Process results
    successful = [r for r in results if not isinstance(r, Exception) and "error" not in r]
    failed = [r for r in results if isinstance(r, Exception) or "error" in r]

    print(f"Successful extractions: {len(successful)}")
    print(f"Failed extractions: {len(failed)}")

asyncio.run(batch_extract_with_limit())
```
### Pipeline Processing

Create processing pipelines with async operations:

```python
import asyncio
from tavily import AsyncTavilyClient

async def search_extract_pipeline():
    client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    # Stage 1: Search for relevant URLs
    search_result = await client.search(
        query="best practices for API design",
        max_results=10,
        search_depth="advanced"
    )

    # Stage 2: Extract URLs from search results
    urls = [result['url'] for result in search_result.get('results', [])]

    # Stage 3: Extract content from URLs concurrently
    extraction_tasks = [client.extract(url, format="markdown") for url in urls[:5]]
    extraction_results = await asyncio.gather(*extraction_tasks, return_exceptions=True)

    # Stage 4: Process extracted content
    processed_content = []
    for result in extraction_results:
        if isinstance(result, dict) and 'results' in result:
            for content in result['results']:
                processed_content.append({
                    'url': content['url'],
                    'title': content.get('title', 'No title'),
                    'content_length': len(content.get('content', ''))
                })

    return processed_content

# Run pipeline
content_summary = asyncio.run(search_extract_pipeline())
for item in content_summary:
    print(f"Title: {item['title']}")
    print(f"URL: {item['url']}")
    print(f"Content Length: {item['content_length']} characters")
    print("---")
```
## Framework Integration

### FastAPI Integration

Integrate AsyncTavilyClient with FastAPI applications:

```python
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from tavily import AsyncTavilyClient

app = FastAPI()
client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

class SearchRequest(BaseModel):
    query: str
    max_results: int = 5

class ExtractRequest(BaseModel):
    urls: list[str]
    format: str = "markdown"

@app.post("/search")
async def search_endpoint(request: SearchRequest):
    try:
        result = await client.search(
            query=request.query,
            max_results=request.max_results
        )
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/extract")
async def extract_endpoint(request: ExtractRequest):
    try:
        result = await client.extract(
            urls=request.urls,
            format=request.format
        )
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/company/{company_name}")
async def company_info_endpoint(company_name: str):
    try:
        result = await client.get_company_info(company_name)
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
```
### aiohttp Integration

Use with aiohttp web applications:

```python
from aiohttp import web
import json
from tavily import AsyncTavilyClient

async def init_app():
    app = web.Application()
    app['tavily_client'] = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    app.router.add_post('/search', search_handler)
    app.router.add_post('/extract', extract_handler)

    return app

async def search_handler(request):
    client = request.app['tavily_client']
    data = await request.json()

    try:
        result = await client.search(data['query'])
        return web.json_response(result)
    except Exception as e:
        return web.json_response(
            {'error': str(e)},
            status=500
        )

async def extract_handler(request):
    client = request.app['tavily_client']
    data = await request.json()

    try:
        result = await client.extract(data['urls'])
        return web.json_response(result)
    except Exception as e:
        return web.json_response(
            {'error': str(e)},
            status=500
        )

if __name__ == '__main__':
    web.run_app(init_app(), host='localhost', port=8080)
```
## Error Handling in Async Code

Proper async error handling patterns:

```python
import asyncio
from tavily import AsyncTavilyClient, InvalidAPIKeyError, UsageLimitExceededError, TimeoutError

async def robust_async_operations():
    client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    # Handle individual operation errors
    try:
        result = await client.search("test query", timeout=30)
        print("Search successful")
    except TimeoutError:
        print("Search timed out")
    except UsageLimitExceededError:
        print("API usage limit exceeded")
    except InvalidAPIKeyError:
        print("Invalid API key")
    except Exception as e:
        print(f"Unexpected error: {e}")

    # Handle concurrent operation errors
    queries = ["query1", "query2", "query3"]
    tasks = [client.search(query) for query in queries]

    results = await asyncio.gather(*tasks, return_exceptions=True)

    for i, result in enumerate(results):
        if isinstance(result, Exception):
            print(f"Query {i+1} failed: {result}")
        else:
            print(f"Query {i+1} succeeded: {len(result.get('results', []))} results")

asyncio.run(robust_async_operations())
```
## Performance Optimization

### Connection Pooling

The AsyncTavilyClient automatically manages HTTP connections efficiently:

```python
import asyncio
from tavily import AsyncTavilyClient

# The client handles connection pooling internally
client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

# Multiple concurrent requests reuse connections
async def optimized_requests():
    tasks = []
    for i in range(10):
        task = client.search(f"query {i}")
        tasks.append(task)

    # All requests share connection pool
    results = await asyncio.gather(*tasks)
    return results
```

### Memory Management

Handle large-scale async operations efficiently:

```python
async def memory_efficient_processing():
    client = AsyncTavilyClient(api_key="tvly-YOUR_API_KEY")

    # Process in chunks to manage memory
    urls = ["url1", "url2", "url3"]  # Imagine many URLs
    chunk_size = 10

    all_results = []
    for i in range(0, len(urls), chunk_size):
        chunk = urls[i:i + chunk_size]
        chunk_tasks = [client.extract(url) for url in chunk]
        chunk_results = await asyncio.gather(*chunk_tasks, return_exceptions=True)
        all_results.extend(chunk_results)

        # Optional: add small delay between chunks
        await asyncio.sleep(0.1)

    return all_results
```
## Context Managers and Cleanup

Use async context managers for proper resource cleanup:

```python
import asyncio
from contextlib import asynccontextmanager
from tavily import AsyncTavilyClient

@asynccontextmanager
async def tavily_client_context(api_key):
    client = AsyncTavilyClient(api_key=api_key)
    try:
        yield client
    finally:
        # Cleanup if needed (client handles this automatically)
        pass

async def main():
    async with tavily_client_context("tvly-YOUR_API_KEY") as client:
        result = await client.search("example query")
        print(result)

asyncio.run(main())
```