# Cache and Utilities

Low-level utilities including direct GraphQL access, caching management, data processing functions, and schema validation tools for advanced use cases.

## Core Utilities

### Direct GraphQL API Access

Low-level interface for custom GraphQL queries and advanced use cases.

```python { .api }
def call_linear_api(query: str | Dict[str, Any], api_key: Optional[str] = None) -> Dict[str, Any]:
    """
    Direct low-level interface to Linear's GraphQL API.

    Args:
        query: GraphQL query string or dictionary with query/variables
        api_key: Optional API key (falls back to LINEAR_API_KEY env var)

    Returns:
        API response data dictionary

    Raises:
        ValueError: For authentication or API errors

    Dependencies:
        requests, os
    """
```

Usage examples:

```python
from linear_api.utils import call_linear_api

# Simple query with query string
response = call_linear_api("""
query {
    viewer {
        id
        name
    }
}
""")
print(f"Current user: {response['data']['viewer']['name']}")

# Query with variables using dictionary format
query = {
    "query": """
    query GetIssue($id: String!) {
        issue(id: $id) {
            id
            title
            state { name }
            assignee { name }
        }
    }
    """,
    "variables": {
        "id": "issue-id"
    }
}
response = call_linear_api(query)
issue_data = response['data']['issue']
```
### Data Processing Functions

Transform raw API responses into structured domain models.

```python { .api }
def process_issue_data(data: Dict[str, Any]) -> LinearIssue:
    """
    Transform raw Linear API issue data into structured LinearIssue objects.

    Key Processing:
    - Converts nested objects (state, team, assignee, project, labels, attachments)
    - Handles datetime field parsing (ISO format → Python datetime)
    - Processes pagination nodes for collections
    - Adds default values for missing project fields
    - Handles parent issue relationships

    Args:
        data: Raw issue data from Linear API

    Returns:
        LinearIssue object with processed data

    Dependencies:
        LinearIssue, LinearUser, LinearState, LinearLabel, LinearProject, LinearTeam, LinearAttachment
    """

def process_project_data(data: Dict[str, Any]) -> LinearProject:
    """
    Transform raw Linear API project data into structured LinearProject objects.

    Key Processing:
    - Parses datetime fields from ISO strings
    - Handles special TimelessDate fields (startDate, targetDate)
    - Preserves project-specific data structures

    Args:
        data: Raw project data from Linear API

    Returns:
        LinearProject object with processed data

    Dependencies:
        LinearProject, TimelessDate
    """
```

Usage examples:

```python
from linear_api.utils import process_issue_data, call_linear_api

# Get raw issue data and process it
raw_response = call_linear_api("""
query {
    issue(id: "issue-id") {
        id
        title
        state { id name type color }
        assignee { id name email }
        team { id name key }
        createdAt
        updatedAt
    }
}
""")

# Process into structured model
issue = process_issue_data(raw_response['data']['issue'])
print(f"Processed issue: {issue.title}")
print(f"State: {issue.state.name}")
print(f"Assignee: {issue.assignee.name if issue.assignee else 'Unassigned'}")
```
### Client Enhancement

Decorator for automatic client reference injection in model objects.

```python { .api }
def enrich_with_client(func):
    """
    Decorator that recursively attaches client references to returned model objects.

    Key Features:
    - Prevents infinite recursion by checking direct attributes only
    - Handles nested objects, lists, and dictionaries
    - Enables method chaining on returned models

    Usage:
        Applied to manager methods that return LinearModel instances
    """
```

Usage examples:

```python
from linear_api.utils import enrich_with_client

# This decorator is typically used internally by managers
@enrich_with_client
def custom_get_issue(client, issue_id: str):
    raw_data = client.call_api(f"query {{ issue(id: \"{issue_id}\") {{ ... }} }}")
    return process_issue_data(raw_data['data']['issue'])

# The returned issue will have client reference for dynamic properties
issue = custom_get_issue(client, "issue-id")
comments = issue.comments  # This works because of client enrichment
```
## Cache Management

Comprehensive caching system with TTL support and performance monitoring.

### CacheManager Class

```python { .api }
class CacheManager:
    """
    Manages caching for API responses with TTL support.
    """

    def __init__(self, enabled: bool = True, default_ttl: int = 3600):
        """
        Initialize cache manager.

        Args:
            enabled: Whether caching is enabled
            default_ttl: Default time-to-live in seconds
        """

    def get(self, cache_name: str, key: Any) -> Optional[Any]:
        """
        Get a value from the cache with expiration checking.

        Args:
            cache_name: Name of the cache to access
            key: Cache key to retrieve

        Returns:
            Cached value or None if not found/expired
        """

    def set(self, cache_name: str, key: Any, value: Any, ttl: Optional[int] = None) -> None:
        """
        Set a value in the cache with optional time-to-live.

        Args:
            cache_name: Name of the cache to store in
            key: Cache key
            value: Value to cache
            ttl: Optional TTL override (uses default if None)
        """

    def cached(self, cache_name: str, key_fn: Callable = lambda *args, **kwargs: str(args) + str(kwargs)):
        """
        Decorator for caching function results with customizable key generation.

        Args:
            cache_name: Name of cache to use
            key_fn: Function to generate cache keys from function arguments

        Returns:
            Decorator function
        """

    def clear(self, cache_name: Optional[str] = None) -> None:
        """
        Clear a specific cache or all caches.

        Args:
            cache_name: Cache to clear, or None for all caches
        """

    def invalidate(self, cache_name: str, key: Any) -> None:
        """
        Invalidate a specific cache entry.

        Args:
            cache_name: Name of the cache
            key: Key to invalidate
        """

    def enable(self) -> None:
        """Enable caching."""

    def disable(self) -> None:
        """Disable caching."""

    def get_cache_size(self, cache_name: Optional[str] = None) -> int:
        """
        Get the number of entries in a cache or all caches.

        Args:
            cache_name: Specific cache name, or None for total

        Returns:
            Number of cache entries
        """

    @property
    def enabled(self) -> bool:
        """Check or set whether caching is enabled."""

    @property
    def stats(self) -> Dict[str, Any]:
        """
        Get cache statistics including hit count, miss count, hit rate,
        and entry counts.

        Returns:
            Dictionary with cache statistics
        """
```
Usage examples:

```python
from linear_api import LinearClient

client = LinearClient()
cache = client.cache

# Check cache status
print(f"Cache enabled: {cache.enabled}")
print(f"Cache stats: {cache.stats}")

# Manual cache operations
cache.set("custom", "my-key", {"data": "value"}, ttl=300)  # 5 minutes
value = cache.get("custom", "my-key")

# Cache decorator for custom functions
@cache.cached("my-function")
def expensive_operation(param1, param2):
    # Simulate expensive operation
    time.sleep(1)
    return f"Result for {param1}, {param2}"

# First call is slow, subsequent calls are fast
result1 = expensive_operation("a", "b")  # Slow
result2 = expensive_operation("a", "b")  # Fast (cached)

# Cache management
cache.clear("my-function")  # Clear specific cache
cache.clear()               # Clear all caches
cache.disable()             # Disable caching
cache.enable()              # Re-enable caching
```
### Cache Performance Monitoring

```python
def monitor_cache_performance(client):
    """Monitor cache performance and hit rates."""
    stats = client.cache.stats

    print("Cache Performance Report:")
    print("=" * 40)
    print(f"Cache enabled: {client.cache.enabled}")
    print(f"Total hits: {stats.get('hits', 0)}")
    print(f"Total misses: {stats.get('misses', 0)}")
    print(f"Hit rate: {stats.get('hit_rate', 0):.2%}")

    # Per-cache statistics
    cache_sizes = {}
    for cache_name in ['issues', 'projects', 'teams', 'users']:
        size = client.cache.get_cache_size(cache_name)
        if size > 0:
            cache_sizes[cache_name] = size

    if cache_sizes:
        print("\nCache sizes:")
        for name, size in cache_sizes.items():
            print(f"  {name}: {size} entries")

    return stats

# Monitor performance
stats = monitor_cache_performance(client)
```
## Schema Validation Tools

Tools for validating domain models against Linear's GraphQL schema.

### Model Validation Functions

```python { .api }
def validate_all_models(api_key: str) -> Dict[str, Dict[str, Any]]:
    """
    Validate all domain models against GraphQL schema.

    Args:
        api_key: Linear API key for schema access

    Returns:
        Dictionary with completeness metrics and field comparisons
    """

def validate_model(model_class: type[LinearModel], api_key: str) -> Dict[str, Any]:
    """
    Validate a single model class against GraphQL type.

    Args:
        model_class: LinearModel subclass to validate
        api_key: Linear API key for schema access

    Returns:
        Detailed field analysis and validation results
    """

def get_schema_for_type(type_name: str, api_key: str) -> Dict[str, Any]:
    """
    Use GraphQL introspection to get type schema.

    Args:
        type_name: GraphQL type name to introspect
        api_key: Linear API key

    Returns:
        Field definitions and metadata for the type
    """

def suggest_model_improvements(model_class: type[LinearModel], api_key: str) -> str:
    """
    Generate code suggestions for missing fields.

    Args:
        model_class: LinearModel subclass to analyze
        api_key: Linear API key

    Returns:
        Code suggestions with field definitions and Python type mappings
    """

def compare_fields(model_class: type[LinearModel], api_key: str) -> Tuple[Set[str], Set[str], Set[str]]:
    """
    Compare model fields vs GraphQL schema.

    Args:
        model_class: LinearModel subclass to compare
        api_key: Linear API key

    Returns:
        Tuple of (common_fields, missing_fields, extra_fields)
    """
```

Usage examples:

```python
from linear_api.schema_validator import validate_all_models, validate_model, suggest_model_improvements
from linear_api import LinearIssue

# Validate all models
all_results = validate_all_models(api_key="your-api-key")
for model_name, results in all_results.items():
    completeness = results.get('completeness', 0)
    print(f"{model_name}: {completeness:.1%} complete")

# Validate specific model
issue_validation = validate_model(LinearIssue, api_key="your-api-key")
print(f"Issue model completeness: {issue_validation['completeness']:.1%}")
print(f"Missing fields: {issue_validation['missing_fields']}")

# Get improvement suggestions
suggestions = suggest_model_improvements(LinearIssue, api_key="your-api-key")
print("Suggested improvements:")
print(suggestions)
```
### GraphQL Introspection Helpers

```python { .api }
def introspect_type(client, type_name: str) -> Dict[str, Any]:
    """
    Introspect GraphQL type structure.

    Args:
        client: LinearClient instance
        type_name: GraphQL type name to introspect

    Returns:
        Field information and metadata
    """

def get_field_names(client, type_name: str) -> List[str]:
    """
    Extract field names from GraphQL type.

    Args:
        client: LinearClient instance
        type_name: GraphQL type name

    Returns:
        Simplified field name list
    """

def print_type_fields(client, type_name: str) -> None:
    """
    Pretty-print type field information.

    Args:
        client: LinearClient instance
        type_name: GraphQL type name

    Includes:
        Field descriptions and type information
    """
```

Usage examples:

```python
from linear_api.utils.introspection_helper import introspect_type, print_type_fields, get_field_names

# Introspect a GraphQL type
issue_schema = introspect_type(client, "Issue")
print(f"Issue type has {len(issue_schema['fields'])} fields")

# Print detailed field information
print_type_fields(client, "Issue")

# Get field names only
field_names = get_field_names(client, "Project")
print(f"Project fields: {field_names}")
```
## Advanced Utility Patterns

### Custom GraphQL Queries

```python
def get_issue_with_custom_fields(client, issue_id: str):
    """Get issue with specific field selection for performance."""
    query = """
    query GetIssueCustom($id: String!) {
        issue(id: $id) {
            id
            title
            description
            priority
            state { name type }
            assignee { name email }
            team { name key }
            labels(first: 10) {
                nodes { name color }
            }
            comments(first: 5) {
                nodes {
                    body
                    createdAt
                    user { name }
                }
            }
        }
    }
    """

    response = client.execute_graphql(query, {"id": issue_id})
    return response['data']['issue']

# Use custom query
issue_data = get_issue_with_custom_fields(client, "issue-id")
print(f"Issue: {issue_data['title']}")
print(f"Comments: {len(issue_data['comments']['nodes'])}")
```

### Bulk Data Processing

```python
def bulk_process_issues(client, issue_ids: List[str]):
    """Process multiple issues efficiently with single query."""
    # Build query for multiple issues
    issue_queries = []
    for i, issue_id in enumerate(issue_ids):
        issue_queries.append(f"""
        issue{i}: issue(id: "{issue_id}") {{
            id
            title
            state {{ name }}
            assignee {{ name }}
            priority
        }}
        """)

    query = f"query {{ {' '.join(issue_queries)} }}"
    response = client.call_api(query)

    # Process results
    issues = []
    for i in range(len(issue_ids)):
        issue_data = response['data'][f'issue{i}']
        if issue_data:
            # Use process_issue_data for full model conversion
            issues.append(process_issue_data(issue_data))

    return issues

# Process multiple issues in single API call
issue_ids = ["issue-1", "issue-2", "issue-3"]
issues = bulk_process_issues(client, issue_ids)
```
### Error Handling Utilities

```python
def safe_api_call(client, query, variables=None, retries=3):
    """Make API call with retry logic and error handling."""
    import time

    for attempt in range(retries):
        try:
            if variables:
                return client.execute_graphql(query, variables)
            else:
                return client.call_api(query)
        except ValueError as e:
            if "rate limit" in str(e).lower() and attempt < retries - 1:
                # Exponential backoff for rate limiting
                wait_time = 2 ** attempt
                print(f"Rate limited, waiting {wait_time}s before retry...")
                time.sleep(wait_time)
                continue
            else:
                raise

    raise ValueError(f"Failed after {retries} attempts")

# Use with error handling
try:
    response = safe_api_call(client, "query { viewer { name } }")
    print(f"User: {response['data']['viewer']['name']}")
except ValueError as e:
    print(f"API call failed: {e}")
```
### Performance Profiling

```python
import time
from contextlib import contextmanager

@contextmanager
def profile_api_calls(client):
    """Profile API call performance and cache effectiveness."""
    start_stats = client.cache.stats.copy()
    start_time = time.time()

    yield

    end_time = time.time()
    end_stats = client.cache.stats

    duration = end_time - start_time
    cache_hits = end_stats.get('hits', 0) - start_stats.get('hits', 0)
    cache_misses = end_stats.get('misses', 0) - start_stats.get('misses', 0)
    total_requests = cache_hits + cache_misses

    print(f"Performance Profile:")
    print(f"  Duration: {duration:.2f}s")
    print(f"  Total requests: {total_requests}")
    print(f"  Cache hits: {cache_hits}")
    print(f"  Cache misses: {cache_misses}")
    if total_requests > 0:
        print(f"  Hit rate: {cache_hits/total_requests:.1%}")

# Profile a set of operations
with profile_api_calls(client):
    issues = client.issues.get_by_team("Engineering")
    for issue in list(issues.values())[:5]:
        comments = issue.comments  # This may hit cache
```
### Data Export Utilities

```python
def export_team_data(client, team_name: str, format: str = "json"):
    """Export comprehensive team data for backup or analysis."""
    team_id = client.teams.get_id_by_name(team_name)
    team = client.teams.get(team_id)

    # Collect all team data
    export_data = {
        "team": team.model_dump(),
        "members": [m.model_dump() for m in client.teams.get_members(team_id)],
        "states": [s.model_dump() for s in client.teams.get_states(team_id)],
        "labels": [l.model_dump() for l in client.teams.get_labels(team_id)],
        "issues": {k: v.model_dump() for k, v in client.issues.get_by_team(team_name).items()},
        "projects": {k: v.model_dump() for k, v in client.projects.get_all(team_id=team_id).items()}
    }

    if format == "json":
        import json
        return json.dumps(export_data, indent=2, default=str)
    elif format == "csv":
        # Convert to CSV format for specific entities
        import pandas as pd
        issues_df = pd.DataFrame([issue for issue in export_data["issues"].values()])
        return issues_df.to_csv(index=False)

# Export team data
team_json = export_team_data(client, "Engineering", "json")
print(f"Exported {len(team_json)} characters of team data")
```