
# Exception Handling

Exception classes for task control flow, error handling, and task execution management. These exceptions provide fine-grained control over task behavior and enable sophisticated error handling patterns.

## Capabilities

### Task Control Exceptions

Exceptions that control task execution flow and retry behavior.

```python { .api }
class CancelExecution(Exception):
    """
    Cancel task execution with optional retry control.

    When raised during task execution:
    - Task is marked as canceled
    - Can optionally schedule for retry
    - Pre-execute hooks can raise this to prevent execution
    - Post-execute hooks receive this as the exception parameter
    """

    def __init__(self, *args, retry=None, **kwargs):
        """
        Initialize CancelExecution.

        Parameters:
        - *args, **kwargs: Additional exception arguments (e.g. a message)
        - retry (bool, keyword-only): Whether to retry the task (optional)
          - None: Use task's default retry setting
          - True: Force retry even if retries=0
          - False: Don't retry even if retries>0
        """

class RetryTask(Exception):
    """
    Request task retry with optional timing control.

    When raised during task execution:
    - Task retry counter is incremented
    - Task can be scheduled for immediate or delayed retry
    - Overrides the default retry delay if eta or delay is specified
    """

    def __init__(self, msg=None, eta=None, delay=None, *args, **kwargs):
        """
        Initialize RetryTask.

        Parameters:
        - msg (str): Error message (optional)
        - eta (datetime): Specific time to retry (optional)
        - delay (int/float/timedelta): Delay before retry (optional)
        - *args, **kwargs: Additional exception arguments
        """
```

### Task Execution Exceptions

Exceptions related to task execution failures and constraints.

```python { .api }
class TaskException(Exception):
    """
    Exception wrapper for failed task results.

    When a task fails and results are retrieved, the original exception
    is wrapped in TaskException with additional metadata.
    """

    def __init__(self, metadata=None, *args):
        """
        Initialize TaskException.

        Parameters:
        - metadata (dict): Exception metadata including:
          - 'error': String representation of original error
          - 'traceback': Full traceback string
          - 'task_id': ID of failed task
          - 'retries': Number of retries attempted
        - *args: Additional exception arguments
        """

class TaskLockedException(HueyException):
    """
    Exception raised when a task cannot acquire a required lock.

    Raised when:
    - Task decorated with @lock cannot acquire the lock
    - Context manager lock acquisition fails
    - Lock is already held by another worker
    """

class ResultTimeout(HueyException):
    """
    Exception raised when result retrieval times out.

    Raised when:
    - Result.get(blocking=True, timeout=X) exceeds the timeout
    - Task takes longer than the specified timeout to complete
    """
```

### Base Exception Classes

Foundation exception classes for the Huey error hierarchy.

```python { .api }
class HueyException(Exception):
    """
    Base exception class for Huey-specific errors.

    The error exceptions (TaskLockedException, ResultTimeout,
    ConfigurationError) inherit from this class, allowing for broad
    exception handling when needed. The control-flow exceptions above
    (CancelExecution, RetryTask, TaskException) derive directly from
    Exception.
    """

class ConfigurationError(HueyException):
    """
    Exception raised for configuration-related errors.

    Raised when:
    - Invalid storage backend configuration
    - Missing required dependencies
    - Invalid consumer options
    - Incompatible parameter combinations
    """
```
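
Because the error exceptions share the `HueyException` base, callers can combine a broad catch with the more specific `TaskException` wrapper. A minimal sketch, assuming `some_task` is any Huey task whose result handle is retrieved as in the examples below:

```python
from huey.exceptions import HueyException, TaskException

result = some_task(42)  # illustrative task

try:
    value = result.get(blocking=True, timeout=10)
except TaskException as e:
    # The task itself failed; details are in the metadata dict.
    print(f"Task failed: {e.metadata.get('error')}")
except HueyException as e:
    # Any Huey-level error: ResultTimeout, TaskLockedException,
    # ConfigurationError, and so on.
    print(f"Huey error: {e}")
```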

## Usage Examples

### Task Retry Control

```python
from huey import RedisHuey
from huey.exceptions import RetryTask, CancelExecution
import random
import datetime

huey = RedisHuey('exception-app')

@huey.task(retries=3, retry_delay=60)
def unreliable_api_call(endpoint):
    """Task that might fail and needs intelligent retry logic."""

    # Simulate API call
    if random.random() < 0.3:  # 30% failure rate
        # Different retry strategies based on error type
        error_type = random.choice(['network', 'rate_limit', 'server_error'])

        if error_type == 'network':
            # Network errors: retry immediately
            raise RetryTask("Network error - retrying immediately")

        elif error_type == 'rate_limit':
            # Rate limit: wait 5 minutes
            raise RetryTask("Rate limited", delay=300)

        elif error_type == 'server_error':
            # Server error: retry at a specific time
            retry_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
            raise RetryTask("Server error", eta=retry_time)

    return f"Successfully called {endpoint}"

# Task will retry with different strategies based on error type
result = unreliable_api_call('/api/users')
```
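
For comparison, a task does not need `RetryTask` just to be retried: an ordinary unhandled exception already consumes a retry and reschedules the task using the decorator's `retry_delay`. `RetryTask` is useful when the retry timing should differ per failure, as above. A brief sketch (`flaky_operation` is an illustrative helper):

```python
# A plain exception relies entirely on the decorator's retry settings.
@huey.task(retries=2, retry_delay=30)
def simple_retry(item_id):
    # Any unhandled exception consumes one retry and the task is
    # rescheduled using retry_delay (30 seconds here).
    return flaky_operation(item_id)  # illustrative helper

# RetryTask overrides the timing for a specific failure mode.
@huey.task(retries=2, retry_delay=30)
def custom_retry(item_id):
    try:
        return flaky_operation(item_id)  # illustrative helper
    except TimeoutError:
        # Wait longer than the default 30 seconds for upstream timeouts.
        raise RetryTask("Upstream timeout", delay=120)
```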

### Conditional Task Cancellation

```python
@huey.task()
def conditional_task(user_id, action):
    """Task that may be canceled based on conditions."""

    # Check if user is still active
    if not is_user_active(user_id):
        # Cancel without retry
        raise CancelExecution("User is no longer active", retry=False)

    # Check system maintenance
    if is_maintenance_mode():
        # Cancel but retry after maintenance
        raise CancelExecution("System in maintenance mode", retry=True)

    # Proceed with task
    return perform_action(user_id, action)

@huey.pre_execute()
def check_global_conditions(task):
    """Pre-execution hook that can cancel tasks."""
    if is_emergency_mode():
        raise CancelExecution("Emergency mode active", retry=True)
```

### Exception Handling in Results

```python
from huey.exceptions import TaskException, ResultTimeout

@huey.task()
def risky_calculation(data):
    """Task that might fail with various errors."""
    if data < 0:
        raise ValueError("Negative values not supported")
    elif data > 1000:
        raise OverflowError("Value too large")
    else:
        return data ** 2

# Handle exceptions when retrieving results
result = risky_calculation(-5)

try:
    value = result.get(blocking=True, timeout=30)
    print(f"Result: {value}")
except TaskException as e:
    # Access original exception details
    error_info = e.metadata
    print(f"Task failed: {error_info['error']}")
    print(f"Task ID: {error_info['task_id']}")
    print(f"Retries attempted: {error_info['retries']}")
    print(f"Traceback: {error_info['traceback']}")
except ResultTimeout:
    print("Task took too long to complete")
```

### Lock Exception Handling

```python
from huey.exceptions import TaskLockedException, RetryTask, CancelExecution

@huey.task()
def resource_processor(resource_id):
    """Process resource with lock handling."""
    lock_name = f'resource_{resource_id}'

    try:
        with huey.lock_task(lock_name):
            # Process the resource
            return process_resource(resource_id)
    except TaskLockedException:
        # Resource is locked, decide what to do
        if is_urgent_processing(resource_id):
            # For urgent tasks, retry after a short delay
            raise RetryTask("Resource locked, retrying soon", delay=30)
        else:
            # For normal tasks, cancel without retry
            raise CancelExecution(
                f"Resource {resource_id} is busy",
                retry=False,
            )
```
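
`huey.lock_task` can also be applied as a decorator rather than a context manager. In that form a `TaskLockedException` raised when the lock is already held simply propagates out of the task, so it behaves like any other failure and is retried if retries are configured. A brief sketch (`build_report` is an illustrative helper):

```python
@huey.task(retries=2, retry_delay=10)
@huey.lock_task('report-generation')  # decorator form of the same lock
def generate_report(report_id):
    # If another worker holds the 'report-generation' lock, calling this
    # task raises TaskLockedException, which counts as a failure and is
    # retried according to the decorator's retry settings.
    return build_report(report_id)  # illustrative helper
```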

### Complex Error Handling Patterns

```python
import datetime
import logging

from huey.exceptions import RetryTask, CancelExecution

logger = logging.getLogger('task_errors')

@huey.task(retries=5, retry_delay=120)
def complex_data_processing(data_id):
    """Task with sophisticated error handling."""

    try:
        # Load data
        data = load_data(data_id)
        if not data:
            # No point retrying if data doesn't exist
            raise CancelExecution(
                f"Data {data_id} not found",
                retry=False,
            )

        # Validate data
        if not validate_data(data):
            # Data validation failed, might be temporary
            raise RetryTask(
                "Data validation failed",
                delay=300,  # Wait 5 minutes for data to be fixed
            )

        # Check system resources
        if not has_sufficient_resources():
            # Resource constraint, retry during off-peak hours
            off_peak = datetime.datetime.now().replace(
                hour=2, minute=0, second=0, microsecond=0
            )
            if off_peak <= datetime.datetime.now():
                off_peak += datetime.timedelta(days=1)

            raise RetryTask(
                "Insufficient resources",
                eta=off_peak,
            )

        # Process data
        result = process_data(data)
        return result

    except (RetryTask, CancelExecution):
        # Control-flow exceptions raised above: let Huey handle them
        raise

    except ConnectionError as e:
        # Network issues: back off before retrying
        logger.warning(f"Connection error in task {data_id}: {e}")
        raise RetryTask("Connection error", delay=120)

    except PermissionError as e:
        # Permission issues: don't retry
        logger.error(f"Permission error in task {data_id}: {e}")
        raise CancelExecution(
            "Insufficient permissions",
            retry=False,
        )

    except Exception as e:
        # Unexpected errors: log and retry with increasing delay
        logger.exception(f"Unexpected error in task {data_id}")

        # Calculate exponential backoff based on the retry count
        # (in a real implementation you'd track the attempt number;
        # see the context=True sketch below)
        delay = min(300 * (2 ** 0), 3600)  # Cap at 1 hour
        raise RetryTask(f"Unexpected error: {e}", delay=delay)
```
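
The placeholder backoff above can be made real by asking Huey to pass the task instance into the function: declaring the task with `context=True` adds a `task` keyword argument, whose `retries` attribute counts down as retries are consumed. A sketch of that pattern (`fetch_remote` is an illustrative helper; the arithmetic assumes the attribute starts at the decorator's `retries` value):

```python
MAX_RETRIES = 5

@huey.task(retries=MAX_RETRIES, retry_delay=60, context=True)
def fetch_with_backoff(url, task=None):
    try:
        return fetch_remote(url)  # illustrative helper
    except ConnectionError as exc:
        # task.retries counts down from MAX_RETRIES, so the number of
        # failed attempts so far is MAX_RETRIES - task.retries.
        attempt = MAX_RETRIES - task.retries
        delay = min(60 * (2 ** attempt), 3600)  # cap the backoff at 1 hour
        raise RetryTask(f"Connection error: {exc}", delay=delay)
```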

### Global Exception Handling

```python
from huey import signals as S
from huey.exceptions import TaskLockedException, RetryTask, CancelExecution

@huey.post_execute()
def global_error_handler(task, task_value, exception):
    """Global exception handler for all tasks."""

    if exception is None:
        # Task succeeded
        logger.info(f"Task {task.name} completed successfully")
        return

    # Log all exceptions
    logger.error(f"Task {task.name} failed: {exception}")

    # Handle specific exception types
    if isinstance(exception, TaskLockedException):
        # Track lock contention
        track_lock_contention(task.name)

    elif isinstance(exception, (RetryTask, CancelExecution)):
        # These are handled by Huey automatically
        logger.info(f"Task {task.name} control exception: {exception}")

    else:
        # Unexpected exceptions
        send_error_alert(task, exception)

@huey.signal(S.SIGNAL_ERROR)
def error_signal_handler(signal, task, exception):
    """Handle error signals."""
    logger.error(f"Signal {signal}: Task {task.id} error: {exception}")

    # Could implement:
    # - Error metrics collection
    # - Alert systems
    # - Automatic task rescheduling
    # - Dead letter queue (see the sketch below)
```
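
One way to realize the dead-letter idea mentioned above is to record terminally failed tasks from the error signal handler once no retries remain. A sketch, using a plain list as a stand-in for whatever storage (database table, Redis list, etc.) you would actually use:

```python
dead_letters = []  # stand-in for your own dead-letter storage

@huey.signal(S.SIGNAL_ERROR)
def capture_dead_letters(signal, task, exc):
    # SIGNAL_ERROR fires when a task raises an unhandled exception; only
    # record the task once its retries are exhausted.
    if not task.retries:
        dead_letters.append({
            'task_id': task.id,
            'name': task.name,
            'args': task.args,
            'kwargs': task.kwargs,
            'error': repr(exc),
        })
```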

### Exception-based Task Routing

```python
@huey.task()
def smart_processor(data, task_type='normal'):
    """Task that routes to different handlers based on exceptions."""

    try:
        if task_type == 'critical':
            return critical_processing(data)
        else:
            return normal_processing(data)

    except ResourceBusyError:  # application-defined exception
        # Route to a different retry schedule based on priority
        if task_type == 'critical':
            # High priority: short retry delay
            raise RetryTask("Resource busy", delay=10)
        else:
            # Low priority: longer delay
            raise RetryTask("Resource busy", delay=300)

    except DataCorruptionError:  # application-defined exception
        # Try to repair and retry
        try:
            repair_data(data)
        except RepairFailedError:
            # Can't repair, send to manual review
            send_to_manual_review(data)
            raise CancelExecution(
                "Data corruption - sent for manual review",
                retry=False,
            )
        raise RetryTask("Data repaired, retrying", delay=60)

# Different task types with different error handling
critical_result = smart_processor(data, 'critical')
normal_result = smart_processor(data, 'normal')
```