
# Runtime Utilities

Modal provides essential runtime utilities for execution context awareness, debugging, output control, and network tunneling within Modal functions. These utilities enable functions to understand their execution environment and provide enhanced debugging and development capabilities.

## Capabilities

### Execution Context Functions

Functions for understanding the current execution context and accessing runtime information.

```python { .api }
def current_function_call_id() -> Optional[str]:
    """Get the ID of the currently executing function call"""

def current_input_id() -> Optional[str]:
    """Get the ID of the current function input"""

def is_local() -> bool:
    """Check if code is running locally vs in Modal cloud"""
```

#### Usage Examples

```python
import modal

app = modal.App("context-aware")

@app.function()
def context_aware_function(data: str):
    """Function that uses runtime context information"""

    # Check execution environment
    if modal.is_local():
        print("Running locally - using development configuration")
        debug_mode = True
        log_level = "DEBUG"
    else:
        print("Running in Modal cloud - using production configuration")
        debug_mode = False
        log_level = "INFO"

    # Get runtime identifiers
    call_id = modal.current_function_call_id()
    input_id = modal.current_input_id()

    print(f"Function call ID: {call_id}")
    print(f"Input ID: {input_id}")

    # Use context for logging and tracing
    log_entry = {
        "timestamp": time.time(),
        "function_call_id": call_id,
        "input_id": input_id,
        "environment": "local" if modal.is_local() else "cloud",
        "data": data,
        "debug_mode": debug_mode
    }

    # Process data with context-aware behavior
    if debug_mode:
        result = process_with_detailed_logging(data, log_entry)
    else:
        result = process_efficiently(data, log_entry)

    return {
        "result": result,
        "call_id": call_id,
        "input_id": input_id,
        "environment": "local" if modal.is_local() else "cloud"
    }

@app.local_entrypoint()
def main():
    # Test locally
    local_result = context_aware_function("local_test_data")
    print("Local result:", local_result)

    # Test in cloud
    cloud_result = context_aware_function.remote("cloud_test_data")
    print("Cloud result:", cloud_result)
```

### Interactive Debugging

Function for enabling interactive debugging sessions within Modal containers.

```python { .api }
def interact() -> None:
    """Start an interactive debugging session (only works in Modal containers)"""
```

#### Usage Examples

```python
import modal

app = modal.App("debugging")

@app.function()
def debug_function(data: dict):
    """Function with interactive debugging capabilities"""

    print("Starting data processing...")

    # Process first part
    intermediate_result = preprocess_data(data)

    # Interactive debugging point
    if should_debug(data):
        print("Entering interactive debugging mode...")
        print("Available variables: data, intermediate_result")
        print("Type 'continue' or Ctrl+D to exit debugging")

        # This opens an interactive Python shell inside the container
        modal.interact()

    # Continue processing after debugging
    final_result = postprocess_data(intermediate_result)

    return final_result

def should_debug(data: dict) -> bool:
    """Determine if debugging should be enabled"""
    # Enable debugging for specific conditions
    return (
        data.get("debug_flag", False) or
        data.get("complexity_score", 0) > 0.8 or
        "error_prone_input" in str(data)
    )

@app.local_entrypoint()
def main():
    # Normal execution
    result1 = debug_function.remote({"value": 42})
    print("Normal result:", result1)

    # Execution with debugging
    result2 = debug_function.remote({
        "value": 42,
        "debug_flag": True,
        "complex_data": [1, 2, 3, 4, 5]
    })
    print("Debug result:", result2)
```

### Output Control

Function for controlling output display and progress indicators during Modal function execution.

```python { .api }
def enable_output(show_progress: bool = True) -> None:
    """Enable output streaming for functions (context manager)"""
```

#### Usage Examples

```python
import modal

app = modal.App("output-control")

@app.function()
def processing_function(items: list[str]) -> list[str]:
    """Function that processes items with output"""
    results = []

    print("Starting batch processing...")

    for i, item in enumerate(items):
        # Print progress
        print(f"Processing item {i+1}/{len(items)}: {item}")

        # Simulate processing
        result = expensive_operation(item)
        results.append(result)

        # Periodic status updates
        if (i + 1) % 10 == 0:
            print(f"Completed {i+1} items, {len(items) - i - 1} remaining")

    print("Batch processing completed!")
    return results

@app.function()
def map_processing_function(item: str) -> str:
    """Function for use with map() that shows progress"""
    print(f"Processing: {item}")
    result = complex_processing(item)
    print(f"Completed: {item} -> {result}")
    return result

@app.local_entrypoint()
def main():
    # Enable output to see function logs and progress
    with modal.enable_output(show_progress=True):

        # Single function with output
        items = [f"item_{i}" for i in range(50)]
        batch_result = processing_function.remote(items)
        print(f"Batch processing completed: {len(batch_result)} results")

        # Map with progress tracking
        map_items = [f"map_item_{i}" for i in range(20)]
        map_results = list(map_processing_function.map(map_items))
        print(f"Map processing completed: {len(map_results)} results")

# Alternative: Enable output for specific operations
@app.local_entrypoint()
def selective_output():
    """Enable output only for specific operations"""

    # Normal execution (no output)
    quiet_result = processing_function.remote([f"quiet_{i}" for i in range(5)])

    # With output enabled
    with modal.enable_output():
        verbose_result = processing_function.remote([f"verbose_{i}" for i in range(5)])

    print("Selective output demo completed")
```

### Network Tunneling

Function for forwarding network traffic through tunnels, enabling secure connections to remote services.

```python { .api }
def forward(port: int, *, host: str = "localhost") -> str:
    """Forward network traffic through tunnel"""
```

#### Usage Examples

```python
import modal

app = modal.App("network-tunneling")

@app.function()
def tunnel_database_connection():
    """Connect to database through network tunnel"""

    # Forward database port through tunnel
    tunnel_url = modal.forward(5432, host="internal-database.company.com")

    print(f"Database tunnel established: {tunnel_url}")

    # Connect to database through tunnel
    import psycopg2
    connection = psycopg2.connect(
        host=tunnel_url.split("://")[1].split(":")[0],
        port=int(tunnel_url.split(":")[-1]),
        database="production_db",
        user="app_user",
        password=os.environ["DB_PASSWORD"]
    )

    # Use the connection
    cursor = connection.cursor()
    cursor.execute("SELECT COUNT(*) FROM users")
    count = cursor.fetchone()[0]

    connection.close()
    return {"user_count": count, "tunnel_url": tunnel_url}

@app.function()
def tunnel_api_access():
    """Access internal API through tunnel"""

    # Forward API port
    api_tunnel = modal.forward(8080, host="internal-api.company.com")

    print(f"API tunnel established: {api_tunnel}")

    # Make requests through tunnel
    import requests
    response = requests.get(f"{api_tunnel}/api/v1/data")

    return {
        "api_response": response.json(),
        "status_code": response.status_code,
        "tunnel_url": api_tunnel
    }

@app.function()
def multi_service_tunneling():
    """Connect to multiple services through tunnels"""

    # Set up multiple tunnels
    database_tunnel = modal.forward(5432, host="db.internal")
    redis_tunnel = modal.forward(6379, host="redis.internal")
    elasticsearch_tunnel = modal.forward(9200, host="es.internal")

    services = {
        "database": database_tunnel,
        "redis": redis_tunnel,
        "elasticsearch": elasticsearch_tunnel
    }

    print("All tunnels established:")
    for service, url in services.items():
        print(f"  {service}: {url}")

    # Use services through tunnels
    results = {}

    # Database query
    db_result = query_database_through_tunnel(database_tunnel)
    results["database"] = db_result

    # Redis operation
    redis_result = access_redis_through_tunnel(redis_tunnel)
    results["redis"] = redis_result

    # Elasticsearch search
    es_result = search_elasticsearch_through_tunnel(elasticsearch_tunnel)
    results["elasticsearch"] = es_result

    return {
        "results": results,
        "tunnels": services
    }

def query_database_through_tunnel(tunnel_url: str) -> dict:
    """Helper function to query database through tunnel"""
    # Extract host and port from tunnel URL
    host = tunnel_url.split("://")[1].split(":")[0]
    port = int(tunnel_url.split(":")[-1])

    # Connect and query
    connection = create_db_connection(host, port)
    cursor = connection.cursor()
    cursor.execute("SELECT version()")
    version = cursor.fetchone()[0]
    connection.close()

    return {"version": version, "status": "connected"}

def access_redis_through_tunnel(tunnel_url: str) -> dict:
    """Helper function to access Redis through tunnel"""
    import redis

    host = tunnel_url.split("://")[1].split(":")[0]
    port = int(tunnel_url.split(":")[-1])

    client = redis.Redis(host=host, port=port)
    client.set("tunnel_test", "success")
    value = client.get("tunnel_test").decode()

    return {"test_value": value, "status": "connected"}

@app.local_entrypoint()
def main():
    # Test database tunnel
    db_result = tunnel_database_connection.remote()
    print("Database tunnel result:", db_result)

    # Test API tunnel
    api_result = tunnel_api_access.remote()
    print("API tunnel result:", api_result)

    # Test multiple tunnels
    multi_result = multi_service_tunneling.remote()
    print("Multi-service tunnel result:", multi_result)
```

## Advanced Runtime Patterns

### Context-Aware Error Handling

```python
import modal

app = modal.App("context-error-handling")

@app.function()
def robust_function(data: dict):
    """Function with context-aware error handling"""

    call_id = modal.current_function_call_id()
    input_id = modal.current_input_id()
    is_local_env = modal.is_local()

    error_context = {
        "call_id": call_id,
        "input_id": input_id,
        "environment": "local" if is_local_env else "cloud",
        "timestamp": time.time()
    }

    try:
        result = risky_operation(data)
        return {"success": True, "result": result, "context": error_context}

    except Exception as e:
        # Enhanced error information with runtime context
        error_info = {
            "error": str(e),
            "error_type": type(e).__name__,
            "context": error_context,
            "data_summary": {
                "size": len(str(data)),
                "keys": list(data.keys()) if isinstance(data, dict) else None
            }
        }

        # Different handling based on environment
        if is_local_env:
            print("Local error - entering debug mode")
            print(f"Error context: {error_info}")
            modal.interact()  # Interactive debugging in local mode
        else:
            print("Cloud error - logging and continuing")
            log_error_to_monitoring_system(error_info)

        return {"success": False, "error": error_info}
```

### Distributed Tracing with Context

```python
import modal

app = modal.App("distributed-tracing")

# Shared tracing storage
trace_store = modal.Dict.persist("trace-data")

@app.function()
def traced_function_a(data: str):
    """First function in trace chain"""

    call_id = modal.current_function_call_id()
    trace_id = f"trace_{int(time.time())}"

    # Start trace
    trace_entry = {
        "trace_id": trace_id,
        "function": "traced_function_a",
        "call_id": call_id,
        "start_time": time.time(),
        "input_data": data
    }

    print(f"Starting trace {trace_id} in function A")

    # Process data
    result_a = process_step_a(data)

    # Call next function with trace context
    result_b = traced_function_b.remote(result_a, trace_id)

    # Complete trace entry
    trace_entry.update({
        "end_time": time.time(),
        "result": "completed",
        "next_call": "traced_function_b"
    })

    # Store trace data
    trace_store[f"{trace_id}_function_a"] = trace_entry

    return {"result": result_b, "trace_id": trace_id}

@app.function()
def traced_function_b(data: str, trace_id: str):
    """Second function in trace chain"""

    call_id = modal.current_function_call_id()

    trace_entry = {
        "trace_id": trace_id,
        "function": "traced_function_b",
        "call_id": call_id,
        "start_time": time.time(),
        "input_data": data
    }

    print(f"Continuing trace {trace_id} in function B")

    # Process data
    result = process_step_b(data)

    # Complete trace
    trace_entry.update({
        "end_time": time.time(),
        "result": result
    })

    trace_store[f"{trace_id}_function_b"] = trace_entry

    return result

@app.function()
def get_trace_summary(trace_id: str):
    """Get complete trace summary"""

    # Collect all trace entries
    trace_entries = {}
    for key in trace_store.keys():
        if key.startswith(trace_id):
            trace_entries[key] = trace_store.get(key)

    # Calculate trace metrics
    start_times = [entry["start_time"] for entry in trace_entries.values()]
    end_times = [entry["end_time"] for entry in trace_entries.values()]

    total_duration = max(end_times) - min(start_times)

    return {
        "trace_id": trace_id,
        "total_duration": total_duration,
        "function_count": len(trace_entries),
        "entries": trace_entries
    }

@app.local_entrypoint()
def main():
    with modal.enable_output():
        # Execute traced function chain
        result = traced_function_a.remote("test_data")

        # Get trace summary
        trace_summary = get_trace_summary.remote(result["trace_id"])
        print("Trace summary:", trace_summary)
```

### Development vs Production Runtime

```python
import modal

app = modal.App("environment-aware")

@app.function()
def environment_aware_processing(data: dict):
    """Function that adapts behavior based on runtime environment"""

    is_local_env = modal.is_local()
    call_id = modal.current_function_call_id()

    # Environment-specific configuration
    if is_local_env:
        config = {
            "timeout": 30,
            "retries": 1,
            "logging_level": "DEBUG",
            "use_cache": False,
            "external_apis": "staging"
        }
        print("Using development configuration")
    else:
        config = {
            "timeout": 300,
            "retries": 3,
            "logging_level": "INFO",
            "use_cache": True,
            "external_apis": "production"
        }
        print("Using production configuration")

    print(f"Processing with config: {config}")
    print(f"Call ID: {call_id}")

    # Adaptive processing based on environment
    try:
        if config["use_cache"]:
            # Check cache first in production
            cached_result = check_cache(data)
            if cached_result:
                return {"result": cached_result, "source": "cache", "config": config}

        # Process with environment-specific settings
        result = process_with_config(data, config)

        if config["use_cache"]:
            # Cache result in production
            store_in_cache(data, result)

        return {"result": result, "source": "computed", "config": config}

    except Exception as e:
        error_info = {
            "error": str(e),
            "environment": "local" if is_local_env else "production",
            "call_id": call_id,
            "config": config
        }

        if is_local_env:
            # More verbose error handling in development
            print(f"Development error: {error_info}")
            modal.interact()  # Debug interactively

        return {"error": error_info, "success": False}

def process_with_config(data: dict, config: dict):
    """Process data with environment-specific configuration"""

    # Simulate processing with different timeouts
    import time
    processing_time = min(config["timeout"] / 10, 5)  # Simulate work
    time.sleep(processing_time)

    # Different API endpoints based on environment
    api_base = f"https://api-{config['external_apis']}.example.com"

    return {
        "processed_data": f"processed_{data}",
        "api_used": api_base,
        "processing_time": processing_time
    }

@app.local_entrypoint()
def main():
    test_data = {"input": "test_value", "timestamp": time.time()}

    with modal.enable_output():
        # This will use development config when run locally
        local_result = environment_aware_processing(test_data)
        print("Local result:", local_result)

        # This will use production config when run in Modal cloud
        cloud_result = environment_aware_processing.remote(test_data)
        print("Cloud result:", cloud_result)
```