or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

advanced.md capture.md configuration.md controls.md core-operations.md index.md preview.md recording.md

docs/advanced.md

0

# Advanced Features

1

2

Advanced functionality including mode switching, autofocus control, frame dropping, device-specific AI sensor integration, and platform-specific optimizations. These features enable sophisticated camera applications and integration with specialized hardware.

3

4

## Capabilities

5

6

### Mode Switching

7

8

Dynamic configuration changes without stopping the camera system.

9

10

```python { .api }

11

def switch_mode(

12

self,

13

camera_config: CameraConfiguration,

14

wait: bool = True,

15

signal_function: callable = None

16

):

17

"""

18

Switch camera configuration dynamically.

19

20

Parameters:

21

- camera_config: CameraConfiguration, new configuration to apply

22

- wait: bool, wait for switch completion

23

- signal_function: callable, completion callback

24

25

Returns:

26

Job object if wait=False, None if wait=True

27

"""

28

29

def switch_mode_and_drop_frames(

30

self,

31

camera_config: CameraConfiguration,

32

num_frames: int,

33

wait: bool = True,

34

signal_function: callable = None

35

):

36

"""

37

Switch mode and drop initial frames for settling.

38

39

Parameters:

40

- camera_config: CameraConfiguration, new configuration

41

- num_frames: int, number of frames to drop after switch

42

- wait: bool, wait for completion

43

- signal_function: callable, completion callback

44

"""

45

```

46

47

### Combined Mode Switch and Capture

48

49

Atomic operations that switch mode and immediately capture.

50

51

```python { .api }

52

def switch_mode_and_capture_file(

53

self,

54

camera_config: CameraConfiguration,

55

file_output: str,

56

name: str = "main",

57

format: str = None,

58

wait: bool = True,

59

signal_function: callable = None,

60

exif_data: dict = None

61

):

62

"""

63

Switch mode and capture file atomically.

64

65

Parameters:

66

- camera_config: CameraConfiguration, new configuration

67

- file_output: str, output file path

68

- name: str, stream name to capture

69

- format: str, output format

70

- wait: bool, wait for completion

71

- signal_function: callable, completion callback

72

- exif_data: dict, EXIF metadata

73

"""

74

75

def switch_mode_and_capture_array(

76

self,

77

camera_config: CameraConfiguration,

78

name: str = "main",

79

wait: bool = True,

80

signal_function: callable = None,

81

delay: float = None

82

) -> np.ndarray:

83

"""

84

Switch mode and capture array atomically.

85

86

Parameters:

87

- camera_config: CameraConfiguration, new configuration

88

- name: str, stream name to capture

89

- wait: bool, wait for completion

90

- signal_function: callable, completion callback

91

- delay: float, delay before capture

92

93

Returns:

94

np.ndarray: Captured image array

95

"""

96

97

def switch_mode_and_capture_request(

98

self,

99

camera_config: CameraConfiguration,

100

wait: bool = True,

101

signal_function: callable = None,

102

delay: float = None

103

) -> CompletedRequest:

104

"""

105

Switch mode and capture complete request atomically.

106

107

Parameters:

108

- camera_config: CameraConfiguration, new configuration

109

- wait: bool, wait for completion

110

- signal_function: callable, completion callback

111

- delay: float, delay before capture

112

113

Returns:

114

CompletedRequest: Complete request with all streams

115

"""

116

```

117

118

### Frame Management

119

120

Control frame dropping and camera timing.

121

122

```python { .api }

123

def drop_frames(

124

self,

125

num_frames: int,

126

wait: bool = True,

127

signal_function: callable = None

128

):

129

"""

130

Drop specified number of frames from stream.

131

132

Parameters:

133

- num_frames: int, number of frames to drop

134

- wait: bool, wait for completion

135

- signal_function: callable, completion callback

136

137

Useful for allowing camera settings to settle after changes.

138

"""

139

```

140

141

### Autofocus Control

142

143

Advanced autofocus operations and control.

144

145

```python { .api }

146

def autofocus_cycle(

147

self,

148

wait: bool = True,

149

signal_function: callable = None

150

):

151

"""

152

Trigger complete autofocus cycle.

153

154

Parameters:

155

- wait: bool, wait for focus completion

156

- signal_function: callable, completion callback

157

158

Performs full autofocus scan and locks focus at optimal position.

159

"""

160

```

161

162

### Asynchronous Job Management

163

164

Control and synchronization of asynchronous operations.

165

166

```python { .api }

167

def wait(self, job, timeout: float = None):

168

"""

169

Wait for job completion.

170

171

Parameters:

172

- job: Job object from async operation

173

- timeout: float, timeout in seconds (None = infinite)

174

175

Returns:

176

Job result

177

178

Raises:

179

- TimeoutError: If timeout exceeded

180

"""

181

182

def dispatch_functions(

183

self,

184

functions: list[callable],

185

wait: bool = True,

186

signal_function: callable = None,

187

immediate: bool = False

188

):

189

"""

190

Dispatch list of functions for execution.

191

192

Parameters:

193

- functions: list of callable functions

194

- wait: bool, wait for all functions to complete

195

- signal_function: callable, completion callback

196

- immediate: bool, execute immediately vs queued

197

"""

198

199

class Job:

200

"""Asynchronous job handle."""

201

202

def execute(self):

203

"""Execute the job."""

204

205

def signal(self):

206

"""Signal job completion."""

207

208

def wait(self, timeout: float = None):

209

"""

210

Wait for job completion.

211

212

Parameters:

213

- timeout: float, timeout in seconds

214

215

Returns:

216

Job result

217

"""

218

219

def result(self):

220

"""Get job result (non-blocking)."""

221

222

@property

223

def finished(self) -> bool:

224

"""Whether job is finished."""

225

```

226

227

### Device-Specific Integration

228

229

Integration with specialized camera sensors and AI accelerators.

230

231

```python { .api }

232

# IMX500 AI Sensor

233

class IMX500:

234

"""IMX500 AI sensor integration."""

235

236

def __init__(self, picam2: Picamera2):

237

"""Initialize IMX500 with camera instance."""

238

239

def set_model(self, model_path: str):

240

"""Load AI model for on-sensor processing."""

241

242

def get_network_intrinsics(self) -> NetworkIntrinsics:

243

"""Get network intrinsics for model."""

244

245

class NetworkIntrinsics:

246

"""Neural network intrinsics for IMX500."""

247

248

input_width: int

249

input_height: int

250

input_channels: int

251

output_tensors: list

252

253

# Postprocessing functions for common AI models

254

def postprocess_efficientdet_lite0_detection(

255

outputs: list,

256

network_intrinsics: NetworkIntrinsics,

257

threshold: float = 0.5

258

) -> list:

259

"""

260

Postprocess EfficientDet-Lite0 detection outputs.

261

262

Parameters:

263

- outputs: list, raw model outputs

264

- network_intrinsics: NetworkIntrinsics, model metadata

265

- threshold: float, confidence threshold

266

267

Returns:

268

list: Detected objects with bounding boxes and scores

269

"""

270

271

def postprocess_yolov5_detection(

272

outputs: list,

273

network_intrinsics: NetworkIntrinsics,

274

threshold: float = 0.5

275

) -> list:

276

"""Postprocess YOLOv5 detection outputs."""

277

278

def postprocess_yolov8_detection(

279

outputs: list,

280

network_intrinsics: NetworkIntrinsics,

281

threshold: float = 0.5

282

) -> list:

283

"""Postprocess YOLOv8 detection outputs."""

284

285

# Hailo AI Accelerator

286

class Hailo:

287

"""Hailo AI accelerator integration."""

288

289

def __init__(self):

290

"""Initialize Hailo accelerator."""

291

292

def load_model(self, model_path: str):

293

"""Load model onto Hailo accelerator."""

294

295

def run_inference(self, input_data: np.ndarray) -> list:

296

"""Run inference on accelerator."""

297

```

298

299

### Platform-Specific Features

300

301

Platform detection and optimization.

302

303

```python { .api }

304

class Platform(Enum):

305

"""Platform types."""

306

VC4 = "vc4" # Raspberry Pi 4 and earlier

307

PISP = "pisp" # Raspberry Pi 5 and newer

308

309

def get_platform() -> Platform:

310

"""

311

Detect current platform.

312

313

Returns:

314

Platform: Current platform type

315

"""

316

317

# Platform-specific tuning and configuration

318

def load_tuning_file(tuning_file: str, dir: str = None) -> dict:

319

"""

320

Load camera tuning file.

321

322

Parameters:

323

- tuning_file: str, tuning file name

324

- dir: str, directory to search (None = default paths)

325

326

Returns:

327

dict: Tuning parameters

328

"""

329

330

def find_tuning_algo(tuning: dict, name: str) -> dict:

331

"""

332

Find algorithm parameters in tuning data.

333

334

Parameters:

335

- tuning: dict, tuning parameters

336

- name: str, algorithm name

337

338

Returns:

339

dict: Algorithm parameters

340

"""

341

```

342

343

### Memory Management

344

345

Advanced memory allocation and management.

346

347

```python { .api }

348

class MappedArray:

349

"""Context manager for memory-mapped array access."""

350

351

def __init__(

352

self,

353

request: CompletedRequest,

354

stream: str,

355

reshape: bool = True,

356

write: bool = False

357

):

358

"""

359

Initialize mapped array.

360

361

Parameters:

362

- request: CompletedRequest, source request

363

- stream: str, stream name

364

- reshape: bool, reshape to image dimensions

365

- write: bool, allow write access

366

"""

367

368

def __enter__(self) -> 'MappedArray':

369

"""Enter context and map buffer."""

370

371

def __exit__(self, exc_type, exc_val, exc_tb):

372

"""Exit context and unmap buffer."""

373

374

@property

375

def array(self) -> np.ndarray:

376

"""Access to mapped numpy array."""

377

378

# Allocator system for advanced memory management

379

class LibcameraAllocator:

380

"""Default libcamera buffer allocator."""

381

382

class DmaAllocator:

383

"""DMA buffer allocator for zero-copy operations."""

384

385

class PersistentAllocator:

386

"""Persistent buffer allocator for reduced allocation overhead."""

387

```

388

389

## Usage Examples

390

391

### Dynamic Mode Switching

392

393

```python

394

from picamera2 import Picamera2

395

import time

396

397

picam2 = Picamera2()

398

399

# Start with preview configuration

400

preview_config = picam2.create_preview_configuration()

401

picam2.configure(preview_config)

402

picam2.start()

403

404

# Preview mode active

405

time.sleep(2)

406

407

# Switch to high-resolution still mode

408

still_config = picam2.create_still_configuration(

409

main={"size": (4056, 3040), "format": "RGB888"}

410

)

411

412

picam2.switch_mode(still_config)

413

picam2.capture_file("high_res.jpg")

414

415

# Switch back to preview

416

picam2.switch_mode(preview_config)

417

418

picam2.close()

419

```

420

421

### Atomic Mode Switch and Capture

422

423

```python

424

from picamera2 import Picamera2

425

426

picam2 = Picamera2()

427

preview_config = picam2.create_preview_configuration()

428

picam2.configure(preview_config)

429

picam2.start()

430

431

# Create still configuration

432

still_config = picam2.create_still_configuration(

433

main={"size": (2592, 1944), "format": "RGB888"}

434

)

435

436

# Atomic switch and capture - no intermediate state

437

picam2.switch_mode_and_capture_file(

438

still_config,

439

"atomic_capture.jpg"

440

)

441

442

# Camera automatically returns to previous mode

443

picam2.close()

444

```

445

446

### Frame Dropping for Stability

447

448

```python

449

from picamera2 import Picamera2

450

451

picam2 = Picamera2()

452

picam2.configure(picam2.create_preview_configuration())

453

picam2.start()

454

455

# Change camera settings

456

picam2.set_controls({

457

"ExposureTime": 50000, # Long exposure

458

"AnalogueGain": 4.0

459

})

460

461

# Drop frames to allow settings to settle

462

picam2.drop_frames(5)

463

464

# Now capture with settled settings

465

picam2.capture_file("settled_image.jpg")

466

467

picam2.close()

468

```

469

470

### Autofocus Control

471

472

```python

473

from picamera2 import Picamera2

474

import time

475

476

picam2 = Picamera2()

477

picam2.configure(picam2.create_still_configuration())

478

picam2.start()

479

480

# Enable autofocus

481

picam2.set_controls({"AfMode": 1}) # Auto focus mode

482

483

# Trigger focus cycle

484

picam2.autofocus_cycle()

485

486

# Capture with optimal focus

487

picam2.capture_file("focused_image.jpg")

488

489

# Manual focus sweep

490

picam2.set_controls({"AfMode": 0}) # Manual mode

491

focus_positions = [0.5, 1.0, 2.0, 5.0, 10.0]

492

493

for pos in focus_positions:

494

picam2.set_controls({"LensPosition": pos})

495

picam2.drop_frames(3) # Allow focus to settle

496

picam2.capture_file(f"focus_{pos:.1f}.jpg")

497

498

picam2.close()

499

```

500

501

### Asynchronous Operations

502

503

```python

504

from picamera2 import Picamera2

505

import time

506

507

def capture_complete(job):

508

print(f"Capture {job.result()} completed")

509

510

picam2 = Picamera2()

511

picam2.configure(picam2.create_preview_configuration())

512

picam2.start()

513

514

# Start multiple async captures

515

jobs = []

516

for i in range(5):

517

job = picam2.capture_file(

518

f"async_{i}.jpg",

519

wait=False,

520

signal_function=capture_complete

521

)

522

jobs.append(job)

523

time.sleep(0.2)

524

525

# Wait for all to complete

526

for job in jobs:

527

job.wait()

528

529

print("All captures completed")

530

picam2.close()

531

```

532

533

### IMX500 AI Integration

534

535

```python

536

from picamera2 import Picamera2

537

from picamera2.devices.imx500 import IMX500, postprocess_yolov5_detection

538

import numpy as np

539

540

picam2 = Picamera2()

541

542

# Configure for AI processing

543

config = picam2.create_preview_configuration(

544

main={"size": (640, 640), "format": "RGB888"}

545

)

546

picam2.configure(config)

547

548

# Initialize IMX500

549

imx500 = IMX500(picam2)

550

imx500.set_model("yolov5_model.rpk")

551

552

picam2.start()

553

554

while True:

555

# Capture frame

556

request = picam2.capture_request()

557

558

# Get AI inference results

559

outputs = imx500.get_outputs()

560

intrinsics = imx500.get_network_intrinsics()

561

562

# Postprocess detections

563

detections = postprocess_yolov5_detection(

564

outputs, intrinsics, threshold=0.5

565

)

566

567

# Process detections

568

for detection in detections:

569

bbox = detection['bbox']

570

score = detection['score']

571

class_id = detection['class_id']

572

print(f"Detected class {class_id} at {bbox} with score {score}")

573

574

request.release()

575

576

if len(detections) > 0:

577

break

578

579

picam2.close()

580

```

581

582

### Platform-Specific Optimization

583

584

```python

585

from picamera2 import Picamera2

586

from picamera2.platform import Platform, get_platform

587

588

picam2 = Picamera2()

589

590

# Configure based on platform capabilities

591

platform = get_platform()

592

593

if platform == Platform.PISP:

594

# Raspberry Pi 5 - can use higher resolutions and frame rates

595

config = picam2.create_video_configuration(

596

main={"size": (1920, 1080), "format": "YUV420"}

597

)

598

# Use higher buffer count for better performance

599

config.buffer_count = 4

600

else:

601

# Raspberry Pi 4 and earlier

602

config = picam2.create_video_configuration(

603

main={"size": (1280, 720), "format": "YUV420"}

604

)

605

config.buffer_count = 2

606

607

picam2.configure(config)

608

picam2.start()

609

610

print(f"Running on {platform.value} platform")

611

print(f"Configuration: {config.main.size} @ {config.buffer_count} buffers")

612

613

picam2.close()

614

```

615

616

### Memory-Mapped Access

617

618

```python

619

from picamera2 import Picamera2, MappedArray

620

import numpy as np

621

622

picam2 = Picamera2()

623

config = picam2.create_preview_configuration(

624

main={"format": "YUV420", "size": (640, 480)}

625

)

626

picam2.configure(config)

627

picam2.start()

628

629

# Capture request

630

request = picam2.capture_request()

631

632

# Zero-copy access to buffer data

633

with MappedArray(request, "main") as mapped:

634

# Direct access to camera buffer

635

yuv_data = mapped.array

636

637

# Process Y channel (luminance) in-place

638

y_channel = yuv_data[:480, :] # Y plane

639

y_channel[y_channel < 50] = 0 # Threshold dark pixels

640

641

# Changes are made directly to camera buffer

642

643

# Save processed result

644

request.save("main", "processed.jpg")

645

request.release()

646

647

picam2.close()

648

```

649

650

### Custom Function Dispatch

651

652

```python

653

from picamera2 import Picamera2
import time

654

655

def custom_processing():

656

print("Custom processing started")

657

time.sleep(1)

658

print("Custom processing completed")

659

return "processing_result"

660

661

def another_function():

662

print("Another function executed")

663

return "another_result"

664

665

picam2 = Picamera2()

666

picam2.configure(picam2.create_preview_configuration())

667

picam2.start()

668

669

# Dispatch multiple functions

670

functions = [custom_processing, another_function]

671

picam2.dispatch_functions(functions, wait=True)

672

673

print("All functions completed")

674

picam2.close()

675

```