# Processing & Filters

Real-time frame processing with built-in filters for noise reduction, alignment, colorization, and format conversion. Supports custom processing blocks and filter chaining for advanced computer vision pipelines.

## Capabilities

### Processing Framework

Base classes for frame processing and filtering.

```python { .api }
class processing_block:
    def __init__(processing_function):
        """
        Create custom processing block.

        Args:
            processing_function: Function that processes frames
        """

    def start(callback_function):
        """
        Start processing with callback.

        Args:
            callback_function: Function called with processed frames
        """

    def invoke(frame):
        """
        Process single frame synchronously.

        Args:
            frame (frame): Input frame to process
        """

    def supports(camera_info) -> bool:
        """
        Check if processing block supports camera info.

        Args:
            camera_info (camera_info): Info field to check

        Returns:
            bool: True if supported
        """

    def get_info(camera_info) -> str:
        """
        Get processing block information.

        Args:
            camera_info (camera_info): Info field to retrieve

        Returns:
            str: Information value
        """

class filter_interface:
    def process(frame) -> frame:
        """
        Process frame and return result.

        Args:
            frame (frame): Input frame

        Returns:
            frame: Processed frame
        """

class filter(processing_block, filter_interface):
    def __init__(filter_function, queue_size=1):
        """
        Create filter with custom function.

        Args:
            filter_function: Function that processes frames
            queue_size (int): Internal queue size
        """

    # Type checking methods for specific filters
    def is_colorizer() -> bool:
        """Check if filter is a colorizer."""

    def is_decimation_filter() -> bool:
        """Check if filter is a decimation filter."""

    def is_temporal_filter() -> bool:
        """Check if filter is a temporal filter."""

    def is_spatial_filter() -> bool:
        """Check if filter is a spatial filter."""

    def is_hole_filling_filter() -> bool:
        """Check if filter is a hole filling filter."""

    def is_disparity_transform() -> bool:
        """Check if filter is a disparity transform."""

    def is_threshold_filter() -> bool:
        """Check if filter is a threshold filter."""

    def is_align() -> bool:
        """Check if filter is an align filter."""

    def is_pointcloud() -> bool:
        """Check if filter is a pointcloud filter."""

    # Type casting methods
    def as_colorizer() -> colorizer:
        """Cast to colorizer filter."""

    def as_decimation_filter() -> decimation_filter:
        """Cast to decimation filter."""

    def as_temporal_filter() -> temporal_filter:
        """Cast to temporal filter."""

    def as_spatial_filter() -> spatial_filter:
        """Cast to spatial filter."""

    def as_hole_filling_filter() -> hole_filling_filter:
        """Cast to hole filling filter."""

    def as_disparity_transform() -> disparity_transform:
        """Cast to disparity transform."""

    def as_threshold_filter() -> threshold_filter:
        """Cast to threshold filter."""

    def as_align() -> align:
        """Cast to align filter."""

    def as_pointcloud() -> pointcloud:
        """Cast to pointcloud filter."""

class frame_source:
    def allocate_video_frame(profile, original, new_bpp=0, new_width=0,
                             new_height=0, new_stride=0, frame_type=rs.frame_type.video_frame) -> video_frame:
        """
        Allocate new video frame.

        Args:
            profile (stream_profile): Stream profile for new frame
            original (frame): Original frame for reference
            new_bpp (int): New bits per pixel (0 for same as original)
            new_width (int): New width (0 for same as original)
            new_height (int): New height (0 for same as original)
            new_stride (int): New stride (0 for calculated)
            frame_type (frame_type): Type of frame to allocate

        Returns:
            video_frame: Allocated frame
        """

    def allocate_motion_frame(profile, original, frame_type=rs.frame_type.motion_frame) -> motion_frame:
        """
        Allocate new motion frame.

        Args:
            profile (stream_profile): Stream profile for new frame
            original (frame): Original frame for reference
            frame_type (frame_type): Type of frame to allocate

        Returns:
            motion_frame: Allocated frame
        """

    def allocate_points(profile, original) -> points:
        """
        Allocate new point cloud frame.

        Args:
            profile (stream_profile): Stream profile
            original (frame): Original frame for reference

        Returns:
            points: Allocated point cloud
        """

    def allocate_composite_frame(frames) -> frameset:
        """
        Allocate composite frame from multiple frames.

        Args:
            frames (list[frame]): Frames to combine

        Returns:
            frameset: Composite frame
        """

    def frame_ready(result_frame):
        """
        Signal that frame is ready for output.

        Args:
            result_frame (frame): Processed frame to output
        """
```

### Synchronization

Frame synchronization and buffering for multi-stream processing.

```python { .api }
class frame_queue:
    def __init__(capacity=1, keep_frames=False):
        """
        Create frame queue for buffering.

        Args:
            capacity (int): Maximum number of frames to buffer
            keep_frames (bool): Whether to keep frame references
        """

    def enqueue(frame):
        """
        Add frame to queue.

        Args:
            frame (frame): Frame to add
        """

    def wait_for_frame(timeout_ms=5000) -> frame:
        """
        Wait for next frame from queue.

        Args:
            timeout_ms (int): Maximum wait time

        Returns:
            frame: Next available frame

        Raises:
            rs.error: If timeout expires
        """

    def poll_for_frame() -> frame:
        """
        Get next frame without blocking.

        Returns:
            frame: Next frame or None if queue empty
        """

    def try_wait_for_frame(timeout_ms=5000) -> tuple[bool, frame]:
        """
        Try to get next frame with timeout.

        Args:
            timeout_ms (int): Maximum wait time

        Returns:
            tuple: (success, frame)
        """

    def capacity() -> int:
        """
        Get queue capacity.

        Returns:
            int: Maximum queue size
        """

    def size() -> int:
        """
        Get current queue size.

        Returns:
            int: Number of frames in queue
        """

    def keep_frames() -> bool:
        """
        Check if queue keeps frame references.

        Returns:
            bool: True if keeping references
        """

class syncer:
    def __init__(queue_size=1):
        """
        Create frame synchronizer.

        Args:
            queue_size (int): Internal queue size
        """

    def wait_for_frames(timeout_ms=5000) -> frameset:
        """
        Wait for synchronized frameset.

        Args:
            timeout_ms (int): Maximum wait time

        Returns:
            frameset: Synchronized frames
        """

    def wait_for_frame(timeout_ms=5000) -> frameset:
        """
        Alias for wait_for_frames.

        Args:
            timeout_ms (int): Maximum wait time

        Returns:
            frameset: Synchronized frames
        """

    def poll_for_frames() -> frameset:
        """
        Get synchronized frames without blocking.

        Returns:
            frameset: Available synchronized frames or None
        """

    def poll_for_frame() -> frameset:
        """
        Alias for poll_for_frames.

        Returns:
            frameset: Available synchronized frames or None
        """

    def try_wait_for_frames(timeout_ms=5000) -> tuple[bool, frameset]:
        """
        Try to get synchronized frames with timeout.

        Args:
            timeout_ms (int): Maximum wait time

        Returns:
            tuple: (success, frameset)
        """

    def try_wait_for_frame(timeout_ms=5000) -> tuple[bool, frameset]:
        """
        Alias for try_wait_for_frames.

        Args:
            timeout_ms (int): Maximum wait time

        Returns:
            tuple: (success, frameset)
        """
```

### Built-in Filters

#### Point Cloud Generation

```python { .api }
class pointcloud(filter):
    def __init__(stream=rs.stream.any, index=0):
        """
        Create point cloud generator.

        Args:
            stream (stream): Stream to use for texture mapping
            index (int): Stream index
        """

    def calculate(depth_frame) -> points:
        """
        Generate point cloud from depth frame.

        Args:
            depth_frame (depth_frame): Input depth data

        Returns:
            points: Generated point cloud
        """

    def map_to(texture_frame):
        """
        Set texture source for point cloud.

        Args:
            texture_frame (video_frame): Frame to use for texture coordinates
        """
```

#### Frame Alignment

```python { .api }
class align(filter):
    def __init__(align_to_stream):
        """
        Create frame alignment filter.

        Args:
            align_to_stream (stream): Target stream to align to
        """

    def process(frameset) -> frameset:
        """
        Align frames to target stream.

        Args:
            frameset (frameset): Input frames to align

        Returns:
            frameset: Aligned frames
        """
```

#### Depth Colorization

```python { .api }
class colorizer(filter):
    def __init__(color_scheme=0):
        """
        Create depth colorizer.

        Args:
            color_scheme (int): Color scheme (0-8)
                0: Jet (blue-red)
                1: Classic (grayscale)
                2: WhiteToBlack
                3: BlackToWhite
                4: Bio (green-based)
                5: Cold (blue-based)
                6: Warm (red-based)
                7: Quantized
                8: Pattern
        """

    def colorize(depth_frame) -> video_frame:
        """
        Colorize depth frame.

        Args:
            depth_frame (depth_frame): Input depth data

        Returns:
            video_frame: Colorized depth image
        """
```

#### Noise Reduction Filters

```python { .api }
class decimation_filter(filter):
    def __init__(magnitude=2.0):
        """
        Create decimation filter to reduce resolution.

        Args:
            magnitude (float): Decimation factor (1.0-8.0)
        """

class temporal_filter(filter):
    def __init__(smooth_alpha=0.4, smooth_delta=20.0, persistence_control=3):
        """
        Create temporal noise reduction filter.

        Args:
            smooth_alpha (float): Alpha factor for smoothing (0.0-1.0)
            smooth_delta (float): Delta threshold for edge-preserving
            persistence_control (int): Persistence control (0-8)
        """

class spatial_filter(filter):
    def __init__(smooth_alpha=0.5, smooth_delta=20.0, magnitude=2.0, hole_fill=0.0):
        """
        Create spatial noise reduction filter.

        Args:
            smooth_alpha (float): Alpha factor for smoothing (0.0-1.0)
            smooth_delta (float): Delta threshold for edge-preserving
            magnitude (float): Effect magnitude (1.0-5.0)
            hole_fill (float): Hole filling factor (0.0-5.0)
        """

class hole_filling_filter(filter):
    def __init__(mode=0):
        """
        Create hole filling filter.

        Args:
            mode (int): Filling mode
                0: fill_from_left
                1: farest_from_around
                2: nearest_from_around
        """
```

#### Format Conversion Filters

```python { .api }
class disparity_transform(filter):
    def __init__(transform_to_disparity=True):
        """
        Create depth/disparity transform filter.

        Args:
            transform_to_disparity (bool): True for depth->disparity, False for disparity->depth
        """

class threshold_filter(filter):
    def __init__(min_dist=0.15, max_dist=4.0):
        """
        Create depth range threshold filter.

        Args:
            min_dist (float): Minimum distance in meters
            max_dist (float): Maximum distance in meters
        """

class units_transform(filter):
    def __init__():
        """Create depth units transformation filter."""

class yuy_decoder(filter):
    def __init__():
        """Create YUY format decoder filter."""

class rotation_filter(filter):
    def __init__(streams=[]):
        """
        Create frame rotation filter.

        Args:
            streams (list): Streams to apply rotation to
        """

class hdr_merge(filter):
    def __init__():
        """Create HDR frame merging filter."""

class sequence_id_filter(filter):
    def __init__(sequence_id=0.0):
        """
        Create sequence ID filter.

        Args:
            sequence_id (float): Sequence identifier
        """
```

## Usage Examples

### Basic Filter Chain

```python
import pyrealsense2 as rs
import numpy as np

# Create pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

# Create filter chain
decimation = rs.decimation_filter(2.0)
spatial = rs.spatial_filter()
temporal = rs.temporal_filter()
hole_filling = rs.hole_filling_filter()
colorizer = rs.colorizer()

try:
    for i in range(100):
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()

        if not depth_frame:
            continue

        # Apply filter chain
        filtered = decimation.process(depth_frame)
        filtered = spatial.process(filtered)
        filtered = temporal.process(filtered)
        filtered = hole_filling.process(filtered)

        # Convert to depth frame for distance queries
        depth_filtered = filtered.as_depth_frame()

        # Get center distance
        width = depth_filtered.get_width()
        height = depth_filtered.get_height()
        center_dist = depth_filtered.get_distance(width // 2, height // 2)

        # Colorize for visualization
        colorized = colorizer.process(filtered)
        color_frame = colorized.as_video_frame()

        print(f"Frame {i}: center distance = {center_dist:.3f}m, "
              f"filtered size = {width}x{height}")

finally:
    pipeline.stop()
```

### Point Cloud Generation

```python
import pyrealsense2 as rs
import numpy as np

# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

pipeline.start(config)

# Create point cloud generator
pc = rs.pointcloud()
points = rs.points()

try:
    # Wait for frames with both color and depth
    for i in range(10):
        frames = pipeline.wait_for_frames()

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        if not depth_frame or not color_frame:
            continue

        # Map color texture to point cloud
        pc.map_to(color_frame)

        # Generate point cloud
        points = pc.calculate(depth_frame)

        print(f"Frame {i}: Generated {points.size()} points")

        # Get vertices as numpy array
        vertices = np.asanyarray(points.get_vertices())
        print(f"  Vertex data shape: {vertices.shape}")

        # Get texture coordinates
        tex_coords = np.asanyarray(points.get_texture_coordinates())
        print(f"  Texture coordinate shape: {tex_coords.shape}")

        # Export first point cloud to PLY file
        if i == 0:
            points.export_to_ply("pointcloud.ply", color_frame)
            print("  Exported to pointcloud.ply")

finally:
    pipeline.stop()
```

### Frame Alignment

```python
import pyrealsense2 as rs
import numpy as np

# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

profile = pipeline.start(config)

# Create alignment filter to align depth to color
align_to_color = rs.align(rs.stream.color)

try:
    for i in range(10):
        frames = pipeline.wait_for_frames()

        # Align frames
        aligned_frames = align_to_color.process(frames)

        # Get aligned frames
        aligned_depth = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not aligned_depth or not color_frame:
            continue

        # Convert to numpy arrays
        depth_image = np.asanyarray(aligned_depth.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        print(f"Frame {i}:")
        print(f"  Aligned depth: {depth_image.shape}")
        print(f"  Color: {color_image.shape}")

        # Now depth and color are pixel-aligned
        # Can directly correlate depth and color at same coordinates
        center_x, center_y = 320, 240
        depth_at_center = aligned_depth.get_distance(center_x, center_y)
        color_at_center = color_image[center_y, center_x]

        print(f"  Center pixel: depth={depth_at_center:.3f}m, "
              f"color={color_at_center}")

finally:
    pipeline.stop()
```

### Advanced Filter Configuration

```python
import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

# Create and configure filters
decimation = rs.decimation_filter()
decimation.set_option(rs.option.filter_magnitude, 3.0)

temporal = rs.temporal_filter()
temporal.set_option(rs.option.filter_smooth_alpha, 0.3)
temporal.set_option(rs.option.filter_smooth_delta, 30.0)
temporal.set_option(rs.option.holes_fill, 2)

spatial = rs.spatial_filter()
spatial.set_option(rs.option.filter_smooth_alpha, 0.6)
spatial.set_option(rs.option.filter_smooth_delta, 25.0)
spatial.set_option(rs.option.filter_magnitude, 3.0)
spatial.set_option(rs.option.holes_fill, 2.0)

hole_filling = rs.hole_filling_filter()
hole_filling.set_option(rs.option.holes_fill, 2)  # nearest_from_around

threshold = rs.threshold_filter()
threshold.set_option(rs.option.min_distance, 0.2)
threshold.set_option(rs.option.max_distance, 3.0)

colorizer = rs.colorizer()
colorizer.set_option(rs.option.color_scheme, 2)  # WhiteToBlack

print("Filter configurations:")
print(f"  Decimation magnitude: {decimation.get_option(rs.option.filter_magnitude)}")
print(f"  Temporal alpha: {temporal.get_option(rs.option.filter_smooth_alpha)}")
print(f"  Spatial magnitude: {spatial.get_option(rs.option.filter_magnitude)}")
print(f"  Threshold range: {threshold.get_option(rs.option.min_distance):.1f} - "
      f"{threshold.get_option(rs.option.max_distance):.1f}m")

try:
    for i in range(10):
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()

        if not depth_frame:
            continue

        # Apply comprehensive filter chain
        filtered = decimation.process(depth_frame)
        filtered = threshold.process(filtered)
        filtered = spatial.process(filtered)
        filtered = temporal.process(filtered)
        filtered = hole_filling.process(filtered)

        # Colorize result
        colorized = colorizer.process(filtered)

        # Get frame info
        depth_filtered = filtered.as_depth_frame()
        color_frame = colorized.as_video_frame()

        print(f"Frame {i}: {depth_filtered.get_width()}x{depth_filtered.get_height()} -> "
              f"{color_frame.get_width()}x{color_frame.get_height()}")

finally:
    pipeline.stop()
```

### Custom Processing Block

```python
import pyrealsense2 as rs
import numpy as np

def custom_processing_function(frame):
    """Custom processing function that inverts depth values."""
    if frame.is_depth_frame():
        depth_frame = frame.as_depth_frame()

        # Get frame data as numpy array
        depth_data = np.asanyarray(depth_frame.get_data())

        # Invert depth values (example processing)
        max_depth = depth_data.max()
        if max_depth > 0:
            inverted_data = max_depth - depth_data

            # Create new frame with processed data
            # (This is simplified - real implementation would need proper frame allocation)
            print(f"Processed depth frame: inverted {depth_data.shape} depth data")

    return frame

# Create custom processing block
custom_processor = rs.processing_block(custom_processing_function)

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

# Use custom processor with callback
def processed_frame_callback(processed_frame):
    if processed_frame.is_depth_frame():
        depth = processed_frame.as_depth_frame()
        print(f"Received processed frame #{depth.get_frame_number()}")

custom_processor.start(processed_frame_callback)

try:
    for i in range(10):
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()

        if depth_frame:
            # Send frame to custom processor
            custom_processor.invoke(depth_frame)

finally:
    pipeline.stop()
```

### Frame Queue Processing

```python
import pyrealsense2 as rs
import threading
import time

# Create frame queue for buffering
frame_queue = rs.frame_queue(capacity=10, keep_frames=True)

def processing_thread():
    """Background thread for frame processing."""
    processed_count = 0

    while True:
        try:
            # Wait for frame with timeout
            frame = frame_queue.wait_for_frame(timeout_ms=1000)

            if frame.is_depth_frame():
                depth_frame = frame.as_depth_frame()

                # Simulate processing time
                time.sleep(0.01)

                processed_count += 1
                print(f"Processed frame #{depth_frame.get_frame_number()} "
                      f"(total: {processed_count})")

        except rs.error:
            # Timeout or other error
            break

# Start processing thread
processor = threading.Thread(target=processing_thread)
processor.daemon = True
processor.start()

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

try:
    # Stream frames into queue
    for i in range(100):
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()

        if depth_frame:
            frame_queue.enqueue(depth_frame)

        # Print queue status
        if i % 10 == 0:
            print(f"Queue size: {frame_queue.size()}/{frame_queue.capacity()}")

        time.sleep(0.03)  # 30fps

finally:
    pipeline.stop()
    processor.join(timeout=1.0)
```

## Filter Chain Recommendations

### Standard Post-Processing Chain

```python
# Recommended filter order for general use
filters = [
    rs.decimation_filter(2.0),       # Reduce resolution for performance
    rs.threshold_filter(0.15, 4.0),  # Remove very close/far points
    rs.spatial_filter(),             # Spatial noise reduction
    rs.temporal_filter(),            # Temporal noise reduction
    rs.hole_filling_filter()         # Fill holes in depth data
]

# Apply filters in sequence
def apply_filters(frame, filters):
    for filter in filters:
        frame = filter.process(frame)
    return frame
```

### Performance-Oriented Chain

```python
# Optimized for speed
filters = [
    rs.decimation_filter(4.0),      # Aggressive decimation
    rs.threshold_filter(0.2, 2.0),  # Narrow range
    rs.hole_filling_filter(0)       # Simple hole filling
]
```

### Quality-Oriented Chain

```python
# Optimized for quality
filters = [
    rs.decimation_filter(1.0),               # No decimation
    rs.spatial_filter(0.8, 15.0, 5.0, 5.0),  # Strong spatial filtering
    rs.temporal_filter(0.2, 50.0, 5),        # Strong temporal filtering
    rs.hole_filling_filter(2)                # Best hole filling
]
```