# 3D Data & Point Clouds

3D processing capabilities including point cloud generation, coordinate transformations, and spatial analysis. Supports various output formats, texture mapping, and seamless integration with computer vision workflows.

## Capabilities

### Point Cloud Generation

Core point cloud functionality for converting depth data to 3D coordinates.

```python { .api }
class pointcloud(filter):
    def __init__(stream=rs.stream.any, index=0):
        """
        Create point cloud generator.

        Args:
            stream (stream): Stream to use for texture mapping (default: any available)
            index (int): Stream index for multi-instance streams
        """

    def calculate(depth_frame) -> points:
        """
        Generate 3D point cloud from depth frame.

        Args:
            depth_frame (depth_frame): Input depth data

        Returns:
            points: Generated point cloud frame
        """

    def map_to(texture_frame):
        """
        Set texture source for point cloud coloring.

        Args:
            texture_frame (video_frame): Frame to use for texture coordinates and colors
        """
class points(frame):
    def get_vertices(dims=1) -> BufData:
        """
        Get 3D vertex coordinates.

        Args:
            dims (int): Buffer dimensionality
                1: Flat array of vertices
                2: Structured array with point grouping

        Returns:
            BufData: Vertex coordinates as (x, y, z) buffer
        """

    def get_texture_coordinates(dims=1) -> BufData:
        """
        Get texture coordinates for point cloud.

        Args:
            dims (int): Buffer dimensionality
                1: Flat array of texture coordinates
                2: Structured array with coordinate grouping

        Returns:
            BufData: Texture coordinates as (u, v) buffer
        """

    def export_to_ply(filename, texture_frame):
        """
        Export point cloud to PLY format file.

        Args:
            filename (str): Output file path
            texture_frame (video_frame): Frame providing color/texture data
        """

    def size() -> int:
        """
        Get number of points in cloud.

        Returns:
            int: Point count
        """
```

### PLY Export

Advanced PLY export with mesh generation and normal calculation.

```python { .api }
class save_to_ply(filter):
    def __init__(filename="RealSense Pointcloud ", pc=rs.pointcloud()):
        """
        Create PLY export filter.

        Args:
            filename (str): Base filename for export
            pc (pointcloud): Point cloud generator to use
        """

    # Static option constants (pass to set_option to configure export)
    option_ignore_color: int     # Ignore color information
    option_ply_mesh: int         # Generate mesh connectivity
    option_ply_binary: int       # Use binary PLY format
    option_ply_normals: int      # Calculate and include normals
    option_ply_threshold: int    # Distance threshold for mesh generation
```

### Coordinate System Utilities

Functions for coordinate transformations and camera geometry.

```python { .api }
def rs2_project_point_to_pixel(intrinsics, point_3d) -> list[float]:
    """
    Project 3D point to 2D pixel coordinates.

    Args:
        intrinsics (intrinsics): Camera intrinsic parameters
        point_3d (list[float]): 3D point [x, y, z] in meters

    Returns:
        list[float]: 2D pixel coordinates [u, v]
    """
def rs2_deproject_pixel_to_point(intrinsics, pixel_2d, depth) -> list[float]:
    """
    Deproject 2D pixel to 3D point coordinates.

    Args:
        intrinsics (intrinsics): Camera intrinsic parameters
        pixel_2d (list[float]): 2D pixel coordinates [u, v]
        depth (float): Depth value in meters

    Returns:
        list[float]: 3D point coordinates [x, y, z] in meters
    """
def rs2_transform_point_to_point(extrinsics, from_point) -> list[float]:
    """
    Transform 3D point between coordinate systems.

    Args:
        extrinsics (extrinsics): Transformation parameters
        from_point (list[float]): Input 3D point [x, y, z]

    Returns:
        list[float]: Transformed 3D point [x, y, z]
    """
def rs2_fov(intrinsics) -> list[float]:
    """
    Calculate field of view angles from intrinsics.

    Args:
        intrinsics (intrinsics): Camera intrinsic parameters

    Returns:
        list[float]: [horizontal_fov, vertical_fov] in radians
    """
def rs2_project_color_pixel_to_depth_pixel(data, depth_scale, depth_min, depth_max,
                                           depth_intrin, color_intrin, color_to_depth,
                                           depth_to_color, from_pixel) -> list[float]:
    """
    Project color pixel to corresponding depth pixel.

    Args:
        data: Depth frame data
        depth_scale (float): Depth units scale factor
        depth_min (float): Minimum depth value
        depth_max (float): Maximum depth value
        depth_intrin (intrinsics): Depth camera intrinsics
        color_intrin (intrinsics): Color camera intrinsics
        color_to_depth (extrinsics): Color to depth transformation
        depth_to_color (extrinsics): Depth to color transformation
        from_pixel (list[float]): Source pixel coordinates [u, v]

    Returns:
        list[float]: Corresponding depth pixel coordinates [u, v]
    """
```

### 3D Data Types

Data structures for 3D geometry and transformations.

```python { .api }
class intrinsics:
    # Pinhole camera model parameters plus lens distortion description.
    width: int            # Image width in pixels
    height: int           # Image height in pixels
    ppx: float            # Principal point x-coordinate
    ppy: float            # Principal point y-coordinate
    fx: float             # Focal length x-axis (pixels)
    fy: float             # Focal length y-axis (pixels)
    model: distortion     # Distortion model type
    coeffs: list[float]   # Distortion coefficients [k1, k2, p1, p2, k3]
class extrinsics:
    # Rigid-body transform between two sensor coordinate frames.
    rotation: list[list[float]]   # 3x3 rotation matrix
    translation: list[float]      # Translation vector [x, y, z]
class motion_device_intrinsic:
    # Calibration data for motion sensors (gyro/accel).
    data: list[list[float]]         # 3x4 transformation matrix
    noise_variances: list[float]    # Noise variance per axis [x, y, z]
    bias_variances: list[float]     # Bias variance per axis [x, y, z]
class vertex:
    # Single 3D point in a point cloud (meters).
    x: float  # X coordinate
    y: float  # Y coordinate
    z: float  # Z coordinate
class texture_coordinate:
    # Normalized texture-map position for a vertex.
    u: float  # U texture coordinate (0-1)
    v: float  # V texture coordinate (0-1)
class vector:
    # Generic 3-component vector.
    x: float  # X component
    y: float  # Y component
    z: float  # Z component
class quaternion:
    # Rotation quaternion; w is the scalar part.
    x: float  # X component
    y: float  # Y component
    z: float  # Z component
    w: float  # W component (scalar part)
class pose_data:
    # Full 6-DoF pose sample from a tracking device.
    translation: vector             # Position [x, y, z] in meters
    velocity: vector                # Velocity [x, y, z] in m/s
    acceleration: vector            # Acceleration [x, y, z] in m/s²
    rotation: quaternion            # Orientation quaternion
    angular_velocity: vector        # Angular velocity [x, y, z] in rad/s
    angular_acceleration: vector    # Angular acceleration [x, y, z] in rad/s²
    tracker_confidence: int         # Tracking confidence (0-3)
    mapper_confidence: int          # Mapping confidence (0-3)
```

## Usage Examples

### Basic Point Cloud Generation

```python
import pyrealsense2 as rs
import numpy as np

# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

pipeline.start(config)

# Create point cloud generator
pc = rs.pointcloud()

try:
    # Wait for frames
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()

    if depth_frame and color_frame:
        # Generate point cloud with color texture
        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)

        print(f"Generated {points.size()} points")

        # Get vertices as NumPy array (structured: fields 'x', 'y', 'z')
        vertices = np.asanyarray(points.get_vertices())
        print(f"Vertices shape: {vertices.shape}")
        print(f"Vertices dtype: {vertices.dtype}")

        # Get texture coordinates
        tex_coords = np.asanyarray(points.get_texture_coordinates())
        print(f"Texture coordinates shape: {tex_coords.shape}")

        # Analyze point cloud: zero depth marks invalid pixels
        valid_points = vertices[vertices['z'] > 0]  # Filter out invalid points
        print(f"Valid points: {len(valid_points)}")

        if len(valid_points) > 0:
            print(f"Depth range: {valid_points['z'].min():.3f} - {valid_points['z'].max():.3f}m")
            print(f"X range: {valid_points['x'].min():.3f} - {valid_points['x'].max():.3f}m")
            print(f"Y range: {valid_points['y'].min():.3f} - {valid_points['y'].max():.3f}m")

finally:
    pipeline.stop()
```

### Point Cloud Export to PLY

```python
import pyrealsense2 as rs

# Configure pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

pipeline.start(config)

# Create point cloud generator
pc = rs.pointcloud()

# Create PLY export filter
ply = rs.save_to_ply("pointcloud.ply")

# Configure PLY export options
ply.set_option(rs.save_to_ply.option_ply_binary, False)  # Text format
ply.set_option(rs.save_to_ply.option_ply_normals, True)  # Include normals
ply.set_option(rs.save_to_ply.option_ply_mesh, True)     # Generate mesh

try:
    # Capture frame
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()

    if depth_frame and color_frame:
        # Generate colored point cloud
        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)

        print(f"Exporting {points.size()} points to PLY...")

        # Export to PLY file directly from the points object
        points.export_to_ply("simple_pointcloud.ply", color_frame)

        # Alternative: Use save_to_ply filter for advanced options
        colorizer = rs.colorizer()
        colorized = colorizer.process(frames)
        ply.process(colorized)

        print("Export completed")

finally:
    pipeline.stop()
```

### Coordinate Transformations

```python
import pyrealsense2 as rs
import numpy as np

# Get camera intrinsics
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

profile = pipeline.start(config)

# Get stream profiles and intrinsics
depth_profile = profile.get_stream(rs.stream.depth)
color_profile = profile.get_stream(rs.stream.color)

depth_intrinsics = depth_profile.as_video_stream_profile().get_intrinsics()
color_intrinsics = color_profile.as_video_stream_profile().get_intrinsics()

# Get extrinsics between streams
depth_to_color = depth_profile.get_extrinsics_to(color_profile)

print("Depth Camera Intrinsics:")
print(f" Resolution: {depth_intrinsics.width}x{depth_intrinsics.height}")
print(f" Principal Point: ({depth_intrinsics.ppx:.1f}, {depth_intrinsics.ppy:.1f})")
print(f" Focal Length: ({depth_intrinsics.fx:.1f}, {depth_intrinsics.fy:.1f})")

print("\nColor Camera Intrinsics:")
print(f" Resolution: {color_intrinsics.width}x{color_intrinsics.height}")
print(f" Principal Point: ({color_intrinsics.ppx:.1f}, {color_intrinsics.ppy:.1f})")
print(f" Focal Length: ({color_intrinsics.fx:.1f}, {color_intrinsics.fy:.1f})")

# Calculate field of view (returned in radians)
depth_fov = rs.rs2_fov(depth_intrinsics)
color_fov = rs.rs2_fov(color_intrinsics)

print(f"\nField of View:")
print(f" Depth: {np.degrees(depth_fov[0]):.1f}° x {np.degrees(depth_fov[1]):.1f}°")
print(f" Color: {np.degrees(color_fov[0]):.1f}° x {np.degrees(color_fov[1]):.1f}°")

try:
    # Get a frame for transformation examples
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()

    if depth_frame:
        # Example coordinate transformations
        center_pixel = [320, 240]  # Center of 640x480 image
        depth_value = depth_frame.get_distance(center_pixel[0], center_pixel[1])

        if depth_value > 0:
            # Deproject pixel to 3D point in the depth camera's frame
            point_3d = rs.rs2_deproject_pixel_to_point(
                depth_intrinsics, center_pixel, depth_value)

            print(f"\nCoordinate Transformation Example:")
            print(f" Pixel: ({center_pixel[0]}, {center_pixel[1]})")
            print(f" Depth: {depth_value:.3f}m")
            print(f" 3D Point: ({point_3d[0]:.3f}, {point_3d[1]:.3f}, {point_3d[2]:.3f})")

            # Transform point to color camera coordinate system
            point_in_color = rs.rs2_transform_point_to_point(depth_to_color, point_3d)
            print(f" Point in color coordinates: ({point_in_color[0]:.3f}, {point_in_color[1]:.3f}, {point_in_color[2]:.3f})")

            # Project back to color image
            color_pixel = rs.rs2_project_point_to_pixel(color_intrinsics, point_in_color)
            print(f" Corresponding color pixel: ({color_pixel[0]:.1f}, {color_pixel[1]:.1f})")

finally:
    pipeline.stop()
```

### Point Cloud Filtering and Analysis

```python
import pyrealsense2 as rs
import numpy as np

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

# Create point cloud with filtering
pc = rs.pointcloud()
decimation = rs.decimation_filter(2.0)  # Reduce point count
spatial = rs.spatial_filter()           # Noise reduction

try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()

    if depth_frame:
        # Apply filters before point cloud generation
        filtered = decimation.process(depth_frame)
        filtered = spatial.process(filtered)

        # Generate point cloud
        points = pc.calculate(filtered.as_depth_frame())

        # Get vertices as structured NumPy array
        vertices = np.asanyarray(points.get_vertices())

        print(f"Point cloud analysis:")
        print(f" Total points: {points.size()}")
        print(f" Vertices array shape: {vertices.shape}")

        # Filter valid points (non-zero depth)
        valid_mask = vertices['z'] > 0
        valid_vertices = vertices[valid_mask]

        print(f" Valid points: {len(valid_vertices)}")

        if len(valid_vertices) > 0:
            # Statistical analysis
            x_coords = valid_vertices['x']
            y_coords = valid_vertices['y']
            z_coords = valid_vertices['z']

            print(f" X range: {x_coords.min():.3f} to {x_coords.max():.3f}m")
            print(f" Y range: {y_coords.min():.3f} to {y_coords.max():.3f}m")
            print(f" Z range: {z_coords.min():.3f} to {z_coords.max():.3f}m")

            # Calculate distances from origin
            distances = np.sqrt(x_coords**2 + y_coords**2 + z_coords**2)
            print(f" Distance range: {distances.min():.3f} to {distances.max():.3f}m")
            print(f" Mean distance: {distances.mean():.3f}m")

            # Find points within specific distance range
            near_points = valid_vertices[(z_coords > 0.5) & (z_coords < 2.0)]
            print(f" Points between 0.5-2.0m: {len(near_points)}")

            # Spatial distribution analysis
            x_center = x_coords.mean()
            y_center = y_coords.mean()
            z_center = z_coords.mean()
            print(f" Centroid: ({x_center:.3f}, {y_center:.3f}, {z_center:.3f})")

finally:
    pipeline.stop()
```

### Real-time Point Cloud Processing

```python
import pyrealsense2 as rs
import numpy as np
import time

class PointCloudProcessor:
    """Generates a point cloud per depth frame and reports periodic statistics."""

    def __init__(self):
        self.pc = rs.pointcloud()
        self.frame_count = 0
        self.processing_times = []

    def process_frame(self, depth_frame):
        """Build the point cloud for one frame, track timing, and log every 30 frames."""
        start_time = time.time()

        # Generate point cloud
        points = self.pc.calculate(depth_frame)

        # Get vertices (structured array with 'x', 'y', 'z' fields)
        vertices = np.asanyarray(points.get_vertices())
        valid_points = vertices[vertices['z'] > 0]

        # Simple analysis — only meaningful with enough points.
        # closest_point stays None on sparse frames so the periodic
        # report below does not reference an unassigned variable.
        closest_point = None
        if len(valid_points) > 1000:  # Minimum points for analysis
            distances = np.sqrt(valid_points['x']**2 +
                                valid_points['y']**2 +
                                valid_points['z']**2)

            # Find closest point
            closest_idx = np.argmin(distances)
            closest_point = valid_points[closest_idx]

        processing_time = time.time() - start_time
        self.processing_times.append(processing_time)

        self.frame_count += 1

        if self.frame_count % 30 == 0:  # Print every second at 30fps
            avg_time = np.mean(self.processing_times[-30:])
            print(f"Frame {self.frame_count}:")
            print(f" Valid points: {len(valid_points)}")
            if closest_point is not None:
                print(f" Closest point: ({closest_point['x']:.3f}, "
                      f"{closest_point['y']:.3f}, {closest_point['z']:.3f})")
            print(f" Processing time: {avg_time*1000:.1f}ms")

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

processor = PointCloudProcessor()

try:
    print("Starting real-time point cloud processing...")

    for i in range(300):  # Process for 10 seconds at 30fps
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()

        if depth_frame:
            processor.process_frame(depth_frame)

finally:
    pipeline.stop()

print(f"Processed {processor.frame_count} frames")
if processor.processing_times:
    avg_time = np.mean(processor.processing_times)
    print(f"Average processing time: {avg_time*1000:.1f}ms")
```

### Multi-Frame Point Cloud Accumulation

```python
import pyrealsense2 as rs
import numpy as np

class PointCloudAccumulator:
    """Keeps the valid points of the most recent depth frames and merges them on demand."""

    def __init__(self, max_frames=10):
        self.pc = rs.pointcloud()
        self.accumulated_points = []  # list of structured arrays, one per frame
        self.max_frames = max_frames  # sliding-window size

    def add_frame(self, depth_frame):
        """Convert a depth frame to points and append its valid vertices."""
        # Generate point cloud
        points = self.pc.calculate(depth_frame)
        vertices = np.asanyarray(points.get_vertices())

        # Filter valid points (zero depth marks invalid pixels)
        valid_points = vertices[vertices['z'] > 0]

        if len(valid_points) > 0:
            self.accumulated_points.append(valid_points)

            # Keep only recent frames
            if len(self.accumulated_points) > self.max_frames:
                self.accumulated_points.pop(0)

    def get_merged_pointcloud(self):
        """Merge all retained frames into one array, dropping near-duplicate points."""
        if not self.accumulated_points:
            return np.array([])

        # Merge all accumulated point clouds
        merged = np.concatenate(self.accumulated_points)

        # Optional: Remove duplicate points (simple spatial filtering)
        # This is a basic approach - more sophisticated methods exist
        unique_points = self._remove_nearby_duplicates(merged, threshold=0.01)

        return unique_points

    def _remove_nearby_duplicates(self, points, threshold=0.01):
        """Drop points closer than `threshold` meters to an already-kept point.

        O(n^2) pairwise scan — fine for demo-sized clouds only.
        """
        if len(points) == 0:
            return points

        # Simple duplicate removal based on spatial proximity
        # For production use, consider more efficient spatial data structures
        unique_points = [points[0]]

        for point in points[1:]:
            is_duplicate = False
            for unique_point in unique_points:
                distance = np.sqrt((point['x'] - unique_point['x'])**2 +
                                   (point['y'] - unique_point['y'])**2 +
                                   (point['z'] - unique_point['z'])**2)
                if distance < threshold:
                    is_duplicate = True
                    break

            if not is_duplicate:
                unique_points.append(point)

        return np.array(unique_points)

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

accumulator = PointCloudAccumulator(max_frames=5)

try:
    print("Accumulating point clouds...")

    # Collect frames
    for i in range(50):
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()

        if depth_frame:
            accumulator.add_frame(depth_frame)

            if i % 10 == 0:
                merged = accumulator.get_merged_pointcloud()
                print(f"Frame {i}: accumulated {len(merged)} unique points")

    # Get final merged point cloud
    final_pointcloud = accumulator.get_merged_pointcloud()
    print(f"Final merged point cloud: {len(final_pointcloud)} points")

    if len(final_pointcloud) > 0:
        # Analyze merged point cloud
        x_coords = final_pointcloud['x']
        y_coords = final_pointcloud['y']
        z_coords = final_pointcloud['z']

        print(f"Merged point cloud bounds:")
        print(f" X: {x_coords.min():.3f} to {x_coords.max():.3f}m")
        print(f" Y: {y_coords.min():.3f} to {y_coords.max():.3f}m")
        print(f" Z: {z_coords.min():.3f} to {z_coords.max():.3f}m")

finally:
    pipeline.stop()
```

## Integration with Computer Vision Libraries

### OpenCV Integration

```python
import pyrealsense2 as rs
import numpy as np
import cv2

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

pipeline.start(config)

pc = rs.pointcloud()

try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()

    if depth_frame:
        # Generate point cloud
        points = pc.calculate(depth_frame)
        vertices = np.asanyarray(points.get_vertices())

        # Convert to OpenCV format for visualization
        valid_points = vertices[vertices['z'] > 0]

        if len(valid_points) > 0:
            # Project to 2D for visualization (simple pinhole projection)
            x_2d = valid_points['x'] / valid_points['z'] * 320 + 320
            y_2d = valid_points['y'] / valid_points['z'] * 240 + 240

            # Create visualization image
            img = np.zeros((480, 640, 3), dtype=np.uint8)

            for i in range(len(valid_points)):
                if 0 <= x_2d[i] < 640 and 0 <= y_2d[i] < 480:
                    # Scale depth to 0-255; clamp so points beyond 5m stay in 8-bit range
                    depth_val = min(255, int(valid_points[i]['z'] * 255 / 5.0))
                    color = (depth_val, depth_val, depth_val)
                    cv2.circle(img, (int(x_2d[i]), int(y_2d[i])), 1, color, -1)

            cv2.imshow('Point Cloud Projection', img)
            cv2.waitKey(1)

finally:
    pipeline.stop()
    cv2.destroyAllWindows()
```

## Performance Considerations

- **Decimation**: Use decimation filter to reduce point count for performance
- **Spatial Filtering**: Apply before point cloud generation to reduce noise
- **Memory Usage**: Point clouds can be memory-intensive; consider processing in chunks
- **Export Formats**: Binary PLY files are smaller and faster than text format
- **Coordinate Systems**: Be aware of RealSense coordinate conventions (Z forward, Y down)