Python bindings for the Intel RealSense SDK 2.0, providing access to depth and color cameras for computer vision applications.

3D processing capabilities include point cloud generation, coordinate transformations, and spatial analysis, with support for multiple output formats, texture mapping, and seamless integration with computer vision workflows.

Core point cloud functionality for converting depth data to 3D coordinates:
class pointcloud(filter):
    """Processing block that converts depth frames into 3D point clouds,
    optionally texture-mapped from a second stream."""

    def __init__(self, stream=rs.stream.any, index=0):
        """
        Create point cloud generator.

        Args:
            stream (stream): Stream to use for texture mapping (default: any available)
            index (int): Stream index for multi-instance streams
        """

    def calculate(self, depth_frame) -> "points":
        """
        Generate 3D point cloud from depth frame.

        Args:
            depth_frame (depth_frame): Input depth data

        Returns:
            points: Generated point cloud frame
        """

    def map_to(self, texture_frame):
        """
        Set texture source for point cloud coloring.

        Args:
            texture_frame (video_frame): Frame to use for texture coordinates and colors
        """
class points(frame):
    """Frame type holding the output of a pointcloud filter: per-pixel
    3D vertices and matching texture coordinates."""

    def get_vertices(self, dims=1) -> "BufData":
        """
        Get 3D vertex coordinates.

        Args:
            dims (int): Buffer dimensionality
                1: Flat array of vertices
                2: Structured array with point grouping

        Returns:
            BufData: Vertex coordinates as (x, y, z) buffer
        """

    def get_texture_coordinates(self, dims=1) -> "BufData":
        """
        Get texture coordinates for point cloud.

        Args:
            dims (int): Buffer dimensionality
                1: Flat array of texture coordinates
                2: Structured array with coordinate grouping

        Returns:
            BufData: Texture coordinates as (u, v) buffer
        """

    def export_to_ply(self, filename, texture_frame):
        """
        Export point cloud to PLY format file.

        Args:
            filename (str): Output file path
            texture_frame (video_frame): Frame providing color/texture data
        """

    def size(self) -> int:
        """
        Get number of points in cloud.

        Returns:
            int: Point count
        """

# Advanced PLY export with mesh generation and normal calculation.
class save_to_ply(filter):
    """Processing block that writes incoming frames to a PLY file, with
    optional mesh generation and normal calculation."""

    def __init__(self, filename="RealSense Pointcloud ", pc=rs.pointcloud()):
        """
        Create PLY export filter.

        Args:
            filename (str): Base filename for export
            pc (pointcloud): Point cloud generator to use
        """

    # Static option constants (pass to set_option)
    option_ignore_color: int    # Ignore color information
    option_ply_mesh: int        # Generate mesh connectivity
    option_ply_binary: int      # Use binary PLY format
    option_ply_normals: int     # Calculate and include normals
    option_ply_threshold: int   # Distance threshold for mesh generation

# Functions for coordinate transformations and camera geometry.
def rs2_project_point_to_pixel(intrinsics, point_3d) -> list[float]:
    """Map a 3D point in the camera coordinate frame onto the image plane.

    Args:
        intrinsics (intrinsics): Camera intrinsic parameters.
        point_3d (list[float]): 3D point [x, y, z] in meters.

    Returns:
        list[float]: 2D pixel coordinates [u, v].
    """
def rs2_deproject_pixel_to_point(intrinsics, pixel_2d, depth) -> list[float]:
    """Lift a 2D pixel plus a depth reading back into camera space.

    Args:
        intrinsics (intrinsics): Camera intrinsic parameters.
        pixel_2d (list[float]): 2D pixel coordinates [u, v].
        depth (float): Depth value in meters.

    Returns:
        list[float]: 3D point [x, y, z] in meters.
    """
def rs2_transform_point_to_point(extrinsics, from_point) -> list[float]:
    """Apply an extrinsic rotation + translation to move a 3D point
    between two sensor coordinate systems.

    Args:
        extrinsics (extrinsics): Transformation parameters.
        from_point (list[float]): Input 3D point [x, y, z].

    Returns:
        list[float]: Transformed 3D point [x, y, z].
    """
def rs2_fov(intrinsics) -> list[float]:
    """
    Calculate field of view angles from intrinsics.

    Args:
        intrinsics (intrinsics): Camera intrinsic parameters

    Returns:
        list[float]: [horizontal_fov, vertical_fov] in degrees.
            NOTE(review): librealsense's rs2_fov (rsutil.h) converts the
            result to degrees; the earlier doc claimed radians — corrected.
    """
def rs2_project_color_pixel_to_depth_pixel(data, depth_scale, depth_min, depth_max,
                                           depth_intrin, color_intrin, color_to_depth,
                                           depth_to_color, from_pixel) -> list[float]:
    """
    Project color pixel to corresponding depth pixel.

    Args:
        data: Depth frame data
        depth_scale (float): Depth units scale factor
        depth_min (float): Minimum depth value
        depth_max (float): Maximum depth value
        depth_intrin (intrinsics): Depth camera intrinsics
        color_intrin (intrinsics): Color camera intrinsics
        color_to_depth (extrinsics): Color to depth transformation
        depth_to_color (extrinsics): Depth to color transformation
        from_pixel (list[float]): Source pixel coordinates [u, v]

    Returns:
        list[float]: Corresponding depth pixel coordinates [u, v]
    """

# Data structures for 3D geometry and transformations.
class intrinsics:
    """Pinhole camera intrinsic parameters plus lens distortion model."""
    width: int           # Image width in pixels
    height: int          # Image height in pixels
    ppx: float           # Principal point x-coordinate
    ppy: float           # Principal point y-coordinate
    fx: float            # Focal length x-axis (pixels)
    fy: float            # Focal length y-axis (pixels)
    model: distortion    # Distortion model type
    coeffs: list[float]  # Distortion coefficients [k1, k2, p1, p2, k3]
class extrinsics:
    """Rigid-body transform (rotation + translation) between two sensor
    coordinate systems."""
    rotation: list[list[float]]  # 3x3 rotation matrix
    translation: list[float]     # Translation vector [x, y, z]
class motion_device_intrinsic:
    """Calibration data for a motion sensor (IMU): scale/bias matrix plus
    per-axis noise and bias variances."""
    data: list[list[float]]       # 3x4 transformation matrix
    noise_variances: list[float]  # Noise variance per axis [x, y, z]
    bias_variances: list[float]   # Bias variance per axis [x, y, z]
class vertex:
    """Single 3D point produced by a point cloud."""
    x: float  # X coordinate
    y: float  # Y coordinate
    z: float  # Z coordinate
class texture_coordinate:
    """Normalized (u, v) texture-map coordinate for a point-cloud vertex."""
    u: float  # U texture coordinate (0-1)
    v: float  # V texture coordinate (0-1)
class vector:
    """Generic 3D vector used in pose/motion data."""
    x: float  # X component
    y: float  # Y component
    z: float  # Z component
class quaternion:
    """Rotation quaternion (x, y, z, w) with w as the scalar part."""
    x: float  # X component
    y: float  # Y component
    z: float  # Z component
    w: float  # W component (scalar part)
class pose_data:
    """Pose sample from a tracking device: position, motion derivatives,
    orientation, and per-frame confidence levels."""
    translation: vector           # Position [x, y, z] in meters
    velocity: vector              # Velocity [x, y, z] in m/s
    acceleration: vector          # Acceleration [x, y, z] in m/s²
    rotation: quaternion          # Orientation quaternion
    angular_velocity: vector      # Angular velocity [x, y, z] in rad/s
    angular_acceleration: vector  # Angular acceleration [x, y, z] in rad/s²
    tracker_confidence: int       # Tracking confidence (0-3)
    mapper_confidence: int        # Mapping confidence (0-3)


import pyrealsense2 as rs
import numpy as np
# Example: generate a textured point cloud and inspect its vertices.
# Configure streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

# Create point cloud generator
pc = rs.pointcloud()

try:
    # Wait for frames
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    if depth_frame and color_frame:
        # Generate point cloud with color texture
        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)
        print(f"Generated {points.size()} points")

        # Get vertices as NumPy array
        # NOTE(review): assumes the BufData buffer maps to a structured dtype
        # with 'x'/'y'/'z' fields; SDK samples often use
        # .view(np.float32).reshape(-1, 3) instead — verify on target version.
        vertices = np.asanyarray(points.get_vertices())
        print(f"Vertices shape: {vertices.shape}")
        print(f"Vertices dtype: {vertices.dtype}")

        # Get texture coordinates
        tex_coords = np.asanyarray(points.get_texture_coordinates())
        print(f"Texture coordinates shape: {tex_coords.shape}")

        # Analyze point cloud
        valid_points = vertices[vertices['z'] > 0]  # Filter out invalid points
        print(f"Valid points: {len(valid_points)}")
        if len(valid_points) > 0:
            print(f"Depth range: {valid_points['z'].min():.3f} - {valid_points['z'].max():.3f}m")
            print(f"X range: {valid_points['x'].min():.3f} - {valid_points['x'].max():.3f}m")
            print(f"Y range: {valid_points['y'].min():.3f} - {valid_points['y'].max():.3f}m")
finally:
    pipeline.stop()

import pyrealsense2 as rs
# Example: export a colored point cloud to PLY, both directly and via the
# save_to_ply filter (which supports mesh/normal options).
# Configure pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

# Create point cloud generator
pc = rs.pointcloud()

# Create PLY export filter
ply = rs.save_to_ply("pointcloud.ply")

# Configure PLY export options
ply.set_option(rs.save_to_ply.option_ply_binary, False)  # Text format
ply.set_option(rs.save_to_ply.option_ply_normals, True)  # Include normals
ply.set_option(rs.save_to_ply.option_ply_mesh, True)  # Generate mesh

try:
    # Capture frame
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    if depth_frame and color_frame:
        # Generate colored point cloud
        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)
        print(f"Exporting {points.size()} points to PLY...")
        # Export to PLY file
        points.export_to_ply("simple_pointcloud.ply", color_frame)
        # Alternative: Use save_to_ply filter for advanced options
        colorizer = rs.colorizer()
        colorized = colorizer.process(frames)
        ply.process(colorized)
        print("Export completed")
finally:
    pipeline.stop()

import pyrealsense2 as rs
import numpy as np
# Example: read camera calibration and walk a point through the
# deproject -> transform -> project pipeline.
# Get camera intrinsics
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)

# Get stream profiles and intrinsics
depth_profile = profile.get_stream(rs.stream.depth)
color_profile = profile.get_stream(rs.stream.color)
depth_intrinsics = depth_profile.as_video_stream_profile().get_intrinsics()
color_intrinsics = color_profile.as_video_stream_profile().get_intrinsics()

# Get extrinsics between streams
depth_to_color = depth_profile.get_extrinsics_to(color_profile)

print("Depth Camera Intrinsics:")
print(f" Resolution: {depth_intrinsics.width}x{depth_intrinsics.height}")
print(f" Principal Point: ({depth_intrinsics.ppx:.1f}, {depth_intrinsics.ppy:.1f})")
print(f" Focal Length: ({depth_intrinsics.fx:.1f}, {depth_intrinsics.fy:.1f})")
print("\nColor Camera Intrinsics:")
print(f" Resolution: {color_intrinsics.width}x{color_intrinsics.height}")
print(f" Principal Point: ({color_intrinsics.ppx:.1f}, {color_intrinsics.ppy:.1f})")
print(f" Focal Length: ({color_intrinsics.fx:.1f}, {color_intrinsics.fy:.1f})")

# Calculate field of view
depth_fov = rs.rs2_fov(depth_intrinsics)
color_fov = rs.rs2_fov(color_intrinsics)
# NOTE(review): librealsense's rs2_fov (rsutil.h) returns angles in degrees,
# in which case np.degrees() below double-converts — verify before relying
# on these printed values.
print("\nField of View:")
print(f" Depth: {np.degrees(depth_fov[0]):.1f}° x {np.degrees(depth_fov[1]):.1f}°")
print(f" Color: {np.degrees(color_fov[0]):.1f}° x {np.degrees(color_fov[1]):.1f}°")

try:
    # Get a frame for transformation examples
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    if depth_frame:
        # Example coordinate transformations
        center_pixel = [320, 240]  # Center of 640x480 image
        depth_value = depth_frame.get_distance(center_pixel[0], center_pixel[1])
        if depth_value > 0:
            # Deproject pixel to 3D point
            point_3d = rs.rs2_deproject_pixel_to_point(
                depth_intrinsics, center_pixel, depth_value)
            print("\nCoordinate Transformation Example:")
            print(f" Pixel: ({center_pixel[0]}, {center_pixel[1]})")
            print(f" Depth: {depth_value:.3f}m")
            print(f" 3D Point: ({point_3d[0]:.3f}, {point_3d[1]:.3f}, {point_3d[2]:.3f})")
            # Transform point to color camera coordinate system
            point_in_color = rs.rs2_transform_point_to_point(depth_to_color, point_3d)
            print(f" Point in color coordinates: ({point_in_color[0]:.3f}, {point_in_color[1]:.3f}, {point_in_color[2]:.3f})")
            # Project back to color image
            color_pixel = rs.rs2_project_point_to_pixel(color_intrinsics, point_in_color)
            print(f" Corresponding color pixel: ({color_pixel[0]:.1f}, {color_pixel[1]:.1f})")
finally:
    pipeline.stop()

import pyrealsense2 as rs
import numpy as np
# Example: apply decimation and spatial filters before point cloud
# generation, then run a simple statistical analysis of the vertices.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
pipeline.start(config)

# Create point cloud with filtering
pc = rs.pointcloud()
decimation = rs.decimation_filter(2.0)  # Reduce point count
spatial = rs.spatial_filter()  # Noise reduction

try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    if depth_frame:
        # Apply filters before point cloud generation
        filtered = decimation.process(depth_frame)
        filtered = spatial.process(filtered)

        # Generate point cloud
        points = pc.calculate(filtered.as_depth_frame())

        # Get vertices as structured NumPy array
        vertices = np.asanyarray(points.get_vertices())
        print("Point cloud analysis:")
        print(f" Total points: {points.size()}")
        print(f" Vertices array shape: {vertices.shape}")

        # Filter valid points (non-zero depth)
        valid_mask = vertices['z'] > 0
        valid_vertices = vertices[valid_mask]
        print(f" Valid points: {len(valid_vertices)}")

        if len(valid_vertices) > 0:
            # Statistical analysis
            x_coords = valid_vertices['x']
            y_coords = valid_vertices['y']
            z_coords = valid_vertices['z']
            print(f" X range: {x_coords.min():.3f} to {x_coords.max():.3f}m")
            print(f" Y range: {y_coords.min():.3f} to {y_coords.max():.3f}m")
            print(f" Z range: {z_coords.min():.3f} to {z_coords.max():.3f}m")

            # Calculate distances from origin
            distances = np.sqrt(x_coords**2 + y_coords**2 + z_coords**2)
            print(f" Distance range: {distances.min():.3f} to {distances.max():.3f}m")
            print(f" Mean distance: {distances.mean():.3f}m")

            # Find points within specific distance range
            near_points = valid_vertices[(z_coords > 0.5) & (z_coords < 2.0)]
            print(f" Points between 0.5-2.0m: {len(near_points)}")

            # Spatial distribution analysis
            x_center = x_coords.mean()
            y_center = y_coords.mean()
            z_center = z_coords.mean()
            print(f" Centroid: ({x_center:.3f}, {y_center:.3f}, {z_center:.3f})")
finally:
    pipeline.stop()

import pyrealsense2 as rs
import numpy as np
import time
class PointCloudProcessor:
    """Real-time point cloud processor: converts each depth frame to 3D
    points, tracks the closest point, and reports rolling timing stats."""

    def __init__(self):
        self.pc = rs.pointcloud()
        self.frame_count = 0
        self.processing_times = []  # per-frame wall-clock durations (seconds)

    def process_frame(self, depth_frame):
        """Process one depth frame: compute its point cloud, find the
        closest valid point, and print stats every 30th frame.

        Args:
            depth_frame (depth_frame): Depth frame from the pipeline.
        """
        start_time = time.time()

        # Generate point cloud
        points = self.pc.calculate(depth_frame)

        # Get vertices (assumes structured dtype with 'x'/'y'/'z' fields)
        vertices = np.asanyarray(points.get_vertices())
        valid_points = vertices[vertices['z'] > 0]

        # Simple analysis — only meaningful with enough points.
        # (Original printed closest_point unconditionally, raising NameError
        # when fewer than ~1000 points were valid on a reporting frame.)
        closest_point = None
        if len(valid_points) > 1000:  # Minimum points for analysis
            distances = np.sqrt(valid_points['x']**2 +
                                valid_points['y']**2 +
                                valid_points['z']**2)
            # Find closest point
            closest_point = valid_points[np.argmin(distances)]

        processing_time = time.time() - start_time
        self.processing_times.append(processing_time)
        self.frame_count += 1

        if self.frame_count % 30 == 0:  # Print every second at 30fps
            avg_time = np.mean(self.processing_times[-30:])
            print(f"Frame {self.frame_count}:")
            print(f" Valid points: {len(valid_points)}")
            if closest_point is not None:
                print(f" Closest point: ({closest_point['x']:.3f}, "
                      f"{closest_point['y']:.3f}, {closest_point['z']:.3f})")
            print(f" Processing time: {avg_time*1000:.1f}ms")
# Driver: stream depth frames through the processor for ~10 seconds.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
pipeline.start(config)

processor = PointCloudProcessor()

try:
    print("Starting real-time point cloud processing...")
    for i in range(300):  # Process for 10 seconds at 30fps
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        if depth_frame:
            processor.process_frame(depth_frame)
finally:
    pipeline.stop()
    print(f"Processed {processor.frame_count} frames")
    if processor.processing_times:
        avg_time = np.mean(processor.processing_times)
        print(f"Average processing time: {avg_time*1000:.1f}ms")

import pyrealsense2 as rs
import numpy as np
class PointCloudAccumulator:
    """Accumulates per-frame point clouds into a rolling window of recent
    frames and merges them with a simple proximity-based duplicate filter."""

    def __init__(self, max_frames=10):
        # max_frames bounds the rolling window; older frames are evicted FIFO.
        self.pc = rs.pointcloud()
        self.accumulated_points = []  # list of structured arrays, one per frame
        self.max_frames = max_frames

    def add_frame(self, depth_frame):
        """Convert depth_frame to points and retain its valid (z > 0)
        vertices, evicting the oldest frame beyond max_frames."""
        # Generate point cloud
        points = self.pc.calculate(depth_frame)
        vertices = np.asanyarray(points.get_vertices())
        # Filter valid points
        valid_points = vertices[vertices['z'] > 0]
        if len(valid_points) > 0:
            self.accumulated_points.append(valid_points)
            # Keep only recent frames
            if len(self.accumulated_points) > self.max_frames:
                self.accumulated_points.pop(0)

    def get_merged_pointcloud(self):
        """Concatenate all retained frames and drop near-duplicate points.

        Returns:
            np.ndarray: Structured array of unique points (empty if no
            frames have been accumulated).
        """
        if not self.accumulated_points:
            return np.array([])
        # Merge all accumulated point clouds
        merged = np.concatenate(self.accumulated_points)
        # Optional: Remove duplicate points (simple spatial filtering)
        # This is a basic approach - more sophisticated methods exist
        unique_points = self._remove_nearby_duplicates(merged, threshold=0.01)
        return unique_points

    def _remove_nearby_duplicates(self, points, threshold=0.01):
        """Keep only points at least `threshold` meters from every point
        already kept. O(n^2) brute force — acceptable for small clouds only."""
        if len(points) == 0:
            return points
        # Simple duplicate removal based on spatial proximity
        # For production use, consider more efficient spatial data structures
        unique_points = [points[0]]
        for point in points[1:]:
            is_duplicate = False
            for unique_point in unique_points:
                distance = np.sqrt((point['x'] - unique_point['x'])**2 +
                                   (point['y'] - unique_point['y'])**2 +
                                   (point['z'] - unique_point['z'])**2)
                if distance < threshold:
                    is_duplicate = True
                    break
            if not is_duplicate:
                unique_points.append(point)
        return np.array(unique_points)
# Driver: accumulate 50 frames into a rolling 5-frame window, periodically
# reporting the merged cloud size, then analyze the final merge.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
pipeline.start(config)

accumulator = PointCloudAccumulator(max_frames=5)

try:
    print("Accumulating point clouds...")
    # Collect frames
    for i in range(50):
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        if depth_frame:
            accumulator.add_frame(depth_frame)
            if i % 10 == 0:
                merged = accumulator.get_merged_pointcloud()
                print(f"Frame {i}: accumulated {len(merged)} unique points")

    # Get final merged point cloud
    final_pointcloud = accumulator.get_merged_pointcloud()
    print(f"Final merged point cloud: {len(final_pointcloud)} points")
    if len(final_pointcloud) > 0:
        # Analyze merged point cloud
        x_coords = final_pointcloud['x']
        y_coords = final_pointcloud['y']
        z_coords = final_pointcloud['z']
        print("Merged point cloud bounds:")
        print(f" X: {x_coords.min():.3f} to {x_coords.max():.3f}m")
        print(f" Y: {y_coords.min():.3f} to {y_coords.max():.3f}m")
        print(f" Z: {z_coords.min():.3f} to {z_coords.max():.3f}m")
finally:
    pipeline.stop()

import pyrealsense2 as rs
import numpy as np
import cv2
# Example: project the point cloud back onto a 2D image and preview it
# with OpenCV, shading each point by depth.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
pipeline.start(config)

pc = rs.pointcloud()

try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    if depth_frame:
        # Generate point cloud
        points = pc.calculate(depth_frame)
        vertices = np.asanyarray(points.get_vertices())

        # Convert to OpenCV format for visualization
        valid_points = vertices[vertices['z'] > 0]
        if len(valid_points) > 0:
            # Pinhole projection with hard-coded focal lengths (320/240 px)
            # and principal point at image center — preview only.
            x_2d = valid_points['x'] / valid_points['z'] * 320 + 320
            y_2d = valid_points['y'] / valid_points['z'] * 240 + 240

            # Create visualization image
            img = np.zeros((480, 640, 3), dtype=np.uint8)
            for i in range(len(valid_points)):
                if 0 <= x_2d[i] < 640 and 0 <= y_2d[i] < 480:
                    # Scale depth to 0-255, clamping beyond 5m (the original
                    # value overflowed 255 for points farther than 5 meters)
                    depth_val = min(255, int(valid_points[i]['z'] * 255 / 5.0))
                    color = (depth_val, depth_val, depth_val)
                    cv2.circle(img, (int(x_2d[i]), int(y_2d[i])), 1, color, -1)
            cv2.imshow('Point Cloud Projection', img)
            cv2.waitKey(1)
finally:
    pipeline.stop()
    cv2.destroyAllWindows()

# Install with Tessl CLI:
#   npx tessl i tessl/pypi-pyrealsense2