Python bindings for Intel RealSense SDK 2.0 providing access to depth and color cameras for computer vision applications.
—
Low-level sensor control for advanced applications requiring precise stream configuration, custom frame processing, or multi-device synchronization. Provides direct access to individual sensors and their capabilities.
Core sensor interface with streaming and configuration capabilities.
class sensor:
def get_stream_profiles() -> list[stream_profile]:
"""
Get all available stream profiles for this sensor.
Returns:
list[stream_profile]: Available stream configurations
"""
@property
def profiles(self) -> list[stream_profile]:
"""All available stream profiles."""
def open(profile):
"""
Open sensor with single stream profile.
Args:
profile (stream_profile): Stream configuration to open
"""
def open(profiles):
"""
Open sensor with multiple stream profiles.
Args:
profiles (list[stream_profile]): Stream configurations to open
"""
def close():
"""Close sensor and stop streaming."""
def get_active_streams() -> list[stream_profile]:
"""
Get currently active stream profiles.
Returns:
list[stream_profile]: Active stream configurations
"""
def start(callback_function):
"""
Start streaming with frame callback.
Args:
callback_function: Function called for each frame
"""
def start(syncer):
"""
Start streaming into frame synchronizer.
Args:
syncer (syncer): Frame synchronizer
"""
def start(frame_queue):
"""
Start streaming into frame queue.
Args:
frame_queue (frame_queue): Frame buffer queue
"""
def stop():
"""Stop streaming."""
@property
def name(self) -> str:
"""Sensor name if available."""
def supports(camera_info) -> bool:
"""
Check if sensor supports information field.
Args:
camera_info (camera_info): Information field to check
Returns:
bool: True if supported
"""
def get_info(camera_info) -> str:
"""
Get sensor information.
Args:
camera_info (camera_info): Information field to retrieve
Returns:
str: Information value
"""
def get_recommended_filters() -> list[filter]:
"""
Get recommended processing filters for this sensor.
Returns:
list[filter]: Recommended filters
"""
def set_notifications_callback(callback):
"""
Set callback for sensor notifications.
Args:
callback: Function called with notification events
"""
# Type checking methods
def is_depth_sensor() -> bool:
"""Check if sensor is a depth sensor."""
def is_color_sensor() -> bool:
"""Check if sensor is a color sensor."""
def is_motion_sensor() -> bool:
"""Check if sensor is a motion sensor."""
def is_fisheye_sensor() -> bool:
"""Check if sensor is a fisheye sensor."""
def is_pose_sensor() -> bool:
"""Check if sensor is a pose sensor."""
def is_roi_sensor() -> bool:
"""Check if sensor supports region of interest."""
# Type casting methods
def as_depth_sensor() -> depth_sensor:
"""Cast to depth sensor."""
def as_color_sensor() -> color_sensor:
"""Cast to color sensor."""
def as_motion_sensor() -> motion_sensor:
"""Cast to motion sensor."""
def as_fisheye_sensor() -> fisheye_sensor:
"""Cast to fisheye sensor."""
def as_pose_sensor() -> pose_sensor:
"""Cast to pose sensor."""
def as_roi_sensor() -> roi_sensor:
"""Cast to ROI sensor."""
@staticmethod
def from_frame(frame) -> sensor:
"""
Get sensor that produced a frame.
Args:
frame (frame): Frame to trace back to sensor
Returns:
sensor: Originating sensor
"""class depth_sensor(sensor):
def get_depth_scale() -> float:
"""
Get depth scale factor (meters per depth unit).
Returns:
float: Depth scale in meters per unit
"""class depth_stereo_sensor(depth_sensor):
def get_stereo_baseline() -> float:
"""
Get stereo baseline distance.
Returns:
float: Baseline distance in millimeters
"""class roi_sensor(sensor):
def set_region_of_interest(roi):
"""
Set region of interest for processing.
Args:
roi (region_of_interest): ROI bounds
"""
def get_region_of_interest() -> region_of_interest:
"""
Get current region of interest.
Returns:
region_of_interest: Current ROI bounds
"""class pose_sensor(sensor):
def import_localization_map(map_data):
"""
Import localization map data.
Args:
map_data (bytes): Map data to import
"""
def export_localization_map() -> bytes:
"""
Export current localization map.
Returns:
bytes: Map data
"""
def set_static_node(guid, position, orientation):
"""
Set static node in map.
Args:
guid (str): Node unique identifier
position (list[float]): 3D position [x, y, z]
orientation (list[float]): Quaternion [x, y, z, w]
"""
def get_static_node(guid) -> tuple[bool, list[float], list[float]]:
"""
Get static node from map.
Args:
guid (str): Node unique identifier
Returns:
tuple: (success, position, orientation)
"""
def remove_static_node(guid):
"""
Remove static node from map.
Args:
guid (str): Node unique identifier
"""class color_sensor(sensor):
"""Color camera sensor - inherits base sensor functionality."""
class motion_sensor(sensor):
"""IMU/motion sensor - inherits base sensor functionality."""
class fisheye_sensor(sensor):
"""Fisheye camera sensor - inherits base sensor functionality."""
class wheel_odometer(sensor):
def load_wheel_odometery_config(config_data):
"""
Load wheel odometry configuration.
Args:
config_data (bytes): Configuration data
"""
def send_wheel_odometry(sensor_id, frame_num, translational_velocity):
"""
Send wheel odometry data.
Args:
sensor_id (int): Sensor identifier
frame_num (int): Frame number
translational_velocity (list[float]): Velocity vector [x, y, z]
"""
class max_usable_range_sensor(sensor):
def get_max_usable_depth_range() -> float:
"""
Get maximum usable depth range.
Returns:
float: Maximum range in meters
"""
class debug_stream_sensor(sensor):
def get_debug_stream_profiles() -> list[stream_profile]:
"""
Get debug stream profiles.
Returns:
list[stream_profile]: Debug stream configurations
"""Stream configuration and parameter definitions.
class stream_profile:
def stream_index() -> int:
"""
Get stream index for multi-instance streams.
Returns:
int: Stream index
"""
def stream_type() -> stream:
"""
Get stream type.
Returns:
stream: Stream type enum
"""
def format() -> format:
"""
Get data format.
Returns:
format: Data format enum
"""
def fps() -> int:
"""
Get framerate.
Returns:
int: Frames per second
"""
def unique_id() -> int:
"""
Get unique stream identifier.
Returns:
int: Unique ID
"""
def stream_name() -> str:
"""
Get stream name.
Returns:
str: Human-readable stream name
"""
def is_default() -> bool:
"""
Check if this is the default profile for the stream.
Returns:
bool: True if default profile
"""
def bytes_per_pixel() -> int:
"""
Get bytes per pixel for this format.
Returns:
int: Bytes per pixel
"""
def get_extrinsics_to(other_profile) -> extrinsics:
"""
Get extrinsic transformation to another stream.
Args:
other_profile (stream_profile): Target stream profile
Returns:
extrinsics: Transformation parameters
"""
def register_extrinsics_to(other_profile, extrinsics):
"""
Register extrinsic transformation to another stream.
Args:
other_profile (stream_profile): Target stream profile
extrinsics (extrinsics): Transformation parameters
"""
def clone(stream_type, stream_index, format) -> stream_profile:
"""
Clone profile with different parameters.
Args:
stream_type (stream): New stream type
stream_index (int): New stream index
format (format): New format
Returns:
stream_profile: Cloned profile
"""
# Type checking and casting
def is_video_stream_profile() -> bool:
"""Check if profile is for video stream."""
def is_motion_stream_profile() -> bool:
"""Check if profile is for motion stream."""
def is_pose_stream_profile() -> bool:
"""Check if profile is for pose stream."""
def as_video_stream_profile() -> video_stream_profile:
"""Cast to video stream profile."""
def as_motion_stream_profile() -> motion_stream_profile:
"""Cast to motion stream profile."""
def as_pose_stream_profile() -> pose_stream_profile:
"""Cast to pose stream profile."""class video_stream_profile(stream_profile):
def width() -> int:
"""
Get frame width.
Returns:
int: Width in pixels
"""
def height() -> int:
"""
Get frame height.
Returns:
int: Height in pixels
"""
def get_intrinsics() -> intrinsics:
"""
Get camera intrinsic parameters.
Returns:
intrinsics: Camera calibration parameters
"""
@property
def intrinsics(self) -> intrinsics:
"""Camera intrinsic parameters."""class motion_stream_profile(stream_profile):
def get_motion_intrinsics() -> motion_device_intrinsic:
"""
Get motion sensor intrinsic parameters.
Returns:
motion_device_intrinsic: IMU calibration parameters
"""class pose_stream_profile(stream_profile):
"""Pose stream profile - inherits base functionality."""Sensor notification system for device events and status changes.
class notification:
    """A device event/status notification delivered to the notifications callback."""

    def get_category() -> notification_category:
        """
        Get notification category.

        Returns:
            notification_category: Notification type
        """

    def get_description() -> str:
        """
        Get human-readable description.

        Returns:
            str: Notification description
        """

    def get_timestamp() -> float:
        """
        Get notification timestamp.

        Returns:
            float: Timestamp in milliseconds
        """

    def get_severity() -> log_severity:
        """
        Get notification severity level.

        Returns:
            log_severity: Severity level
        """

    def get_serialized_data() -> str:
        """
        Get serialized notification data.

        Returns:
            str: Serialized data
        """

    # Properties (read-only)
    @property
    def category(self) -> notification_category:
        """Notification category."""

    @property
    def description(self) -> str:
        """Notification description."""

    @property
    def timestamp(self) -> float:
        """Notification timestamp."""

    @property
    def severity(self) -> log_severity:
        """Notification severity."""

    @property
    def serialized_data(self) -> str:
        """Serialized notification data."""

import pyrealsense2 as rs
# Get device and sensors
ctx = rs.context()
device = ctx.query_devices()[0]
sensors = device.query_sensors()

# Find depth sensor
depth_sensor = None
for sensor in sensors:
    if sensor.is_depth_sensor():
        depth_sensor = sensor.as_depth_sensor()
        break

if not depth_sensor:
    print("No depth sensor found")
    raise SystemExit  # bare exit() is discouraged outside the REPL

# Get available profiles
profiles = depth_sensor.get_stream_profiles()
print(f"Found {len(profiles)} stream profiles:")
for i, profile in enumerate(profiles):
    # Guard before casting — not every profile is a video profile
    if not profile.is_video_stream_profile():
        continue
    vp = profile.as_video_stream_profile()
    print(f" {i}: {vp.stream_type().name} "
          f"{vp.width()}x{vp.height()} "
          f"{vp.format().name} @ {vp.fps()}fps")

# Select 640x480 Z16 profile
selected_profile = None
for profile in profiles:
    if not profile.is_video_stream_profile():
        continue
    vp = profile.as_video_stream_profile()
    if (vp.width() == 640 and vp.height() == 480 and
            vp.format() == rs.format.z16):
        selected_profile = profile
        break

if selected_profile:
    print(f"Selected profile: {selected_profile.stream_name()}")

    # Open and start streaming
    depth_sensor.open(selected_profile)

    # Use frame queue for processing
    frame_queue = rs.frame_queue(capacity=10)
    depth_sensor.start(frame_queue)

    try:
        for i in range(50):
            frame = frame_queue.wait_for_frame(timeout_ms=1000)
            depth_frame = frame.as_depth_frame()
            print(f"Frame {depth_frame.get_frame_number()}: "
                  f"{depth_frame.get_width()}x{depth_frame.get_height()}")
    finally:
        # Always release the sensor, even if a frame wait times out
        depth_sensor.stop()
        depth_sensor.close()

import pyrealsense2 as rs
import threading
import time


class FrameProcessor:
    """Counts and reports frames arriving on depth/color callback threads."""

    def __init__(self):
        self.frame_count = 0
        # Callbacks fire on SDK worker threads, so the shared counter
        # needs a lock.
        self.lock = threading.Lock()

    def depth_callback(self, frame):
        with self.lock:
            self.frame_count += 1
        # NOTE(review): original indentation was lost — assuming only the
        # counter update needs the lock; frame access is per-callback local.
        depth_frame = frame.as_depth_frame()
        print(f"Depth frame {depth_frame.get_frame_number()}")

    def color_callback(self, frame):
        with self.lock:
            self.frame_count += 1
        color_frame = frame.as_video_frame()
        print(f"Color frame {color_frame.get_frame_number()}")


# Get device and sensors
ctx = rs.context()
device = ctx.query_devices()[0]

# Find sensors
depth_sensor = device.first_depth_sensor()
color_sensor = device.first_color_sensor()

# Configure sensors
depth_profiles = depth_sensor.get_stream_profiles()
color_profiles = color_sensor.get_stream_profiles()

# Select profiles (simplified selection)
depth_profile = depth_profiles[0]  # Use first available
color_profile = color_profiles[0]  # Use first available

processor = FrameProcessor()

# Start streaming
depth_sensor.open(depth_profile)
color_sensor.open(color_profile)
depth_sensor.start(processor.depth_callback)
color_sensor.start(processor.color_callback)

try:
    time.sleep(5)  # Stream for 5 seconds
finally:
    depth_sensor.stop()
    color_sensor.stop()
    depth_sensor.close()
    color_sensor.close()

print(f"Processed {processor.frame_count} frames total")
import pyrealsense2 as rs
ctx = rs.context()
device = ctx.query_devices()[0]

for i, sensor in enumerate(device.query_sensors()):
    print(f"\nSensor {i}:")
    if sensor.supports(rs.camera_info.name):
        print(f" Name: {sensor.get_info(rs.camera_info.name)}")

    # Check sensor type
    sensor_types = []
    if sensor.is_depth_sensor():
        sensor_types.append("Depth")
        ds = sensor.as_depth_sensor()
        print(f" Depth Scale: {ds.get_depth_scale()}")
    if sensor.is_roi_sensor():
        sensor_types.append("ROI")
    if sensor.is_color_sensor():
        sensor_types.append("Color")
    if sensor.is_motion_sensor():
        sensor_types.append("Motion")
    if sensor.is_fisheye_sensor():
        sensor_types.append("Fisheye")
    if sensor.is_pose_sensor():
        sensor_types.append("Pose")
    print(f" Types: {', '.join(sensor_types)}")

    # List stream profiles
    profiles = sensor.get_stream_profiles()
    print(f" Stream Profiles: {len(profiles)}")
    for j, profile in enumerate(profiles[:5]):  # Show first 5
        if profile.is_video_stream_profile():
            vp = profile.as_video_stream_profile()
            print(f" {j}: {vp.stream_type().name} "
                  f"{vp.width()}x{vp.height()} "
                  f"{vp.format().name} @ {vp.fps()}fps")
        else:
            print(f" {j}: {profile.stream_type().name} "
                  f"{profile.format().name} @ {profile.fps()}fps")

    # Get recommended filters
    filters = sensor.get_recommended_filters()
    if filters:
        filter_names = [type(f).__name__ for f in filters]
        print(f" Recommended Filters: {', '.join(filter_names)}")

import pyrealsense2 as rs
import time


def notification_callback(notification):
    """Print the details of a sensor notification event."""
    print(f"Notification: {notification.get_description()}")
    print(f" Category: {notification.get_category().name}")
    print(f" Severity: {notification.get_severity().name}")
    print(f" Timestamp: {notification.get_timestamp()}")


ctx = rs.context()
device = ctx.query_devices()[0]
sensor = device.query_sensors()[0]

# Set notification callback
sensor.set_notifications_callback(notification_callback)

# Start streaming to trigger notifications
profiles = sensor.get_stream_profiles()
if profiles:
    sensor.open(profiles[0])
    frame_queue = rs.frame_queue()
    sensor.start(frame_queue)
    try:
        time.sleep(5)  # Monitor for 5 seconds
    finally:
        sensor.stop()
        sensor.close()

import pyrealsense2 as rs
ctx = rs.context()
device = ctx.query_devices()[0]
# Get depth and color sensors
depth_sensor = device.first_depth_sensor()
color_sensor = device.first_color_sensor()
# Get profiles
depth_profiles = depth_sensor.get_stream_profiles()
color_profiles = color_sensor.get_stream_profiles()
# Find matching resolution profiles
depth_profile = None
color_profile = None
for dp in depth_profiles:
if dp.is_video_stream_profile():
dvp = dp.as_video_stream_profile()
if dvp.width() == 640 and dvp.height() == 480:
depth_profile = dp
break
for cp in color_profiles:
if cp.is_video_stream_profile():
cvp = cp.as_video_stream_profile()
if cvp.width() == 640 and cvp.height() == 480:
color_profile = cp
break
if depth_profile and color_profile:
# Get intrinsics
depth_intrinsics = depth_profile.as_video_stream_profile().get_intrinsics()
color_intrinsics = color_profile.as_video_stream_profile().get_intrinsics()
print("Depth Intrinsics:")
print(f" Resolution: {depth_intrinsics.width}x{depth_intrinsics.height}")
print(f" Principal Point: ({depth_intrinsics.ppx}, {depth_intrinsics.ppy})")
print(f" Focal Length: ({depth_intrinsics.fx}, {depth_intrinsics.fy})")
print(f" Distortion Model: {depth_intrinsics.model}")
print("\nColor Intrinsics:")
print(f" Resolution: {color_intrinsics.width}x{color_intrinsics.height}")
print(f" Principal Point: ({color_intrinsics.ppx}, {color_intrinsics.ppy})")
print(f" Focal Length: ({color_intrinsics.fx}, {color_intrinsics.fy})")
# Get extrinsics between streams
extrinsics = depth_profile.get_extrinsics_to(color_profile)
print(f"\nDepth to Color Extrinsics:")
print(f" Translation: {extrinsics.translation}")
print(f" Rotation: {extrinsics.rotation}")Install with Tessl CLI
npx tessl i tessl/pypi-pyrealsense2