Remove image background using advanced AI models including U-Net, BiRefNet, and SAM with support for multiple input formats and GPU acceleration
84
Model session creation and management system that provides access to 23 different AI models optimized for various background removal tasks. Sessions encapsulate model loading, GPU configuration, and prediction logic.
Create new model sessions with automatic provider detection and configuration.
def new_session(model_name: str = "u2net", *args, **kwargs) -> BaseSession:
    """
    Create a new session object based on the specified model name.

    Parameters:
    - model_name: Name of the AI model to use (default: "u2net")
    - providers: List of execution providers (auto-detected if not specified)
    - *args: Additional positional arguments passed to session
    - **kwargs: Additional keyword arguments passed to session

    Returns:
    BaseSession instance for the specified model

    Raises:
    ValueError: If model_name is not found in available sessions
    """


# Usage Examples:
from rembg import new_session

# Create default U2Net session
session = new_session()

# Create specific model session
portrait_session = new_session('birefnet_portrait')

# Create session with custom providers
gpu_session = new_session('u2net', providers=['CUDAExecutionProvider'])

# Use session for background removal
from rembg import remove

result = remove(image, session=session)

# Abstract base class that all model sessions inherit from, providing common functionality.
class BaseSession:
    """Base class for managing a session with a machine learning model."""

    def __init__(
        self,
        model_name: str,
        sess_opts: ort.SessionOptions,
        *args,
        **kwargs
    ):
        """
        Initialize a session instance.

        Parameters:
        - model_name: Name of the model
        - sess_opts: ONNX Runtime session options
        - providers: List of execution providers (optional)
        - *args: Additional positional arguments
        - **kwargs: Additional keyword arguments
        """

    def normalize(
        self,
        img: PILImage,
        mean: Tuple[float, float, float],
        std: Tuple[float, float, float],
        size: Tuple[int, int],
        *args,
        **kwargs
    ) -> Dict[str, np.ndarray]:
        """
        Normalize input image for model inference.

        Parameters:
        - img: Input PIL image
        - mean: RGB mean values for normalization
        - std: RGB standard deviation values for normalization
        - size: Target size (width, height) for resizing

        Returns:
        Dictionary with normalized image data for model input
        """

    def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]:
        """
        Abstract method for model prediction.

        Parameters:
        - img: Input PIL image
        - *args: Additional positional arguments
        - **kwargs: Additional keyword arguments

        Returns:
        List of PIL Images containing prediction masks
        """

    @classmethod
    def checksum_disabled(cls, *args, **kwargs) -> bool:
        """Check if model checksum validation is disabled via environment variable."""

    @classmethod
    def u2net_home(cls, *args, **kwargs) -> str:
        """Get the home directory for model storage."""

    @classmethod
    def download_models(cls, *args, **kwargs):
        """Abstract method for downloading model weights."""

    @classmethod
    def name(cls, *args, **kwargs) -> str:
        """Abstract method returning the model name."""


# Complete list of available AI model sessions, each optimized for specific use cases.
# General-purpose models
class U2netSession(BaseSession):
    """U-Net 2.0 general-purpose background removal."""

class U2netpSession(BaseSession):
    """U-Net 2.0 portrait-optimized model."""

class U2netCustomSession(BaseSession):
    """U-Net 2.0 with custom training."""


# Human segmentation models
class U2netHumanSegSession(BaseSession):
    """U-Net 2.0 optimized for human segmentation."""

class Unet2ClothSession(BaseSession):
    """U-Net 2.0 specialized for clothing segmentation."""


# BiRefNet models (high-quality)
class BiRefNetSessionGeneral(BaseSession):
    """BiRefNet general-purpose high-quality model."""

class BiRefNetSessionGeneralLite(BaseSession):
    """BiRefNet general-purpose lightweight model."""

class BiRefNetSessionPortrait(BaseSession):
    """BiRefNet optimized for portrait photography."""

class BiRefNetSessionDIS(BaseSession):
    """BiRefNet with DIS (Dichotomous Image Segmentation)."""

class BiRefNetSessionHRSOD(BaseSession):
    """BiRefNet for High-Resolution Salient Object Detection."""

class BiRefNetSessionCOD(BaseSession):
    """BiRefNet for Camouflaged Object Detection."""

class BiRefNetSessionMassive(BaseSession):
    """BiRefNet massive model for highest quality."""


# Specialized models
class DisSession(BaseSession):
    """DIS model optimized for anime/cartoon characters."""

class DisCustomSession(BaseSession):
    """DIS model with custom training."""

class DisSessionGeneralUse(BaseSession):
    """DIS model for general use cases."""

class SamSession(BaseSession):
    """Segment Anything Model for versatile segmentation."""

class SiluetaSession(BaseSession):
    """Silueta model for silhouette extraction."""

class BriaRmBgSession(BaseSession):
    """Bria background removal specialized model."""

class BenCustomSession(BaseSession):
    """Ben custom-trained model."""


# Access to the complete session registry and model names.
# Dictionary mapping model names to session classes
sessions: Dict[str, type[BaseSession]]

# List of all available model names
sessions_names: List[str]

# List of all session classes
sessions_class: List[type[BaseSession]]

# Usage Examples:
from rembg.sessions import sessions, sessions_names, sessions_class

# List all available models
print("Available models:", sessions_names)

# Get session class by name
u2net_class = sessions['u2net']

# Create session instance directly
session = u2net_class('u2net', sess_opts)

# Sessions automatically detect and configure GPU acceleration:
# GPU providers are auto-detected based on availability:
# - CUDAExecutionProvider (NVIDIA GPUs)
# - ROCMExecutionProvider (AMD GPUs)
# - CPUExecutionProvider (fallback)

# Manual provider specification
session = new_session('u2net', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])

# Environment variables:
# - OMP_NUM_THREADS: Set number of threads for CPU processing
# - MODEL_CHECKSUM_DISABLED: Disable model file checksum validation
# - U2NET_HOME: Custom directory for model storage (default: ~/.u2net)

# Install with Tessl CLI
npx tessl i tessl/pypi-rembg

evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9
scenario-10