CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-insightface

A comprehensive 2D and 3D face analysis toolkit with state-of-the-art algorithms for face recognition, detection, and alignment.

Pending
Overview
Eval results
Files

mask-rendering.mddocs/

3D Mask Rendering

Advanced 3D face mask rendering capabilities using morphable face models for applications like face swapping, virtual try-on, augmented reality, and face mask simulation. Supports both realistic mask overlays and data augmentation for training.

Capabilities

MaskRenderer Class

3D face mask rendering engine that uses morphable models to fit virtual masks to faces with proper perspective and lighting.

class MaskRenderer:
    def __init__(self, name='buffalo_l', root='~/.insightface', insfa=None):
        """
        Initialize 3D mask renderer.
        
        Parameters:
        - name: str, model pack name containing 3D morphable model
        - root: str, model storage directory
        - insfa: FaceAnalysis instance, if provided will use for face detection
        """
    
    def prepare(self, ctx_id=0, det_thresh=0.5, det_size=(128, 128)):
        """
        Prepare renderer for inference.
        
        Parameters:
        - ctx_id: int, device context ID
        - det_thresh: float, detection confidence threshold
        - det_size: tuple, detection input size for face analysis
        """
    
    def build_params(self, face_image):
        """
        Build 3D face parameters from input image.
        
        Parameters:
        - face_image: np.ndarray, input face image
        
        Returns:
        dict: 3D face parameters including shape, expression, pose, and texture
        """
    
    def render_mask(self, face_image, mask_image, params, input_is_rgb=False, auto_blend=True, positions=[0.1, 0.33, 0.9, 0.7]):
        """
        Render 3D mask on face image.
        
        Parameters:
        - face_image: np.ndarray, target face image
        - mask_image: np.ndarray, mask texture image
        - params: dict, 3D face parameters from build_params()
        - input_is_rgb: bool, whether input images are RGB (default: BGR)
        - auto_blend: bool, automatically blend mask edges
        - positions: list, mask positioning parameters [x_offset, y_offset, width_scale, height_scale]
        
        Returns:
        np.ndarray: image with rendered 3D mask
        """
    
    def draw_lmk(self, face_image):
        """
        Draw 3D landmarks on face image for debugging.
        
        Parameters:
        - face_image: np.ndarray, input face image
        
        Returns:
        np.ndarray: image with drawn 3D landmarks
        """

Static Parameter Operations

Static methods on `MaskRenderer` for encoding and decoding 3D face parameters for storage and transmission (shown dedented here for readability; call them as `MaskRenderer.encode_params(...)` / `MaskRenderer.decode_params(...)`).

@staticmethod
def encode_params(params):
    """
    Encode face parameters for storage.
    
    Parameters:
    - params: dict, face parameters dictionary
    
    Returns:
    bytes: encoded parameter data
    """

@staticmethod 
def decode_params(params):
    """
    Decode stored face parameters.
    
    Parameters:
    - params: bytes, encoded parameter data
    
    Returns:
    dict: decoded face parameters
    """

MaskAugmentation Class

Albumentations-compatible data augmentation transform for training robust face analysis models.

class MaskAugmentation(ImageOnlyTransform):
    def __init__(self, mask_names=['mask_white', 'mask_blue', 'mask_black', 'mask_green'],
                 mask_probs=[0.4, 0.4, 0.1, 0.1], h_low=0.33, h_high=0.35, always_apply=False, p=1.0):
        """
        Initialize mask augmentation transform.
        
        Parameters:
        - mask_names: list, available mask types
        - mask_probs: list, probability for each mask type
        - h_low: float, minimum mask height ratio
        - h_high: float, maximum mask height ratio  
        - always_apply: bool, whether to always apply transform
        - p: float, probability of applying transform
        """
    
    def apply(self, image, hlabel, mask_name, h_pos, **params):
        """
        Apply mask augmentation to image.
        
        Parameters:
        - image: np.ndarray, input image
        - hlabel: float, mask height label
        - mask_name: str, mask type to apply
        - h_pos: float, vertical position of mask
        - **params: additional parameters
        
        Returns:
        np.ndarray: augmented image with mask
        """

Usage Examples

Basic Mask Rendering

import cv2
from insightface.app.mask_renderer import MaskRenderer

# Initialize mask renderer
renderer = MaskRenderer(name='buffalo_l')
renderer.prepare(ctx_id=0)

# Load face and mask images
face_img = cv2.imread('person.jpg')
mask_img = cv2.imread('surgical_mask.png')

# Build 3D face parameters
params = renderer.build_params(face_img)
print(f"Built parameters with keys: {list(params.keys())}")

# Render mask on face
result = renderer.render_mask(face_img, mask_img, params, auto_blend=True)

# Save result
cv2.imwrite('masked_face.jpg', result)

Custom Mask Positioning

# Fine-tune mask position and size
positions = [0.05, 0.4, 0.8, 0.6]  # [x_offset, y_offset, width_scale, height_scale]

result = renderer.render_mask(
    face_img, 
    mask_img, 
    params,
    positions=positions,
    auto_blend=True
)

cv2.imwrite('custom_positioned_mask.jpg', result)

Batch Mask Rendering

from insightface.app import FaceAnalysis

# Setup face analysis and mask renderer
app = FaceAnalysis()
app.prepare(ctx_id=0)

renderer = MaskRenderer()
renderer.prepare(ctx_id=0)

def render_masks_on_all_faces(image, mask_image):
    """Apply masks to all detected faces in an image."""
    faces = app.get(image)
    result_img = image.copy()
    
    for i, face in enumerate(faces):
        # Extract face region
        x1, y1, x2, y2 = face.bbox.astype(int)
        face_region = image[y1:y2, x1:x2]
        
        if face_region.size > 0:
            # Build parameters for this face
            params = renderer.build_params(face_region)
            
            # Render mask on face region
            masked_region = renderer.render_mask(face_region, mask_image, params)
            
            # Paste back to original image
            result_img[y1:y2, x1:x2] = masked_region
    
    return result_img

# Process group photo
group_img = cv2.imread('group_photo.jpg')
mask_texture = cv2.imread('blue_mask.png')

masked_group = render_masks_on_all_faces(group_img, mask_texture)
cv2.imwrite('masked_group.jpg', masked_group)

Parameter Persistence

# Build and save parameters for later use
face_img = cv2.imread('face.jpg')
params = renderer.build_params(face_img)

# Encode parameters for storage
encoded_params = MaskRenderer.encode_params(params)
print(f"Encoded parameters size: {len(encoded_params)} bytes")

# Save to file
with open('face_params.bin', 'wb') as f:
    f.write(encoded_params)

# Later: load and decode parameters
with open('face_params.bin', 'rb') as f:
    loaded_encoded = f.read()

decoded_params = MaskRenderer.decode_params(loaded_encoded)

# Use loaded parameters for rendering
mask_img = cv2.imread('new_mask.png')
result = renderer.render_mask(face_img, mask_img, decoded_params)

Data Augmentation for Training

import albumentations as A
from insightface.app.mask_renderer import MaskAugmentation

# Setup augmentation pipeline
transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.RandomBrightnessContrast(p=0.3),
    MaskAugmentation(
        mask_names=['mask_white', 'mask_blue', 'mask_surgical'],
        mask_probs=[0.3, 0.3, 0.4],
        p=0.7  # 70% chance to apply mask
    ),
    A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Apply to training images
def augment_training_data(image_path):
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    
    augmented = transform(image=image)
    return augmented['image']

# Process training dataset
training_images = ['face1.jpg', 'face2.jpg', 'face3.jpg']
augmented_data = []

for img_path in training_images:
    # Generate multiple augmented versions
    for i in range(5):
        aug_image = augment_training_data(img_path)
        augmented_data.append(aug_image)

print(f"Generated {len(augmented_data)} augmented training samples")

3D Landmark Visualization

# Visualize 3D face fitting quality
import numpy as np

def visualize_3d_fitting(face_image):
    """Visualize 3D face model fitting quality."""
    # Build 3D parameters
    params = renderer.build_params(face_image)
    
    # Draw 3D landmarks
    landmark_img = renderer.draw_lmk(face_image)
    
    # Create side-by-side comparison
    combined = np.hstack([face_image, landmark_img])
    
    return combined, params

face_img = cv2.imread('test_face.jpg')
comparison, params = visualize_3d_fitting(face_img)

cv2.imwrite('3d_fitting_comparison.jpg', comparison)
print(f"3D model parameters: {params.keys()}")

Multiple Mask Types

# Apply different mask types with varying parameters
mask_types = {
    'surgical': {
        'image': 'surgical_mask.png',
        'positions': [0.1, 0.35, 0.8, 0.5]
    },
    'n95': {
        'image': 'n95_mask.png', 
        'positions': [0.05, 0.3, 0.9, 0.6]
    },
    'cloth': {
        'image': 'cloth_mask.png',
        'positions': [0.08, 0.38, 0.85, 0.45]
    }
}

def render_different_masks(face_image, params):
    """Render different mask types on the same face."""
    results = {}
    
    for mask_name, config in mask_types.items():
        mask_img = cv2.imread(config['image'])
        
        result = renderer.render_mask(
            face_image,
            mask_img, 
            params,
            positions=config['positions'],
            auto_blend=True
        )
        
        results[mask_name] = result
    
    return results

# Apply different masks
face_img = cv2.imread('person.jpg')
params = renderer.build_params(face_img)
mask_results = render_different_masks(face_img, params)

# Save results
for mask_type, result_img in mask_results.items():
    cv2.imwrite(f'face_with_{mask_type}_mask.jpg', result_img)

Real-time Mask Rendering

import time

def real_time_mask_rendering():
    """Real-time mask rendering from webcam."""
    cap = cv2.VideoCapture(0)
    
    # Load mask image
    mask_img = cv2.imread('default_mask.png')
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        
        try:
            # Build parameters for current frame
            params = renderer.build_params(frame)
            
            # Render mask
            masked_frame = renderer.render_mask(frame, mask_img, params)
            
            cv2.imshow('Real-time Mask', masked_frame)
            
        except Exception as e:
            # Fallback to original frame if rendering fails
            cv2.imshow('Real-time Mask', frame)
        
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    
    cap.release()
    cv2.destroyAllWindows()

# Run real-time rendering (uncomment to use)
# real_time_mask_rendering()

Install with Tessl CLI

npx tessl i tessl/pypi-insightface

docs

3d-models.md

cli.md

face-analysis.md

face-processing.md

index.md

mask-rendering.md

model-management.md

model-zoo.md

sample-data.md

tile.json