CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-mmengine

Engine of OpenMMLab projects for training deep learning models based on PyTorch with large-scale training frameworks, configuration management, and monitoring capabilities

Pending
Overview
Eval results
Files

docs/visualization.md

Visualization and Analysis

Visualization framework supporting multiple backends and model analysis tools for computing FLOPs, activation memory, and parameter counts with comprehensive reporting capabilities. The system provides tools for monitoring training progress and analyzing model complexity.

Capabilities

Visualizer Class

Main visualization class for drawing and displaying various visual elements.

class Visualizer:
    """Unified drawing/display entry point.

    Draws primitives (boxes, points, lines, text, polygons) onto a working
    image and fans logged results out to the configured visualization
    backends (see the BaseVisBackend subclasses below).
    """

    def __init__(self, name: str = 'visualizer', image: np.ndarray | None = None, vis_backends: list | None = None, save_dir: str | None = None, bbox_color: str = 'green', text_color: str = 'green', mask_color: str = 'green', line_width: int = 3, alpha: float = 0.8):
        """
        Initialize visualizer with display and backend configuration.

        Parameters:
        - name: Visualizer name
        - image: Background image for visualization (optional)
        - vis_backends: List of visualization backend configs/instances (optional)
        - save_dir: Directory to save visualizations (optional)
        - bbox_color: Default bounding box color
        - text_color: Default text color
        - mask_color: Default mask color
        - line_width: Default line width
        - alpha: Default transparency (0.0 transparent .. 1.0 opaque)
        """

    def add_datasample(self, name: str, image: np.ndarray, data_sample = None, draw_gt: bool = True, draw_pred: bool = True, show: bool = False, wait_time: float = 0, out_file: str | None = None, pred_score_thr: float = 0.3, step: int = 0):
        """
        Add data sample visualization.

        Parameters:
        - name: Sample name
        - image: Input image
        - data_sample: Data sample with annotations/predictions (optional)
        - draw_gt: Whether to draw ground truth
        - draw_pred: Whether to draw predictions
        - show: Whether to display visualization
        - wait_time: Wait time for display
          (NOTE(review): units not stated here — confirm seconds vs
          milliseconds against the mmengine API)
        - out_file: Output file path (optional)
        - pred_score_thr: Prediction score threshold
        - step: Step number for logging
        """

    def draw_bboxes(self, bboxes: np.ndarray, edge_colors: str | list = 'green', line_styles: str = '-', line_widths: int = 2, face_colors: str = 'none', alpha: float = 0.8):
        """
        Draw bounding boxes.

        Parameters:
        - bboxes: Bounding box coordinates
        - edge_colors: Edge colors (a single color or one per box — see the
          usage example passing a list)
        - line_styles: Line styles
        - line_widths: Line widths
        - face_colors: Face colors
        - alpha: Transparency
        """

    def draw_points(self, positions: np.ndarray, colors: str | list = 'red', marker: str = 'o', sizes: int = 6, alpha: float = 0.8):
        """
        Draw points.

        Parameters:
        - positions: Point positions
        - colors: Point colors (a single color or one per point — see the
          usage example passing a list)
        - marker: Marker style
        - sizes: Point sizes
        - alpha: Transparency
        """

    def draw_lines(self, x_datas: np.ndarray, y_datas: np.ndarray, colors: str = 'blue', line_styles: str = '-', line_widths: int = 1):
        """
        Draw lines.

        Parameters:
        - x_datas: X coordinates
        - y_datas: Y coordinates
        - colors: Line colors
        - line_styles: Line styles
        - line_widths: Line widths
        """

    def draw_texts(self, texts: list, positions: np.ndarray, font_sizes: int = 8, colors: str = 'black', vertical_alignments: str = 'bottom', horizontal_alignments: str = 'left'):
        """
        Draw text annotations.

        Parameters:
        - texts: Text strings
        - positions: Text positions (one per text entry)
        - font_sizes: Font sizes
        - colors: Text colors
        - vertical_alignments: Vertical alignments
        - horizontal_alignments: Horizontal alignments
        """

    def draw_polygons(self, polygons: list, edge_colors: str = 'blue', line_styles: str = '-', line_widths: int = 1, face_colors: str = 'none', alpha: float = 0.8):
        """
        Draw polygons.

        Parameters:
        - polygons: Polygon coordinates
        - edge_colors: Edge colors
        - line_styles: Line styles
        - line_widths: Line widths
        - face_colors: Face colors
        - alpha: Transparency
        """

    def get_image(self) -> np.ndarray:
        """
        Get the current visualization image with everything drawn so far.

        Returns:
        Visualization image as numpy array
        """

    def show(self, drawn_img: np.ndarray = None, win_name: str = 'image', wait_time: int = 0):
        """
        Display visualization in a window.

        Parameters:
        - drawn_img: Image to display (defaults to the internal image)
        - win_name: Window name
        - wait_time: Wait time in milliseconds
        """

    @property
    def dataset_meta(self) -> dict:
        """Get dataset meta information (e.g. class names/palette)."""

Visualization Backends

Backend classes for different visualization and logging platforms.

class BaseVisBackend:
    """Abstract base for visualization backends.

    Subclasses persist configs, images and scalar metrics to a specific
    storage or experiment-tracking service.
    """

    def __init__(self, save_dir: str | None = None):
        """
        Base visualization backend.

        Parameters:
        - save_dir: Directory to save outputs (optional)
        """

    def add_config(self, config: dict, **kwargs):
        """Add configuration to backend."""

    def add_image(self, name: str, image: np.ndarray, step: int = 0, **kwargs):
        """Add image to backend at the given step."""

    def add_scalar(self, name: str, value: float, step: int = 0, **kwargs):
        """Add a single scalar value to backend at the given step."""

    def add_scalars(self, scalar_dict: dict, step: int = 0, **kwargs):
        """Add multiple scalars (name -> value) to backend at the given step."""

class LocalVisBackend(BaseVisBackend):
    """Backend that writes outputs to the local filesystem."""

    def __init__(self, save_dir: str = 'vis_data'):
        """
        Local filesystem visualization backend.

        Parameters:
        - save_dir: Local directory to save files
        """

class TensorboardVisBackend(BaseVisBackend):
    """Backend that logs to TensorBoard event files."""

    def __init__(self, save_dir: str = 'runs', **kwargs):
        """
        TensorBoard visualization backend.

        Parameters:
        - save_dir: TensorBoard log directory
        - **kwargs: Additional TensorBoard arguments (forwarded to the writer)
        """

class WandbVisBackend(BaseVisBackend):
    """Backend that logs to Weights & Biases."""

    def __init__(self, save_dir: str | None = None, init_kwargs: dict | None = None, define_metric_cfg: dict | None = None, commit: bool = True, log_code_name: str | None = None, watch_kwargs: dict | None = None):
        """
        Weights & Biases visualization backend.

        Parameters:
        - save_dir: Save directory (optional)
        - init_kwargs: W&B initialization arguments, e.g. dict(project=...)
        - define_metric_cfg: Metric definition configuration (optional)
        - commit: Whether to commit logs immediately
        - log_code_name: Code logging name (optional)
        - watch_kwargs: Model watching arguments (optional)
        """

class MLflowVisBackend(BaseVisBackend):
    """Backend that logs to an MLflow tracking server."""

    def __init__(self, save_dir: str | None = None, exp_name: str | None = None, run_name: str | None = None, tags: dict | None = None, params: dict | None = None, tracking_uri: str | None = None, artifact_suffix: tuple = ('.json', '.log', '.py', 'yaml')):
        """
        MLflow visualization backend.

        Parameters:
        - save_dir: Save directory (optional)
        - exp_name: Experiment name (optional)
        - run_name: Run name (optional)
        - tags: Run tags (optional)
        - params: Run parameters (optional)
        - tracking_uri: MLflow tracking URI (optional)
        - artifact_suffix: Artifact file suffixes to log
          (NOTE(review): 'yaml' has no leading dot unlike the other entries —
          appears to mirror the upstream default, but verify)
        """

class ClearMLVisBackend(BaseVisBackend):
    """Backend that logs to ClearML."""

    def __init__(self, save_dir: str | None = None, init_kwargs: dict | None = None, artifact_suffix: tuple = ('.json', '.log', '.py', 'yaml')):
        """
        ClearML visualization backend.

        Parameters:
        - save_dir: Save directory (optional)
        - init_kwargs: ClearML initialization arguments (optional)
        - artifact_suffix: Artifact file suffixes
        """

class NeptuneVisBackend(BaseVisBackend):
    """Backend that logs to Neptune."""

    def __init__(self, save_dir: str | None = None, init_kwargs: dict | None = None, artifact_suffix: tuple = ('.json', '.log', '.py', 'yaml')):
        """
        Neptune visualization backend.

        Parameters:
        - save_dir: Save directory (optional)
        - init_kwargs: Neptune initialization arguments (optional)
        - artifact_suffix: Artifact file suffixes
        """

class DVCLiveVisBackend(BaseVisBackend):
    """Backend that logs via DVC Live."""

    def __init__(self, save_dir: str | None = None, init_kwargs: dict | None = None, artifact_suffix: tuple = ('.json', '.log', '.py', 'yaml')):
        """
        DVC Live visualization backend.

        Parameters:
        - save_dir: Save directory (optional)
        - init_kwargs: DVC Live initialization arguments (optional)
        - artifact_suffix: Artifact file suffixes
        """

class AimVisBackend(BaseVisBackend):
    """Backend that logs to Aim."""

    def __init__(self, save_dir: str | None = None, init_kwargs: dict | None = None, artifact_suffix: tuple = ('.json', '.log', '.py', 'yaml')):
        """
        Aim visualization backend.

        Parameters:
        - save_dir: Save directory (optional)
        - init_kwargs: Aim initialization arguments (optional)
        - artifact_suffix: Artifact file suffixes
        """

Model Analysis Tools

Tools for analyzing model complexity and performance characteristics.

def get_model_complexity_info(model, input_shape: tuple, print_per_layer_stat: bool = True, as_strings: bool = True, input_constructor: callable = None, ost: any = None, verbose: bool = True, ignore_modules: list | None = None, custom_modules_hooks: dict | None = None):
    """
    Get comprehensive model complexity information (FLOPs, parameter count,
    activation memory) for a model run on a dummy input of the given shape.

    Parameters:
    - model: PyTorch model
    - input_shape: Input tensor shape (excluding batch dimension, per the
      usage example passing (3, 224, 224))
    - print_per_layer_stat: Whether to print per-layer statistics
    - as_strings: Whether to return human-readable formatted strings
      (e.g. the 'flops_str'/'params_str' keys used in the examples)
    - input_constructor: Function to construct input (optional; overrides
      input_shape-based dummy input)
    - ost: Output stream for printing (optional)
    - verbose: Whether to print detailed information
    - ignore_modules: Modules to ignore in analysis (optional)
    - custom_modules_hooks: Custom hooks for specific modules (optional)

    Returns:
    Dictionary with FLOPs, parameters, and memory information
    """

def parameter_count(model) -> int:
    """
    Count total model parameters.

    Parameters:
    - model: PyTorch model

    Returns:
    Total number of parameters
    """

def parameter_count_table(model, max_depth: int = 3) -> str:
    """
    Generate a human-readable parameter count table, broken down by module.

    Parameters:
    - model: PyTorch model
    - max_depth: Maximum depth of the module hierarchy to include

    Returns:
    Formatted table string
    """

def flop_count(model, inputs: tuple, supported_ops: dict | None = None) -> int:
    """
    Count FLOPs of a model for the given concrete inputs.

    Parameters:
    - model: PyTorch model
    - inputs: Input tensors (a tuple, passed to the model's forward)
    - supported_ops: Dictionary of supported operations (optional; extends
      or overrides the default op handlers)

    Returns:
    Total FLOPs count
    """

def activation_count(model, inputs: tuple) -> int:
    """
    Count activation memory produced by a forward pass on the given inputs.

    Parameters:
    - model: PyTorch model
    - inputs: Input tensors (a tuple, passed to the model's forward)

    Returns:
    Activation memory in bytes
    """

Analysis Classes

Specialized analyzer classes for detailed model analysis.

class FlopAnalyzer:
    """Per-module / per-operator FLOPs analysis for a model and fixed inputs."""

    def __init__(self, model, inputs: tuple):
        """
        Analyzer for FLOPs computation.

        Parameters:
        - model: PyTorch model
        - inputs: Input tensors (a tuple, passed to the model's forward)
        """

    def total(self) -> int:
        """
        Get total FLOPs over the whole model.

        Returns:
        Total FLOPs count
        """

    def by_module(self) -> dict:
        """
        Get FLOPs broken down by module name.

        Returns:
        Dictionary mapping module name to FLOPs
        """

    def by_operator(self) -> dict:
        """
        Get FLOPs broken down by operator type.

        Returns:
        Dictionary mapping operator type to FLOPs
        """

class ActivationAnalyzer:
    """Per-module activation-memory analysis for a model and fixed inputs."""

    def __init__(self, model, inputs: tuple):
        """
        Analyzer for activation memory.

        Parameters:
        - model: PyTorch model
        - inputs: Input tensors (a tuple, passed to the model's forward)
        """

    def total(self) -> int:
        """
        Get total activation memory over the whole model.

        Returns:
        Total activation memory in bytes
        """

    def by_module(self) -> dict:
        """
        Get activation memory broken down by module name.

        Returns:
        Dictionary mapping module name to activation memory
        """

Usage Examples

Basic Visualization Setup

from mmengine.visualization import Visualizer
import numpy as np

# Create visualizer with multiple backends.
# Every logged image/scalar is fanned out to each backend; the dict configs
# are resolved by their 'type' key (mmengine registry convention).
visualizer = Visualizer(
    name='my_visualizer',
    vis_backends=[
        dict(type='LocalVisBackend', save_dir='./vis_outputs'),
        dict(type='TensorboardVisBackend', save_dir='./tb_logs'),
        dict(type='WandbVisBackend', init_kwargs=dict(project='my_project'))
    ]
)

# Visualize data sample.
# A random HWC uint8 image stands in for real input data.
image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
visualizer.add_datasample(
    name='sample_1',
    image=image,
    show=True,           # pops up a display window
    out_file='sample_1.jpg'  # also writes the rendered image to disk
)

Drawing Annotations

# Draw bounding boxes (uses `visualizer` from the setup snippet above).
# Boxes are (x1, y1, x2, y2); colors may be given per-box as a list.
bboxes = np.array([[10, 10, 100, 100], [50, 50, 150, 150]])
visualizer.draw_bboxes(
    bboxes=bboxes,
    edge_colors=['red', 'blue'],
    line_widths=2,
    alpha=0.7
)

# Draw points at (x, y) positions, one color per point.
points = np.array([[30, 30], [70, 70], [120, 120]])
visualizer.draw_points(
    positions=points,
    colors=['red', 'green', 'blue'],
    sizes=10
)

# Draw text annotations; positions align one-to-one with texts.
texts = ['Object 1', 'Object 2']
positions = np.array([[10, 5], [50, 45]])
visualizer.draw_texts(
    texts=texts,
    positions=positions,
    font_sizes=12,
    colors='white'
)

# Get final image with all annotations composited.
result_image = visualizer.get_image()

Model Complexity Analysis

import torch
from mmengine.analysis import get_model_complexity_info, parameter_count_table

# Create a small demo model: conv -> relu -> global pool -> linear head.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 64, 3, padding=1),
    torch.nn.ReLU(),
    torch.nn.AdaptiveAvgPool2d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(64, 10)
)

# Get complexity information for a 3x224x224 input.
# as_strings=True yields the human-readable *_str keys used below.
complexity_info = get_model_complexity_info(
    model=model,
    input_shape=(3, 224, 224),
    print_per_layer_stat=True,
    as_strings=True
)

print(f"FLOPs: {complexity_info['flops_str']}")
print(f"Parameters: {complexity_info['params_str']}")
print(f"Activations: {complexity_info['activations_str']}")

# Generate a per-module parameter table, two levels deep.
param_table = parameter_count_table(model, max_depth=2)
print(param_table)

Training Visualization Integration

from mmengine import Runner
# Fix: Hook is subclassed below but was never imported in this example.
from mmengine.hooks import Hook
from mmengine.visualization import Visualizer

# Configure visualizer for training.
# Passed as a config dict so the Runner builds it through the registry;
# scalars/images logged during training reach all three backends.
visualizer_cfg = dict(
    type='Visualizer',
    vis_backends=[
        dict(type='LocalVisBackend'),
        dict(type='TensorboardVisBackend'),
        dict(
            type='WandbVisBackend',
            init_kwargs=dict(
                project='my_training',
                name='experiment_1',
                tags=['baseline', 'resnet50']
            )
        )
    ]
)

# Use in runner (model comes from earlier snippets / user code).
runner = Runner(
    model=model,
    visualizer=visualizer_cfg,
    # ... other configs
)

# Custom visualization hook: logs a training image every `interval` iters.
class VisualizationHook(Hook):
    def __init__(self, interval=100):
        # Number of iterations between logged samples.
        self.interval = interval
    
    def after_train_iter(self, runner):
        if runner.iter % self.interval == 0:
            # Log training images.
            # NOTE(review): assumes the current batch is reachable as
            # runner.data_batch with an 'inputs' tensor — confirm against
            # the mmengine Hook API for your version.
            data_batch = runner.data_batch
            runner.visualizer.add_datasample(
                name=f'train_iter_{runner.iter}',
                image=data_batch['inputs'][0].cpu().numpy(),
                step=runner.iter
            )

runner.register_hook(VisualizationHook(interval=500))

Custom Visualization Backend

from mmengine.visualization import BaseVisBackend

class CustomVisBackend(BaseVisBackend):
    """Example of integrating a third-party logging service by subclassing
    BaseVisBackend and overriding the add_* hooks."""

    def __init__(self, api_key, project_id, **kwargs):
        # Forward save_dir (and any other base kwargs) to BaseVisBackend.
        super().__init__(**kwargs)
        self.api_key = api_key
        self.project_id = project_id
        self.client = self._init_client()
    
    def _init_client(self):
        # Initialize custom logging client
        # NOTE(review): CustomLoggingClient is a placeholder and is not
        # defined in this example — substitute your own client class.
        return CustomLoggingClient(self.api_key, self.project_id)
    
    def add_scalar(self, name, value, step=0, **kwargs):
        # Route scalar metrics to the custom service.
        self.client.log_metric(name, value, step)
    
    def add_image(self, name, image, step=0, **kwargs):
        # Route images to the custom service.
        self.client.log_image(name, image, step)

# Use custom backend (Visualizer imported in an earlier snippet; the custom
# backend must be registered with mmengine for the 'type' lookup to resolve).
visualizer = Visualizer(
    vis_backends=[
        dict(type='CustomVisBackend', api_key='xxx', project_id='yyy')
    ]
)

Detailed Model Analysis

from mmengine.analysis import FlopAnalyzer, ActivationAnalyzer

# Create analyzers for a single 3x224x224 batch-of-1 input
# (`model` comes from the complexity example above).
inputs = (torch.randn(1, 3, 224, 224),)
flop_analyzer = FlopAnalyzer(model, inputs)
activation_analyzer = ActivationAnalyzer(model, inputs)

# Get detailed analysis: totals plus per-module / per-operator breakdowns.
total_flops = flop_analyzer.total()
flops_by_module = flop_analyzer.by_module()
flops_by_operator = flop_analyzer.by_operator()

total_activations = activation_analyzer.total()
activations_by_module = activation_analyzer.by_module()

print(f"Total FLOPs: {total_flops:,}")
print(f"Total Activations: {total_activations:,} bytes")

# Print top modules by FLOPs (descending).
sorted_modules = sorted(flops_by_module.items(), 
                       key=lambda x: x[1], reverse=True)
print("\nTop modules by FLOPs:")
for module_name, flops in sorted_modules[:5]:
    print(f"  {module_name}: {flops:,}")

Visualization with Multiple Data Types

# Visualize different data types.
# NOTE(review): assumes `dataloader` yields (image, target, prediction)
# batches and that `visualizer` was created in an earlier snippet.
for i, (image, target, prediction) in enumerate(dataloader):
    if i >= 5:  # Only visualize first 5 samples
        break
    
    visualizer.add_datasample(
        name=f'sample_{i}',
        # CHW tensor -> HWC numpy array for display.
        image=image[0].permute(1, 2, 0).cpu().numpy(),
        data_sample={
            'gt_label': target[0].item(),
            'pred_label': prediction[0].argmax().item(),
            'pred_score': prediction[0].softmax(0).max().item()
        },
        step=i
    )

Install with Tessl CLI

npx tessl i tessl/pypi-mmengine

docs

configuration.md

dataset.md

distributed.md

fileio.md

index.md

logging.md

models.md

optimization.md

registry.md

training.md

visualization.md

tile.json