CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-tensorboardx

TensorBoardX lets you watch Tensors Flow without Tensorflow

Overview
Eval results
Files

torchvis.mddocs/

Multi-Backend Visualization

Multi-backend visualization class supporting both TensorBoard and Visdom simultaneously. Enables cross-platform experiment monitoring and visualization comparison by proxying all SummaryWriter methods to multiple registered backends.

Capabilities

Initialization

Creates a TorchVis instance that can register multiple visualization backends and proxy method calls to all of them.

class TorchVis:
    def __init__(self, *args, **init_kwargs):
        """
        Creates a TorchVis multi-backend visualization wrapper.

        Parameters:
        - *args: Variable arguments passed to registered backends
        - **init_kwargs: Keyword arguments passed to registered backends
        """

Backend Management

Register and unregister visualization backends for simultaneous logging to multiple platforms.

def register(self, *args, **init_kwargs):
    """
    Register visualization backends.

    Parameters:
    - *args: Backend classes or instances to register
    - **init_kwargs: Initialization keyword arguments for backends
    """

def unregister(self, *args):
    """
    Unregister previously registered backends.

    Parameters:
    - *args: Backend classes or instances to unregister
    """

Dynamic Method Proxying

All SummaryWriter methods are dynamically available and proxied to all registered backends. This includes all logging methods like add_scalar, add_image, add_histogram, etc.

# All SummaryWriter methods are available through dynamic proxying:
# add_scalar, add_scalars, add_image, add_images, add_histogram,
# add_figure, add_video, add_audio, add_text, add_graph,
# add_embedding, add_pr_curve, add_mesh, add_hparams,
# flush, close, etc.

Usage Examples

Dual TensorBoard and Visdom Logging

from tensorboardX import TorchVis, SummaryWriter
import numpy as np

# Import visdom for second backend
try:
    import visdom
    visdom_available = True
except ImportError:
    visdom_available = False

# Create TorchVis with multiple backends
if visdom_available:
    vis = TorchVis()
    
    # Register TensorBoard backend
    tensorboard_writer = SummaryWriter('logs/tensorboard')
    vis.register(tensorboard_writer)
    
    # Register Visdom backend
    visdom_vis = visdom.Visdom()
    vis.register(visdom_vis)
    
    # Now all method calls go to both backends
    for step in range(100):
        loss = np.random.random()
        accuracy = np.random.random()
        
        # This logs to both TensorBoard and Visdom
        vis.add_scalar('Loss', loss, step)
        vis.add_scalar('Accuracy', accuracy, step)
        
        # Images also go to both
        if step % 10 == 0:
            image = np.random.rand(3, 64, 64)
            vis.add_image('Sample', image, step)
    
    # Close all backends
    vis.close()
else:
    print("Visdom not available, using TensorBoard only")
    vis = TorchVis()
    vis.register(SummaryWriter('logs/tensorboard_only'))

Custom Backend Registration

from tensorboardX import TorchVis, SummaryWriter
import numpy as np

class CustomLogger:
    """Custom logging backend example."""
    
    def __init__(self, log_file):
        self.log_file = log_file
        self.file = open(log_file, 'w')
    
    def add_scalar(self, tag, scalar_value, global_step=None, walltime=None):
        self.file.write(f"SCALAR: {tag} = {scalar_value} at step {global_step}\n")
        self.file.flush()
    
    def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats='CHW'):
        self.file.write(f"IMAGE: {tag} shape={img_tensor.shape} at step {global_step}\n")
        self.file.flush()
    
    def close(self):
        self.file.close()

# Create multi-backend visualization
vis = TorchVis()

# Register multiple backends
vis.register(SummaryWriter('logs/tensorboard'))  # TensorBoard
vis.register(CustomLogger('logs/custom.log'))    # Custom logger

# Log to all backends simultaneously
for i in range(10):
    vis.add_scalar('metric', i * 0.1, i)
    
    if i % 5 == 0:
        image = np.random.rand(3, 32, 32)
        vis.add_image('sample', image, i)

vis.close()

Dynamic Backend Management

from tensorboardX import TorchVis, SummaryWriter
import numpy as np

vis = TorchVis()

# Start with one backend
tb_writer = SummaryWriter('logs/primary')
vis.register(tb_writer)

# Log some data
for i in range(20):
    vis.add_scalar('Phase1/Loss', np.random.random(), i)

# Add another backend mid-experiment
secondary_writer = SummaryWriter('logs/secondary')
vis.register(secondary_writer)

# Now logs go to both backends
for i in range(20, 40):
    vis.add_scalar('Phase2/Loss', np.random.random(), i)

# Remove one backend
vis.unregister(tb_writer)

# Now only logs to secondary backend
for i in range(40, 60):
    vis.add_scalar('Phase3/Loss', np.random.random(), i)

vis.close()

Conditional Backend Registration

from tensorboardX import TorchVis, SummaryWriter
import os

def create_multi_backend_logger(config):
    """Create logger based on configuration."""
    vis = TorchVis()
    
    # Always register TensorBoard
    tb_writer = SummaryWriter(config['tensorboard_logdir'])
    vis.register(tb_writer)
    
    # Conditionally register Visdom
    if config.get('use_visdom', False):
        try:
            import visdom
            visdom_vis = visdom.Visdom(
                server=config.get('visdom_server', 'localhost'),
                port=config.get('visdom_port', 8097)
            )
            vis.register(visdom_vis)
            print("Registered Visdom backend")
        except ImportError:
            print("Visdom not available, skipping")
    
    # Conditionally register file logger
    if config.get('use_file_logging', False):
        file_logger = CustomLogger(config['log_file'])  # CustomLogger is defined in the Custom Backend Registration example
        vis.register(file_logger)
        print("Registered file logging backend")
    
    return vis

# Configuration-driven backend setup
config = {
    'tensorboard_logdir': 'logs/experiment',
    'use_visdom': True,
    'use_file_logging': True,
    'log_file': 'experiment.log',
    'visdom_server': 'localhost',
    'visdom_port': 8097
}

logger = create_multi_backend_logger(config)

# Log to all configured backends
for epoch in range(100):
    loss = train_one_epoch()       # placeholder: replace with your training function
    accuracy = validate_model()    # placeholder: replace with your validation function
    
    logger.add_scalar('Loss', loss, epoch)
    logger.add_scalar('Accuracy', accuracy, epoch)

logger.close()

Backend Requirements

For backends to work with TorchVis, they should implement the same method signatures as SummaryWriter:

Required Methods

  • add_scalar(tag, scalar_value, global_step=None, walltime=None)
  • add_image(tag, img_tensor, global_step=None, walltime=None, dataformats='CHW')
  • close()

Optional Methods

  • add_histogram, add_figure, add_video, add_audio, add_text
  • add_graph, add_embedding, add_pr_curve, add_mesh
  • flush(), any other SummaryWriter methods

Error Handling

TorchVis gracefully handles backend failures:

from tensorboardX import TorchVis, SummaryWriter

class FailingBackend:
    def add_scalar(self, *args, **kwargs):
        raise Exception("Backend failed!")
    
    def close(self):
        pass

vis = TorchVis()
vis.register(SummaryWriter('logs/good'))
vis.register(FailingBackend())

# TorchVis continues working even if one backend fails
vis.add_scalar('metric', 1.0, 0)  # Good backend receives data
vis.close()

Performance Considerations

  • Method Calls: Each method call is proxied to all registered backends
  • Memory Usage: Data is passed to each backend (not copied by default)
  • Network Latency: Remote backends (like Visdom servers) may introduce delays
  • Error Propagation: Backend failures don't stop other backends from receiving data

Integration Patterns

Experiment Tracking

# Compare local vs remote visualization
vis = TorchVis()
vis.register(SummaryWriter('logs/local'))        # Local TensorBoard
vis.register(RemoteLogger('https://api.wandb.ai')) # Remote tracking service

# Both receive the same data
vis.add_scalar('loss', loss_value, step)

A/B Testing Visualization

# Log to different TensorBoard instances for comparison
vis = TorchVis()
vis.register(SummaryWriter('logs/experiment_A'))
vis.register(SummaryWriter('logs/experiment_B'))

# Same data logged to both experiments for comparison
vis.add_scalar('metric', value, step)

Install with Tessl CLI

npx tessl i tessl/pypi-tensorboardx

docs

file-writer.md

global-writer.md

index.md

record-writer.md

summary-writer.md

torchvis.md

utilities.md

tile.json