CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-tqdm

Fast, extensible progress meter for loops and iterators in Python

Overview
Eval results
Files

docs/integrations.md

Framework Integrations

Integration modules for popular Python frameworks enabling seamless progress tracking within existing workflows. These integrations provide framework-specific callbacks and adapters while maintaining tqdm's consistent interface.

Capabilities

Keras Integration

Training callback for monitoring Keras model training progress with epoch and batch-level progress tracking.

from tqdm.keras import TqdmCallback

class TqdmCallback(keras.callbacks.Callback):
    """
    Keras callback for training progress with tqdm progress bars.

    Provides epoch-level and optional batch-level progress tracking
    with support for validation metrics and custom formatting.
    """
    def __init__(self, epochs=None, data_size=None, batch_size=None, 
                 verbose=1, tqdm_class=tqdm_auto, **tqdm_kwargs):
        """
        Initialize Keras callback with progress tracking.

        Parameters:
        - epochs: Total number of training epochs (auto-detected if None)
        - data_size: Size of training dataset (auto-detected if None)
        - batch_size: Training batch size (auto-detected if None)
        - verbose: Bar granularity. NOTE(review): in tqdm.keras this selects
          0=epoch bar only, 1=transient batch bar, 2=persistent batch bar —
          not Keras's own "0=silent, 2=one line per epoch" semantics; confirm
          against the tqdm.keras source
        - tqdm_class: tqdm class to use (default: tqdm.auto.tqdm)
        - **tqdm_kwargs: Additional arguments passed to tqdm constructor
        """
        
    @staticmethod
    def bar2callback(bar, pop=None, delta=lambda logs: 1):
        """
        Convert tqdm progress bar to Keras callback function.

        Parameters:
        - bar: tqdm progress bar instance
        - pop: List of keys to remove from logs before display
        - delta: Function mapping the Keras ``logs`` dict to the increment
          applied to ``bar`` (defaults to advancing by 1 per invocation)

        Returns:
        Callback function compatible with Keras training
        """

Dask Integration

Computation callback for tracking Dask task execution progress across distributed computing environments.

from tqdm.dask import TqdmCallback

class TqdmCallback(Callback):
    """
    Dask computation callback with tqdm progress tracking.

    Monitors task execution across Dask workers and provides
    real-time progress updates for complex computation graphs.
    Typically used as a context manager wrapping ``.compute()`` calls.
    """
    def __init__(self, start=None, pretask=None, tqdm_class=tqdm_auto, **tqdm_kwargs):
        """
        Initialize Dask callback with progress tracking.

        Parameters:
        - start: Callback invoked when the computation starts (optional)
        - pretask: Callback invoked before each task executes (optional)
        - tqdm_class: tqdm class to use (default: tqdm.auto.tqdm)
        - **tqdm_kwargs: Additional arguments passed to tqdm constructor
        """
        
    def display(self):
        """Display the progress bar (used in notebook environments)."""

Pandas Integration

Class method integration enabling progress tracking for pandas DataFrame operations through monkey-patching.

# Enable pandas integration (monkey-patches progress_* methods onto pandas)
tqdm.pandas(**tqdm_kwargs)

# This enables methods like:
df.progress_apply(func)                    # Apply with progress
df.progress_map(func)                      # Map with progress (Series; DataFrame.map needs pandas 2.1+ — verify)
df.progress_applymap(func)                 # Element-wise apply (applymap is deprecated since pandas 2.1)
df.groupby(col).progress_apply(func)       # GroupBy operations with progress (one update per group)

Usage Examples

Keras Model Training

from tqdm.keras import TqdmCallback
import tensorflow as tf
from tensorflow import keras
import numpy as np

# Create sample data: 1000 train / 200 validation rows of 32 features
# with random binary labels.
X_train = np.random.random((1000, 32))
y_train = np.random.randint(2, size=(1000, 1))
X_val = np.random.random((200, 32))
y_val = np.random.randint(2, size=(200, 1))

# Build model: small dense binary classifier ending in a sigmoid unit
model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(32,)),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train with tqdm callback; `epochs` here only sizes the progress bar and
# should match the `epochs` value passed to model.fit below.
tqdm_callback = TqdmCallback(
    epochs=10,
    verbose=1,
    desc="Training",
    leave=True
)

history = model.fit(
    X_train, y_train,
    epochs=10,
    batch_size=32,
    validation_data=(X_val, y_val),
    callbacks=[tqdm_callback],
    verbose=0  # Disable default Keras progress so the two bars don't collide
)

Advanced Keras Integration

from tqdm.keras import TqdmCallback
from tqdm.auto import tqdm
import tensorflow as tf

# Custom callback with detailed progress
# Custom callback with detailed progress
class DetailedTqdmCallback(TqdmCallback):
    """Two-level progress display for Keras training.

    Keeps a persistent epoch-level bar (position 0) and a transient
    per-epoch batch bar (position 1) annotated with live metrics.
    NOTE(review): relies on self.epochs / self.data_size / self.batch_size
    being populated by the TqdmCallback base class — confirm against
    tqdm.keras.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.epoch_bar = None
        self.batch_bar = None

    def on_train_begin(self, logs=None):
        super().on_train_begin(logs)
        self.epoch_bar = tqdm(total=self.epochs, desc="Epochs", position=0)

    def on_epoch_begin(self, epoch, logs=None):
        super().on_epoch_begin(epoch, logs)
        # Guard BOTH attributes: they are auto-detected and may be None, and
        # `data_size // batch_size` previously raised TypeError whenever
        # data_size was set but batch_size was not.
        if self.data_size and self.batch_size:
            # Ceiling division so a partial final batch is still counted.
            total_batches = -(-self.data_size // self.batch_size)
            self.batch_bar = tqdm(
                total=total_batches,
                desc=f"Epoch {epoch+1}/{self.epochs}",
                position=1,
                leave=False
            )

    def on_batch_end(self, batch, logs=None):
        if self.batch_bar:
            self.batch_bar.update(1)
            # Mirror running metrics next to the batch bar.
            if logs:
                self.batch_bar.set_postfix({
                    'loss': f"{logs.get('loss', 0):.4f}",
                    'acc': f"{logs.get('accuracy', 0):.4f}"
                })

    def on_epoch_end(self, epoch, logs=None):
        # Close the transient batch bar before advancing the epoch bar.
        if self.batch_bar:
            self.batch_bar.close()
        if self.epoch_bar:
            self.epoch_bar.update(1)
            if logs:
                self.epoch_bar.set_postfix({
                    'val_loss': f"{logs.get('val_loss', 0):.4f}",
                    'val_acc': f"{logs.get('val_accuracy', 0):.4f}"
                })

# Use detailed callback
detailed_callback = DetailedTqdmCallback()
model.fit(X_train, y_train, epochs=5, callbacks=[detailed_callback])

Dask Distributed Computing

from tqdm.dask import TqdmCallback
import dask
import dask.array as da
from dask.distributed import Client
import numpy as np

# Setup Dask client
client = Client('localhost:8786')  # Connect to Dask scheduler

# Create large array computation (lazy; nothing runs until .compute())
x = da.random.random((10000, 10000), chunks=(1000, 1000))
y = da.random.random((10000, 10000), chunks=(1000, 1000))

# Matrix operations with progress tracking
with TqdmCallback(desc="Matrix Multiplication"):
    result = da.dot(x, y)
    computed_result = result.compute()

# Complex computation graph with progress
def process_chunk(chunk):
    """Reduce a 2-D block to the scalar sum of its squared elements."""
    return np.sum(chunk ** 2)

with TqdmCallback(desc="Processing Chunks", leave=True):
    chunks = [da.from_array(np.random.random((1000, 1000))) for _ in range(100)]
    # FIX: apply_gufunc takes the gufunc signature as its second *positional*
    # argument — apply_gufunc(func, signature, *args). The original passed the
    # array in the signature slot and signature as a keyword, which raises a
    # TypeError for duplicate values of 'signature'.
    processed = [da.apply_gufunc(process_chunk, '(i,j)->()', chunk)
                 for chunk in chunks]
    results = dask.compute(*processed)

client.close()

Pandas DataFrame Operations

import pandas as pd
import numpy as np
from tqdm import tqdm

# Enable pandas integration (registers the progress_* methods globally)
tqdm.pandas(desc="Processing")

# Create sample DataFrame: two numeric columns and one categorical
df = pd.DataFrame({
    'A': np.random.randn(100000),
    'B': np.random.randn(100000),
    'C': np.random.choice(['X', 'Y', 'Z'], 100000)
})

# Apply operations with progress bars
def complex_function(x):
    # Simulate complex computation; abs(x) + 1 keeps the log argument > 0
    return x ** 2 + np.sin(x) + np.log(abs(x) + 1)

# Series-wise and row-wise apply with progress
result1 = df['A'].progress_apply(complex_function)
result2 = df.progress_apply(lambda row: row['A'] + row['B'], axis=1)

# GroupBy operations with progress (one bar update per group)
grouped_result = df.groupby('C').progress_apply(
    lambda group: group['A'].mean() + group['B'].std()
)

# Element-wise operations with progress
# NOTE(review): applymap is deprecated since pandas 2.1 (DataFrame.map is the
# replacement) — confirm which pandas version this example targets.
df_processed = df[['A', 'B']].progress_applymap(lambda x: x * 2 if x > 0 else x / 2)

# Re-register with custom defaults; each tqdm.pandas() call overrides the last
tqdm.pandas(desc="Custom Processing", unit="rows", leave=True)

def detailed_processing(row):
    # Simulate complex row processing keyed off the categorical column C
    result = row['A'] * row['B']
    if row['C'] == 'X':
        result *= 2
    elif row['C'] == 'Y':
        result += 10
    return result

df['processed'] = df.progress_apply(detailed_processing, axis=1)

Custom Integration Patterns

import tensorflow as tf
from tqdm.auto import tqdm
import time

# Custom TensorFlow integration
# Custom TensorFlow integration
class TqdmTensorFlowCallback(tf.keras.callbacks.Callback):
    """Keras callback that drives a single epoch-level tqdm bar.

    All keyword arguments are forwarded verbatim to the tqdm constructor.
    """

    def __init__(self, **tqdm_kwargs):
        super().__init__()
        self.tqdm_kwargs = tqdm_kwargs
        self.pbar = None

    def on_train_begin(self, logs=None):
        # Total epoch count comes from the Keras-populated training params.
        self.pbar = tqdm(total=self.params['epochs'], **self.tqdm_kwargs)

    def on_epoch_end(self, epoch, logs=None):
        if not self.pbar:
            return
        self.pbar.update(1)
        if not logs:
            return
        # Mirror the headline metrics onto the bar's postfix.
        tracked = ['loss', 'accuracy', 'val_loss', 'val_accuracy']
        postfix = {}
        for name, value in logs.items():
            if name in tracked:
                postfix[name] = f"{value:.4f}"
        self.pbar.set_postfix(postfix)

    def on_train_end(self, logs=None):
        if self.pbar:
            self.pbar.close()

# Custom PyTorch integration
# Custom PyTorch integration
class TqdmPyTorchTrainer:
    """Minimal PyTorch training-loop wrapper with a per-batch tqdm bar."""

    def __init__(self, model, optimizer, criterion):
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion

    def train_epoch(self, dataloader, epoch):
        """Train for one epoch and return the mean loss across its batches.

        Returns 0.0 for an empty dataloader; previously ``avg_loss`` was
        bound only inside the loop, so an empty loader raised
        UnboundLocalError at the return statement.
        """
        self.model.train()
        pbar = tqdm(dataloader, desc=f"Epoch {epoch}")

        total_loss = 0
        avg_loss = 0.0  # safe default when the dataloader yields nothing
        for batch_idx, (data, target) in enumerate(pbar):
            # Standard optimization step: forward, loss, backward, update.
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()
            avg_loss = total_loss / (batch_idx + 1)

            # Update progress bar with current metrics
            pbar.set_postfix({
                'loss': f"{loss.item():.4f}",
                'avg_loss': f"{avg_loss:.4f}"
            })

        return avg_loss

# Generic framework integration helper
# Generic framework integration helper
def create_framework_callback(framework_callback_class, tqdm_class=tqdm):
    """Factory function for creating framework-specific tqdm callbacks"""

    class TqdmFrameworkCallback(framework_callback_class):
        """Mixes a lazily-created tqdm bar into the supplied callback base."""

        def __init__(self, total=None, desc=None, **tqdm_kwargs):
            super().__init__()
            self.total = total
            self.desc = desc
            self.tqdm_kwargs = tqdm_kwargs
            self.pbar = None

        def start_progress(self, total=None):
            # Create the bar exactly once; subsequent calls are no-ops.
            if self.pbar is not None:
                return
            self.pbar = tqdm_class(
                total=total or self.total,
                desc=self.desc,
                **self.tqdm_kwargs
            )

        def update_progress(self, n=1, **postfix):
            bar = self.pbar
            if bar:
                bar.update(n)
                if postfix:
                    bar.set_postfix(postfix)

        def finish_progress(self):
            bar = self.pbar
            if bar:
                bar.close()
                self.pbar = None

    return TqdmFrameworkCallback

Framework-Specific Considerations

Threading and Multiprocessing

  • Keras: Use verbose=0 to disable default progress and avoid conflicts
  • Dask: Progress tracking works across distributed workers
  • Pandas: Thread-safe by default when using tqdm.pandas()

Memory Usage

  • Large datasets: Consider chunking or streaming approaches
  • Deep learning: Monitor GPU memory alongside training progress
  • Distributed computing: Progress aggregation across workers

Error Handling

  • Framework exceptions are propagated through tqdm callbacks
  • Progress bars are automatically cleaned up on errors
  • Use context managers for guaranteed cleanup

Performance Considerations

  • Minimal overhead: ~60ns per update
  • Batch updates recommended for high-frequency operations
  • Disable progress bars in production if performance critical

Install with Tessl CLI

npx tessl i tessl/pypi-tqdm

docs

core.md

environments.md

index.md

integrations.md

parallel.md

utilities.md

tile.json