Search (Ctrl+K)
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-fastai

fastai simplifies training fast and accurate neural nets using modern best practices

Pending
Overview
Eval results
Files

docs/metrics-losses.md

Metrics and Loss Functions

Comprehensive metrics for evaluating model performance and loss functions for training across different domains and tasks including classification, regression, segmentation, and language modeling.

Capabilities

Classification Metrics

Metrics for evaluating classification model performance across binary and multi-class scenarios.

def accuracy(inp, targ, axis=-1):
    """
    Classification accuracy metric.
    
    Parameters:
    - inp: Model predictions (logits or probabilities)
    - targ: Target labels
    - axis: Axis for prediction classes
    
    Returns:
    - Accuracy as a tensor
    """

def error_rate(inp, targ, axis=-1):
    """
    Classification error rate (1 - accuracy).
    
    Parameters:
    - inp: Model predictions
    - targ: Target labels  
    - axis: Axis for prediction classes
    
    Returns:
    - Error rate as a tensor
    """

def top_k_accuracy(inp, targ, k=5, axis=-1):
    """
    Top-k accuracy for multi-class classification.
    
    Parameters:
    - inp: Model predictions
    - targ: Target labels
    - k: Number of top predictions to consider
    - axis: Axis for prediction classes
    
    Returns:
    - Top-k accuracy as a tensor
    """

class Precision(Metric):
    """Precision metric for classification."""
    
    def __init__(self, axis=1, pos_label=1, average='binary'): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

class Recall(Metric):
    """Recall metric for classification."""
    
    def __init__(self, axis=1, pos_label=1, average='binary'): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property  
    def value(self): ...

class F1Score(Metric):
    """F1 score combining precision and recall."""
    
    def __init__(self, axis=1, pos_label=1, average='binary'): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

class MatthewsCorrCoef(Metric):
    """Matthews Correlation Coefficient for binary classification."""
    
    def __init__(self, axis=1): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

class RocAuc(Metric):
    """Area Under the ROC Curve."""
    
    def __init__(self, axis=1): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

Regression Metrics

Metrics for evaluating regression model performance and continuous value predictions.

def rmse(inp, targ):
    """
    Root Mean Square Error.
    
    Parameters:
    - inp: Model predictions
    - targ: Target values
    
    Returns:
    - RMSE as a tensor
    """

def mae(inp, targ):
    """
    Mean Absolute Error.
    
    Parameters:
    - inp: Model predictions
    - targ: Target values
    
    Returns:
    - MAE as a tensor
    """

def msle(inp, targ):
    """
    Mean Squared Logarithmic Error.
    
    Parameters:
    - inp: Model predictions (must be positive)
    - targ: Target values (must be positive)
    
    Returns:
    - MSLE as a tensor
    """

def exp_rmspe(inp, targ):
    """
    Root Mean Square Percentage Error computed after exponentiating
    predictions and targets (for models trained on log-transformed values).
    
    Parameters:
    - inp: Model predictions
    - targ: Target values
    
    Returns:
    - Exponential RMSPE as a tensor
    """

def mse(inp, targ):
    """Mean Squared Error."""

def r2_score(inp, targ):
    """R-squared coefficient of determination."""

class ExplainedVariance(Metric):
    """Explained variance regression score."""
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

Segmentation Metrics

Specialized metrics for image segmentation and pixel-level prediction tasks.

class Dice(Metric):
    """
    Dice coefficient for segmentation.
    Measures overlap between predicted and target masks.
    """
    
    def __init__(self, axis=1): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

class JaccardCoeff(Metric):
    """
    Jaccard coefficient (Intersection over Union) for segmentation.
    """
    
    def __init__(self, axis=1): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

def foreground_acc(inp, targ, bkg_idx=0, axis=1):
    """
    Foreground accuracy ignoring background class.
    
    Parameters:
    - inp: Model predictions
    - targ: Target masks
    - bkg_idx: Background class index to ignore
    - axis: Class axis
    
    Returns:
    - Foreground accuracy
    """

def Iou(inp, targ, ignore_idx=None):
    """Intersection over Union for segmentation."""

Language Model Metrics

Metrics specialized for language modeling and text generation tasks.

class Perplexity(Metric):
    """
    Perplexity metric for language models.
    Measures how well the model predicts the text.
    """
    
    def __init__(self, dim=-1): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self):
        """Returns perplexity = exp(average_loss)."""

class BLEU(Metric):
    """
    BLEU score for text generation evaluation.
    Measures n-gram overlap between generated and reference text.
    """
    
    def __init__(self, n_gram=4, weights=None): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

def bleu_score(pred_tokens, targ_tokens, n_gram=4):
    """Calculate BLEU score for token sequences."""

Loss Functions

Core loss functions for training models across different tasks and domains.

class CrossEntropyLossFlat(nn.CrossEntropyLoss):
    """
    Cross-entropy loss with flattened inputs.
    Handles multi-dimensional inputs by flattening before loss computation.
    """
    
    def __init__(self, weight=None, ignore_index=-100, reduction='mean', 
                 label_smoothing=0.0, axis=-1): ...
    
    def forward(self, input, target): ...

class BCEWithLogitsLossFlat(nn.BCEWithLogitsLoss):
    """Binary cross-entropy with logits, flattened inputs."""
    
    def __init__(self, weight=None, reduction='mean', pos_weight=None, 
                 axis=-1): ...

class MSELossFlat(nn.MSELoss):
    """Mean squared error loss with flattened inputs."""
    
    def __init__(self, reduction='mean', axis=-1): ...

class L1LossFlat(nn.L1Loss):
    """L1 (mean absolute error) loss with flattened inputs."""
    
    def __init__(self, reduction='mean', axis=-1): ...

class FocalLoss(nn.Module):
    """
    Focal loss for handling class imbalance.
    Focuses learning on hard examples by down-weighting easy examples.
    """
    
    def __init__(self, alpha=1, gamma=2, reduction='mean'):
        """
        Initialize focal loss.
        
        Parameters:
        - alpha: Weighting factor for rare class
        - gamma: Focusing parameter (higher = more focus on hard examples)
        - reduction: Reduction method ('mean', 'sum', 'none')
        """
    
    def forward(self, input, target): ...

class LabelSmoothingCrossEntropy(nn.Module):
    """
    Cross-entropy loss with label smoothing regularization.
    Prevents overfitting by smoothing target distribution.
    """
    
    def __init__(self, eps=0.1, reduction='mean', ignore_index=-100): ...
    
    def forward(self, input, target): ...

class DiceLoss(nn.Module):
    """
    Dice loss for segmentation tasks.
    Optimizes directly for Dice coefficient.
    """
    
    def __init__(self, axis=1, smooth=1e-6): ...
    
    def forward(self, input, target): ...

class JaccardLoss(nn.Module):
    """Jaccard (IoU) loss for segmentation."""
    
    def __init__(self, axis=1, smooth=1e-6): ...
    
    def forward(self, input, target): ...

class CombinedLoss(nn.Module):
    """Combine multiple loss functions with weights."""
    
    def __init__(self, losses, weights=None): ...
    
    def forward(self, input, target): ...

Base Metric Classes

Foundation classes for implementing custom metrics.

class Metric:
    """Base class for metrics that accumulate over batches."""
    
    def __init__(self): ...
    
    def reset(self):
        """Reset metric state for new epoch."""
    
    def accumulate(self, learn):
        """Accumulate metric for current batch."""
    
    @property
    def value(self):
        """Get current metric value."""
    
    @property
    def name(self):
        """Get metric name for display."""

class AccumMetric(Metric):
    """
    Base class for metrics that accumulate values.
    Automatically handles accumulation and averaging.
    """
    
    def __init__(self, func, dim_argmax=None, activation=ActivationType.No, 
                 thresh=None, to_np=False, invert_arg=False, flatten=True): ...

class AvgMetric(Metric):
    """Average a function across all batches."""
    
    def __init__(self, func, name=None): ...

class AvgLoss(Metric):
    """Average loss metric."""
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

class AvgSmoothLoss(Metric):
    """Smoothed average loss metric."""
    
    def __init__(self, beta=0.98): ...
    
    def reset(self): ...
    def accumulate(self, learn): ...
    @property
    def value(self): ...

class ValueMetric(Metric):
    """Track a single value (like learning rate)."""
    
    def __init__(self, func, name=None): ...

Metric Utilities

Utility functions for working with metrics and evaluation.

def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
    """Multi-label accuracy with threshold."""

def precision_multi(inp, targ, thresh=0.5, sigmoid=True):
    """Multi-label precision."""

def recall_multi(inp, targ, thresh=0.5, sigmoid=True):  
    """Multi-label recall."""

def fbeta_multi(inp, targ, beta=2, thresh=0.5, sigmoid=True):
    """Multi-label F-beta score."""

def hamming_loss(inp, targ, thresh=0.5, sigmoid=True):
    """Hamming loss for multi-label classification."""

def confusion_matrix(inp, targ, normalize=None):
    """Compute confusion matrix."""

def classification_report(inp, targ, labels=None):
    """Generate classification report with precision, recall, F1."""

def roc_curve(inp, targ, pos_label=1):
    """Compute ROC curve."""

def auc(fpr, tpr):
    """Compute Area Under Curve."""

Install with Tessl CLI

npx tessl i tessl/pypi-fastai

docs

callbacks.md

collaborative-filtering.md

core-training.md

data-loading.md

index.md

interpretation.md

medical.md

metrics-losses.md

tabular.md

text.md

vision.md

tile.json