PyTorch-native metrics library providing 400+ rigorously tested metrics across classification, regression, audio, image, text, and other ML domains.
Object detection and instance segmentation metrics for evaluating bounding box predictions, IoU calculations, and mean average precision in computer vision detection tasks.
The primary metric for object detection evaluation, computing precision-recall curves across IoU thresholds and object classes.
class MeanAveragePrecision(Metric):
def __init__(
self,
box_format: str = "xyxy",
iou_type: str = "bbox",
iou_thresholds: Optional[List[float]] = None,
rec_thresholds: Optional[List[float]] = None,
max_detection_thresholds: Optional[List[int]] = None,
class_metrics: bool = False,
backend: str = "pycocotools",
**kwargs
): ...Various IoU-based metrics for bounding box evaluation and overlap computation.
class IntersectionOverUnion(Metric):
    """Standard IoU between predicted and ground-truth boxes.

    Args:
        box_format: Input box convention ("xyxy", "xywh", or "cxcywh").
        iou_threshold: Minimum IoU for a prediction/target pair to count
            as a match.
        class_metrics: If ``True``, additionally report per-class IoU.
        respect_labels: NOTE(review): presumably restricts matching to
            pairs with identical labels — verify against torchmetrics docs.
    """

    def __init__(
        self,
        box_format: str = "xyxy",
        iou_threshold: float = 0.5,
        class_metrics: bool = False,
        respect_labels: bool = True,
        **kwargs
    ): ...
class GeneralizedIntersectionOverUnion(Metric):
    """Generalized IoU (GIoU), which also scores non-overlapping boxes
    (values can be negative when boxes do not intersect).

    Args:
        box_format: Input box convention ("xyxy", "xywh", or "cxcywh").
    """

    def __init__(
        self,
        box_format: str = "xyxy",
        **kwargs
    ): ...
class DistanceIntersectionOverUnion(Metric):
    """Distance IoU (DIoU), which additionally accounts for the distance
    between box center points.

    Args:
        box_format: Input box convention ("xyxy", "xywh", or "cxcywh").
    """

    def __init__(
        self,
        box_format: str = "xyxy",
        **kwargs
    ): ...
class CompleteIntersectionOverUnion(Metric):
def __init__(
self,
box_format: str = "xyxy",
**kwargs
): ...Metrics for evaluating panoptic segmentation combining instance and semantic segmentation.
class PanopticQuality(Metric):
    """Panoptic Quality (PQ) for panoptic segmentation.

    Args:
        things: Category ids of countable "thing" classes (instances).
        stuffs: Category ids of amorphous "stuff" classes (regions).
        allow_unknown_category: NOTE(review): presumably tolerates category
            ids outside ``things``/``stuffs`` instead of raising — verify.
    """

    def __init__(
        self,
        things: Set[int],
        stuffs: Set[int],
        allow_unknown_category: bool = False,
        **kwargs
    ): ...
class ModifiedPanopticQuality(Metric):
    """Modified Panoptic Quality, a PQ variant sharing the
    ``PanopticQuality`` constructor interface.

    Args:
        things: Category ids of countable "thing" classes (instances).
        stuffs: Category ids of amorphous "stuff" classes (regions).
        allow_unknown_category: NOTE(review): presumably tolerates category
            ids outside ``things``/``stuffs`` instead of raising — verify.
    """

    def __init__(
        self,
        things: Set[int],
        stuffs: Set[int],
        allow_unknown_category: bool = False,
        **kwargs
    ): ...

import torch
from torchmetrics.detection import MeanAveragePrecision

# Initialize mAP metric with default settings
map_metric = MeanAveragePrecision()

# Sample detection predictions: one image, one scored box
preds = [
    {
        "boxes": torch.tensor([[258.0, 41.0, 606.0, 285.0]]),
        "scores": torch.tensor([0.536]),
        "labels": torch.tensor([0]),
    }
]

# Ground truth targets (no scores required)
target = [
    {
        "boxes": torch.tensor([[214.0, 41.0, 562.0, 285.0]]),
        "labels": torch.tensor([0]),
    }
]

# Update and compute mAP; result dict includes overall mAP plus
# fixed-threshold variants at IoU 0.50 and 0.75
map_metric.update(preds, target)
map_result = map_metric.compute()
print(f"mAP: {map_result['map']:.4f}")
print(f"mAP@50: {map_result['map_50']:.4f}")
print(f"mAP@75: {map_result['map_75']:.4f}")

from torchmetrics.detection import MeanAveragePrecision
# mAP with class-specific metrics
map_metric = MeanAveragePrecision(class_metrics=True)

# Multi-class predictions
preds = [
    {
        "boxes": torch.tensor([
            [258.0, 41.0, 606.0, 285.0],  # person
            [100.0, 150.0, 200.0, 250.0],  # car
        ]),
        "scores": torch.tensor([0.8, 0.6]),
        "labels": torch.tensor([1, 2]),  # person=1, car=2
    }
]
target = [
    {
        "boxes": torch.tensor([
            [214.0, 41.0, 562.0, 285.0],  # person
            [95.0, 145.0, 205.0, 255.0],  # car
        ]),
        "labels": torch.tensor([1, 2]),
    }
]

# Compute per-class mAP (class_metrics=True adds "map_per_class")
map_metric.update(preds, target)
map_result = map_metric.compute()
print(f"Overall mAP: {map_result['map']:.4f}")
print(f"mAP per class: {map_result['map_per_class']}")

from torchmetrics.detection import IntersectionOverUnion
# Basic IoU metric
iou_metric = IntersectionOverUnion()

# Detection predictions and targets (same dict format as mAP,
# but scores are not needed)
preds = [
    {
        "boxes": torch.tensor([[100, 100, 200, 200]]),
        "labels": torch.tensor([1]),
    }
]
target = [
    {
        "boxes": torch.tensor([[110, 110, 210, 210]]),
        "labels": torch.tensor([1]),
    }
]

# Compute IoU
iou_metric.update(preds, target)
iou_result = iou_metric.compute()
print(f"IoU: {iou_result['iou']:.4f}")

from torchmetrics.detection import GeneralizedIntersectionOverUnion
# GIoU handles non-overlapping boxes better than standard IoU
giou_metric = GeneralizedIntersectionOverUnion()

# Non-overlapping boxes example
preds = [
    {
        "boxes": torch.tensor([[0, 0, 100, 100]]),
        "labels": torch.tensor([1]),
    }
]
target = [
    {
        "boxes": torch.tensor([[200, 200, 300, 300]]),  # No overlap
        "labels": torch.tensor([1]),
    }
]

giou_metric.update(preds, target)
giou_result = giou_metric.compute()
print(f"GIoU: {giou_result['giou']:.4f}")  # Will be negative for non-overlapping

from torchmetrics.detection import DistanceIntersectionOverUnion, CompleteIntersectionOverUnion
# DIoU considers center point distance
diou_metric = DistanceIntersectionOverUnion()
# CIoU additionally considers aspect ratio
ciou_metric = CompleteIntersectionOverUnion()

preds = [{"boxes": torch.tensor([[50, 50, 150, 100]]), "labels": torch.tensor([1])}]
target = [{"boxes": torch.tensor([[60, 55, 160, 105]]), "labels": torch.tensor([1])}]

# Compute advanced IoU variants
diou_metric.update(preds, target)
ciou_metric.update(preds, target)
diou_result = diou_metric.compute()
ciou_result = ciou_metric.compute()
print(f"DIoU: {diou_result['diou']:.4f}")
print(f"CIoU: {ciou_result['ciou']:.4f}")

from torchmetrics.detection import PanopticQuality
# Define thing and stuff classes
things = {1, 2, 3}  # person, car, bicycle
stuffs = {4, 5}  # road, building

# Initialize PQ metric
pq_metric = PanopticQuality(things=things, stuffs=stuffs)

# Panoptic inputs have shape (batch, H, W, 2): the last dimension holds
# (category_id, instance_id) for every pixel. A plain (batch, H, W) mask
# would be rejected by torchmetrics.
preds = torch.randint(0, 6, (2, 100, 100, 2))  # batch_size=2, H=100, W=100
target = torch.randint(0, 6, (2, 100, 100, 2))

# Compute panoptic quality
pq_result = pq_metric(preds, target)
# NOTE(review): depending on the torchmetrics version, compute() may return
# a scalar PQ tensor rather than a dict with "pq"/"sq"/"rq" keys — verify.
print(f"Panoptic Quality: {pq_result['pq']:.4f}")
print(f"Segmentation Quality: {pq_result['sq']:.4f}")
print(f"Recognition Quality: {pq_result['rq']:.4f}")

from torchmetrics.detection import MeanAveragePrecision
# Custom IoU thresholds for specific evaluation
custom_iou_thresholds = [0.3, 0.5, 0.7, 0.9]
map_custom = MeanAveragePrecision(iou_thresholds=custom_iou_thresholds)

# Sample predictions: two scored boxes of the same class
preds = [
    {
        "boxes": torch.tensor([[100, 100, 200, 200], [300, 300, 400, 400]]),
        "scores": torch.tensor([0.9, 0.7]),
        "labels": torch.tensor([1, 1]),
    }
]
target = [
    {
        "boxes": torch.tensor([[110, 110, 210, 210], [290, 290, 410, 410]]),
        "labels": torch.tensor([1, 1]),
    }
]

map_custom.update(preds, target)
custom_result = map_custom.compute()
print(f"Custom mAP: {custom_result['map']:.4f}")

from torchmetrics.detection import MeanAveragePrecision
# Process multiple images in batch: preds and targets are parallel lists
# with one dict per image
map_metric = MeanAveragePrecision()

# Batch of predictions
batch_preds = [
    {  # Image 1
        "boxes": torch.tensor([[100, 100, 200, 200]]),
        "scores": torch.tensor([0.8]),
        "labels": torch.tensor([1]),
    },
    {  # Image 2
        "boxes": torch.tensor([[50, 50, 150, 150], [250, 250, 350, 350]]),
        "scores": torch.tensor([0.9, 0.6]),
        "labels": torch.tensor([1, 2]),
    },
]
batch_targets = [
    {  # Image 1 GT
        "boxes": torch.tensor([[105, 105, 205, 205]]),
        "labels": torch.tensor([1]),
    },
    {  # Image 2 GT
        "boxes": torch.tensor([[45, 45, 155, 155], [245, 245, 355, 355]]),
        "labels": torch.tensor([1, 2]),
    },
]

# Process batch
map_metric.update(batch_preds, batch_targets)
batch_result = map_metric.compute()
print(f"Batch mAP: {batch_result['map']:.4f}")

from typing import Dict, List, Optional, Set, Union, Any
import torch
from torch import Tensor
# Detection data structures
DetectionBox = Tensor # Shape: (N, 4) for N boxes in format [x1, y1, x2, y2]
DetectionScores = Tensor # Shape: (N,) confidence scores
DetectionLabels = Tensor # Shape: (N,) class labels
DetectionPrediction = Dict[str, Tensor] # {"boxes": DetectionBox, "scores": DetectionScores, "labels": DetectionLabels}
DetectionTarget = Dict[str, Tensor] # {"boxes": DetectionBox, "labels": DetectionLabels}
BoxFormat = Union["xyxy", "xywh", "cxcywh"]
IoUType = Union["bbox", "segm", "keypoints"]Install with Tessl CLI
npx tessl i tessl/pypi-torchmetrics