# Detection Metrics

Object detection and instance segmentation metrics for evaluating bounding box predictions, IoU calculations, and mean average precision in computer vision detection tasks.

## Capabilities

### Mean Average Precision

The primary metric for object detection evaluation, computing precision-recall curves across IoU thresholds and object classes.

```python { .api }
class MeanAveragePrecision(Metric):
    def __init__(
        self,
        box_format: str = "xyxy",
        iou_type: str = "bbox",
        iou_thresholds: Optional[List[float]] = None,
        rec_thresholds: Optional[List[float]] = None,
        max_detection_thresholds: Optional[List[int]] = None,
        class_metrics: bool = False,
        backend: str = "pycocotools",
        **kwargs
    ): ...
```

### Intersection over Union Metrics

Various IoU-based metrics for bounding box evaluation and overlap computation.

```python { .api }
class IntersectionOverUnion(Metric):
    def __init__(
        self,
        box_format: str = "xyxy",
        iou_threshold: float = 0.5,
        class_metrics: bool = False,
        respect_labels: bool = True,
        **kwargs
    ): ...

class GeneralizedIntersectionOverUnion(Metric):
    def __init__(
        self,
        box_format: str = "xyxy",
        **kwargs
    ): ...

class DistanceIntersectionOverUnion(Metric):
    def __init__(
        self,
        box_format: str = "xyxy",
        **kwargs
    ): ...

class CompleteIntersectionOverUnion(Metric):
    def __init__(
        self,
        box_format: str = "xyxy",
        **kwargs
    ): ...
```

### Panoptic Segmentation Metrics

Metrics for evaluating panoptic segmentation combining instance and semantic segmentation.

```python { .api }
class PanopticQuality(Metric):
    def __init__(
        self,
        things: Set[int],
        stuffs: Set[int],
        allow_unknown_category: bool = False,
        **kwargs
    ): ...

class ModifiedPanopticQuality(Metric):
    def __init__(
        self,
        things: Set[int],
        stuffs: Set[int],
        allow_unknown_category: bool = False,
        **kwargs
    ): ...
```

## Usage Examples

### Basic Object Detection with mAP

```python
import torch
from torchmetrics.detection import MeanAveragePrecision

# Initialize mAP metric
map_metric = MeanAveragePrecision()

# Sample detection predictions
preds = [
    {
        "boxes": torch.tensor([[258.0, 41.0, 606.0, 285.0]]),
        "scores": torch.tensor([0.536]),
        "labels": torch.tensor([0]),
    }
]

# Ground truth targets
target = [
    {
        "boxes": torch.tensor([[214.0, 41.0, 562.0, 285.0]]),
        "labels": torch.tensor([0]),
    }
]

# Update and compute mAP
map_metric.update(preds, target)
map_result = map_metric.compute()

print(f"mAP: {map_result['map']:.4f}")
print(f"mAP@50: {map_result['map_50']:.4f}")
print(f"mAP@75: {map_result['map_75']:.4f}")
```

### Multi-class Detection Evaluation

```python
import torch
from torchmetrics.detection import MeanAveragePrecision

# mAP with class-specific metrics
map_metric = MeanAveragePrecision(class_metrics=True)

# Multi-class predictions
preds = [
    {
        "boxes": torch.tensor([
            [258.0, 41.0, 606.0, 285.0],  # person
            [100.0, 150.0, 200.0, 250.0]  # car
        ]),
        "scores": torch.tensor([0.8, 0.6]),
        "labels": torch.tensor([1, 2]),  # person=1, car=2
    }
]

target = [
    {
        "boxes": torch.tensor([
            [214.0, 41.0, 562.0, 285.0],  # person
            [95.0, 145.0, 205.0, 255.0]   # car
        ]),
        "labels": torch.tensor([1, 2]),
    }
]

# Compute per-class mAP
map_metric.update(preds, target)
map_result = map_metric.compute()

print(f"Overall mAP: {map_result['map']:.4f}")
print(f"mAP per class: {map_result['map_per_class']}")
```

### IoU Evaluation

```python
import torch
from torchmetrics.detection import IntersectionOverUnion

# Basic IoU metric
iou_metric = IntersectionOverUnion()

# Detection predictions and targets (same format as mAP)
preds = [
    {
        "boxes": torch.tensor([[100, 100, 200, 200]]),
        "labels": torch.tensor([1]),
    }
]

target = [
    {
        "boxes": torch.tensor([[110, 110, 210, 210]]),
        "labels": torch.tensor([1]),
    }
]

# Compute IoU
iou_metric.update(preds, target)
iou_result = iou_metric.compute()
print(f"IoU: {iou_result['iou']:.4f}")
```

### Generalized IoU for Better Localization

```python
import torch
from torchmetrics.detection import GeneralizedIntersectionOverUnion

# GIoU handles non-overlapping boxes better than standard IoU
giou_metric = GeneralizedIntersectionOverUnion()

# Non-overlapping boxes example
preds = [
    {
        "boxes": torch.tensor([[0, 0, 100, 100]]),
        "labels": torch.tensor([1]),
    }
]

target = [
    {
        "boxes": torch.tensor([[200, 200, 300, 300]]),  # No overlap
        "labels": torch.tensor([1]),
    }
]

giou_metric.update(preds, target)
giou_result = giou_metric.compute()
print(f"GIoU: {giou_result['giou']:.4f}")  # Will be negative for non-overlapping
```

### Distance-based IoU Variants

```python
import torch
from torchmetrics.detection import DistanceIntersectionOverUnion, CompleteIntersectionOverUnion

# DIoU considers center point distance
diou_metric = DistanceIntersectionOverUnion()

# CIoU additionally considers aspect ratio
ciou_metric = CompleteIntersectionOverUnion()

preds = [{"boxes": torch.tensor([[50, 50, 150, 100]]), "labels": torch.tensor([1])}]
target = [{"boxes": torch.tensor([[60, 55, 160, 105]]), "labels": torch.tensor([1])}]

# Compute advanced IoU variants
diou_metric.update(preds, target)
ciou_metric.update(preds, target)

diou_result = diou_metric.compute()
ciou_result = ciou_metric.compute()

print(f"DIoU: {diou_result['diou']:.4f}")
print(f"CIoU: {ciou_result['ciou']:.4f}")
```

### Panoptic Segmentation Quality

```python
import torch
from torchmetrics.detection import PanopticQuality

# Define thing and stuff classes
things = {1, 2, 3}  # person, car, bicycle
stuffs = {4, 5}  # road, building

# Initialize PQ metric
pq_metric = PanopticQuality(things=things, stuffs=stuffs)

# Panoptic predictions and targets (segmentation masks with instance IDs)
preds = torch.randint(0, 6, (2, 100, 100))  # batch_size=2, H=100, W=100
target = torch.randint(0, 6, (2, 100, 100))

# Compute panoptic quality
pq_result = pq_metric(preds, target)
print(f"Panoptic Quality: {pq_result['pq']:.4f}")
print(f"Segmentation Quality: {pq_result['sq']:.4f}")
print(f"Recognition Quality: {pq_result['rq']:.4f}")
```

### Detection with Custom IoU Thresholds

```python
import torch
from torchmetrics.detection import MeanAveragePrecision

# Custom IoU thresholds for specific evaluation
custom_iou_thresholds = [0.3, 0.5, 0.7, 0.9]
map_custom = MeanAveragePrecision(iou_thresholds=custom_iou_thresholds)

# Sample predictions
preds = [
    {
        "boxes": torch.tensor([[100, 100, 200, 200], [300, 300, 400, 400]]),
        "scores": torch.tensor([0.9, 0.7]),
        "labels": torch.tensor([1, 1]),
    }
]

target = [
    {
        "boxes": torch.tensor([[110, 110, 210, 210], [290, 290, 410, 410]]),
        "labels": torch.tensor([1, 1]),
    }
]

map_custom.update(preds, target)
custom_result = map_custom.compute()
print(f"Custom mAP: {custom_result['map']:.4f}")
```

### Batch Processing

```python
import torch
from torchmetrics.detection import MeanAveragePrecision

# Process multiple images in batch
map_metric = MeanAveragePrecision()

# Batch of predictions
batch_preds = [
    {  # Image 1
        "boxes": torch.tensor([[100, 100, 200, 200]]),
        "scores": torch.tensor([0.8]),
        "labels": torch.tensor([1]),
    },
    {  # Image 2
        "boxes": torch.tensor([[50, 50, 150, 150], [250, 250, 350, 350]]),
        "scores": torch.tensor([0.9, 0.6]),
        "labels": torch.tensor([1, 2]),
    }
]

batch_targets = [
    {  # Image 1 GT
        "boxes": torch.tensor([[105, 105, 205, 205]]),
        "labels": torch.tensor([1]),
    },
    {  # Image 2 GT
        "boxes": torch.tensor([[45, 45, 155, 155], [245, 245, 355, 355]]),
        "labels": torch.tensor([1, 2]),
    }
]

# Process batch
map_metric.update(batch_preds, batch_targets)
batch_result = map_metric.compute()
print(f"Batch mAP: {batch_result['map']:.4f}")
```

## Types

```python { .api }
from typing import Dict, List, Literal, Optional, Set, Union, Any
import torch
from torch import Tensor

# Detection data structures
DetectionBox = Tensor  # Shape: (N, 4) for N boxes in format [x1, y1, x2, y2]
DetectionScores = Tensor  # Shape: (N,) confidence scores
DetectionLabels = Tensor  # Shape: (N,) class labels

DetectionPrediction = Dict[str, Tensor]  # {"boxes": DetectionBox, "scores": DetectionScores, "labels": DetectionLabels}
DetectionTarget = Dict[str, Tensor]  # {"boxes": DetectionBox, "labels": DetectionLabels}

# A closed set of string options is expressed with Literal (PEP 586), not Union
BoxFormat = Literal["xyxy", "xywh", "cxcywh"]
IoUType = Literal["bbox", "segm", "keypoints"]
```