or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

audio.mdclassification.mdclustering.mddetection.mdfunctional.mdimage.mdindex.mdmultimodal.mdnominal.mdregression.mdretrieval.mdsegmentation.mdshape.mdtext.mdutilities.mdvideo.md

retrieval.mddocs/

0

# Information Retrieval Metrics

Metrics for ranking and retrieval systems, including precision at k, mean average precision, and normalized discounted cumulative gain, for evaluating search and recommendation systems.

## Capabilities

### Core Retrieval Metrics

```python { .api }
class RetrievalMAP(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalMRR(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalPrecision(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        k: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalRecall(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        k: Optional[int] = None,
        **kwargs
    ): ...
```

### Advanced Retrieval Metrics

```python { .api }
class RetrievalNormalizedDCG(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        k: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalHitRate(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        k: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalFallOut(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        k: Optional[int] = None,
        **kwargs
    ): ...
```

### Specialized Retrieval Metrics

```python { .api }
class RetrievalRPrecision(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalAUROC(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalPrecisionRecallCurve(Metric):
    def __init__(
        self,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        **kwargs
    ): ...

class RetrievalRecallAtFixedPrecision(Metric):
    def __init__(
        self,
        min_precision: float,
        empty_target_action: str = "neg",
        ignore_index: Optional[int] = None,
        **kwargs
    ): ...
```

## Usage Examples

```python
import torch
from torchmetrics.retrieval import (
    RetrievalMAP, RetrievalNormalizedDCG,
    RetrievalPrecision, RetrievalRecall
)

# Initialize retrieval metrics
map_metric = RetrievalMAP()
ndcg_metric = RetrievalNormalizedDCG(k=10)
precision_k = RetrievalPrecision(k=5)
recall_k = RetrievalRecall(k=10)

# Sample retrieval data
# Scores: higher values indicate higher relevance
preds = torch.tensor([0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
# Targets: binary relevance (1 = relevant, 0 = not relevant)
target = torch.tensor([1, 0, 1, 1, 0, 0, 1, 0, 0])
# Indexes: query/user identifiers (same query for all items here)
indexes = torch.zeros(9, dtype=torch.long)

# Compute retrieval metrics
map_score = map_metric(preds, target, indexes=indexes)
ndcg_score = ndcg_metric(preds, target, indexes=indexes)
precision_score = precision_k(preds, target, indexes=indexes)
recall_score = recall_k(preds, target, indexes=indexes)

print(f"MAP: {map_score:.4f}")
print(f"NDCG@10: {ndcg_score:.4f}")
print(f"Precision@5: {precision_score:.4f}")
print(f"Recall@10: {recall_score:.4f}")
```

## Types

```python { .api }
RetrievalTarget = Tensor  # Binary relevance labels
RetrievalPreds = Tensor  # Relevance scores
RetrievalIndexes = Tensor  # Query/user identifiers
EmptyTargetAction = Literal["neg", "pos", "skip"]
```