# Regression Metrics

Metrics for evaluating regression tasks including error measurements, correlation coefficients, and explained variance measures for continuous target prediction evaluation.

## Capabilities
### Error Metrics

Measures various types of prediction errors for regression models.

```python { .api }
class MeanSquaredError(Metric):
    def __init__(
        self,
        squared: bool = True,
        num_outputs: int = 1,
        **kwargs
    ): ...

class MeanAbsoluteError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...

class MeanSquaredLogError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...

class LogCoshError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...
```
### Percentage Error Metrics

Error metrics expressed as percentages of the true values.

```python { .api }
class MeanAbsolutePercentageError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...

class SymmetricMeanAbsolutePercentageError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...

class WeightedMeanAbsolutePercentageError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...
```
### Normalized Error Metrics

Error metrics normalized by baseline measures.

```python { .api }
class NormalizedRootMeanSquaredError(Metric):
    def __init__(
        self,
        normalization: Union[str, Tensor] = "rmse",
        num_outputs: int = 1,
        **kwargs
    ): ...

class RelativeSquaredError(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        multioutput: str = "uniform_average",
        **kwargs
    ): ...
```
### Correlation Coefficients

Measures of linear and monotonic relationships between predictions and targets.

```python { .api }
class PearsonCorrCoef(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...

class SpearmanCorrCoef(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...

class KendallRankCorrCoef(Metric):
    def __init__(
        self,
        variant: str = "b",
        num_outputs: int = 1,
        **kwargs
    ): ...

class ConcordanceCorrCoef(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...
```
### Coefficient of Determination

Measures the proportion of variance in the target variable explained by the model.

```python { .api }
class R2Score(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        multioutput: str = "uniform_average",
        adjusted: int = 0,
        **kwargs
    ): ...

class ExplainedVariance(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        multioutput: str = "uniform_average",
        **kwargs
    ): ...
```
### Distance and Similarity Metrics

Measures of distance and similarity between prediction and target vectors.

```python { .api }
class CosineSimilarity(Metric):
    def __init__(
        self,
        reduction: str = "sum",
        **kwargs
    ): ...

class MinkowskiDistance(Metric):
    def __init__(
        self,
        p: float = 2.0,
        **kwargs
    ): ...
```
### Divergence Metrics

Information-theoretic measures of difference between prediction and target distributions.

```python { .api }
class KLDivergence(Metric):
    def __init__(
        self,
        reduction: str = "mean",
        log_prob: bool = False,
        **kwargs
    ): ...

class JensenShannonDivergence(Metric):
    def __init__(
        self,
        reduction: str = "mean",
        log_prob: bool = False,
        **kwargs
    ): ...
```
### Specialized Regression Metrics

Domain-specific regression metrics for particular use cases.

```python { .api }
class TweedieDevianceScore(Metric):
    def __init__(
        self,
        power: float = 0.0,
        **kwargs
    ): ...

class CriticalSuccessIndex(Metric):
    def __init__(
        self,
        threshold: float = 0.0,
        **kwargs
    ): ...

class ContinuousRankedProbabilityScore(Metric):
    def __init__(
        self,
        num_outputs: int = 1,
        **kwargs
    ): ...
```
## Usage Examples

### Basic Error Metrics

```python
import torch
from torchmetrics import MeanSquaredError, MeanAbsoluteError, R2Score

# Initialize metrics
mse = MeanSquaredError()
mae = MeanAbsoluteError()
r2 = R2Score()

# Sample predictions and targets
preds = torch.randn(10, 1)
target = torch.randn(10, 1)

# Compute metrics
mse_score = mse(preds, target)
mae_score = mae(preds, target)
r2_score = r2(preds, target)

print(f"MSE: {mse_score:.4f}")
print(f"MAE: {mae_score:.4f}")
print(f"R²: {r2_score:.4f}")
```
### Correlation Analysis

```python
from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef

# Initialize correlation metrics
pearson = PearsonCorrCoef()
spearman = SpearmanCorrCoef()

# Sample data
preds = torch.randn(100)
target = 2 * preds + torch.randn(100) * 0.1  # Linear relationship with noise

# Compute correlations
pearson_corr = pearson(preds, target)
spearman_corr = spearman(preds, target)

print(f"Pearson correlation: {pearson_corr:.4f}")
print(f"Spearman correlation: {spearman_corr:.4f}")
```
### Multi-output Regression

```python
from torchmetrics import MeanSquaredError, R2Score

# Multi-output regression
mse_multi = MeanSquaredError(num_outputs=3)
r2_multi = R2Score(num_outputs=3, multioutput="raw_values")

# Sample multi-output data
preds = torch.randn(50, 3)
target = torch.randn(50, 3)

# Compute metrics
mse_scores = mse_multi(preds, target)
r2_scores = r2_multi(preds, target)

print(f"MSE (overall): {mse_scores:.4f}")
print(f"R² per output: {r2_scores}")
```
### Percentage Error Metrics

```python
from torchmetrics import MeanAbsolutePercentageError, SymmetricMeanAbsolutePercentageError

# Initialize percentage error metrics
mape = MeanAbsolutePercentageError()
smape = SymmetricMeanAbsolutePercentageError()

# Sample data (avoid zeros in targets for MAPE)
preds = torch.randn(100) + 10
target = torch.randn(100) + 10

# Compute percentage errors
mape_score = mape(preds, target)
smape_score = smape(preds, target)

print(f"MAPE: {mape_score:.2f}%")
print(f"SMAPE: {smape_score:.2f}%")
```
### Probabilistic Regression

```python
from torchmetrics import ContinuousRankedProbabilityScore

# CRPS for probabilistic forecasts
crps = ContinuousRankedProbabilityScore()

# Sample probabilistic predictions (ensemble of forecasts)
num_samples, num_forecasts = 100, 50
ensemble_preds = torch.randn(num_samples, num_forecasts)
target = torch.randn(num_samples)

# Compute CRPS
crps_score = crps(ensemble_preds, target)
print(f"CRPS: {crps_score:.4f}")
```
## Types

```python { .api }
MultioutputType = Literal["raw_values", "uniform_average", "variance_weighted"]
ReductionType = Literal["mean", "sum", "none"]
```