or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

amazon-algorithms.md · automl.md · core-training.md · data-processing.md · debugging-profiling.md · experiments.md · framework-training.md · hyperparameter-tuning.md · index.md · model-monitoring.md · model-serving.md · remote-functions.md

docs/model-monitoring.md

# Model Monitoring

Comprehensive model monitoring including data quality, model quality, bias detection, and explainability analysis with scheduled monitoring jobs and automated alerting capabilities.

## Capabilities

### Base Model Monitoring

Core model monitoring functionality for detecting data drift, model quality degradation, and performance issues in production deployments.

```python { .api }
class ModelMonitor:
    """
    Base class for SageMaker Model Monitor services.

    Parameters:
    - role (str): IAM role ARN with monitoring permissions
    - instance_count (int, optional): Number of monitoring instances
    - instance_type (str, optional): EC2 instance type for monitoring
    - volume_size_in_gb (int, optional): Storage volume size
    - volume_kms_key (str, optional): KMS key for volume encryption
    - output_kms_key (str, optional): KMS key for output encryption
    - max_runtime_in_seconds (int, optional): Maximum runtime
    - base_job_name (str, optional): Base name for monitoring jobs
    - sagemaker_session (Session, optional): SageMaker session
    - env (dict, optional): Environment variables
    - tags (list, optional): Resource tags
    - network_config (NetworkConfig, optional): Network configuration
    """
    def __init__(self, role: str, **kwargs): ...

    def create_monitoring_schedule(self, monitor_schedule_name: str,
                                   endpoint_input: EndpointInput,
                                   output_s3_uri: str,
                                   statistics: Statistics = None,
                                   constraints: Constraints = None,
                                   schedule_cron_expression: str = None,
                                   enable_cloudwatch_metrics: bool = True) -> dict: ...

    def update_monitoring_schedule(self, monitoring_schedule_name: str,
                                   **kwargs) -> dict: ...

    def delete_monitoring_schedule(self, monitoring_schedule_name: str): ...

    def describe_monitoring_schedule(self, monitoring_schedule_name: str) -> dict: ...

    def list_monitoring_executions(self, monitoring_schedule_name: str,
                                   **kwargs) -> List[dict]: ...

class DefaultModelMonitor(ModelMonitor):
    """
    Default model monitor for data quality monitoring with drift detection.

    Monitors data quality metrics, distribution changes, and statistical properties
    of inference inputs compared to training data baselines.
    """
    def __init__(self, **kwargs): ...

    def suggest_baseline(self, baseline_dataset: str, dataset_format: str,
                         output_s3_uri: str, wait: bool = True,
                         logs: bool = True) -> BaseliningJob: ...
```

### Specialized Model Monitors

Advanced monitoring capabilities for bias detection, model quality assessment, and explainability analysis.

```python { .api }
class ModelQualityMonitor(ModelMonitor):
    """
    Monitor for model quality metrics and performance degradation.

    Tracks model accuracy, precision, recall, and other quality metrics
    over time to detect performance degradation.

    Parameters:
    - problem_type (str): Problem type ("Regression", "BinaryClassification", "MulticlassClassification")
    - inference_attribute (str, optional): Attribute containing model predictions
    - probability_attribute (str, optional): Attribute containing prediction probabilities
    - ground_truth_attribute (str, optional): Attribute containing ground truth labels
    - All ModelMonitor parameters
    """
    def __init__(self, problem_type: str, **kwargs): ...

class ModelBiasMonitor(ModelMonitor):
    """
    Monitor for model bias detection and fairness analysis.

    Detects bias in model predictions across different demographic groups
    and protected attributes using statistical parity and equalized odds metrics.

    Parameters:
    - config_uri (str): S3 URI for bias analysis configuration
    - All ModelMonitor parameters
    """
    def __init__(self, config_uri: str, **kwargs): ...

class ModelExplainabilityMonitor(ModelMonitor):
    """
    Monitor for model explainability and feature importance analysis.

    Tracks feature importance, SHAP values, and other explainability metrics
    to understand model behavior and decision patterns.

    Parameters:
    - config_uri (str): S3 URI for explainability configuration
    - All ModelMonitor parameters
    """
    def __init__(self, config_uri: str, **kwargs): ...
```

### Data Capture and Configuration

Configuration classes for capturing inference data and setting up monitoring parameters.

```python { .api }
class DataCaptureConfig:
    """
    Configuration for capturing inference requests and responses.

    Parameters:
    - enable_capture (bool): Enable data capture
    - sampling_percentage (int): Percentage of requests to capture (0-100)
    - destination_s3_uri (str): S3 URI for captured data storage
    - kms_key_id (str, optional): KMS key for encryption
    - capture_options (List[dict], optional): Request/Response capture options
    - csv_content_types (List[str], optional): CSV content types to capture
    - json_content_types (List[str], optional): JSON content types to capture
    """
    def __init__(self, enable_capture: bool, sampling_percentage: int,
                 destination_s3_uri: str, **kwargs): ...

class EndpointInput:
    """
    Configuration for endpoint input data for monitoring.

    Parameters:
    - endpoint_name (str): Name of the endpoint to monitor
    - destination (str): Local path for captured data processing
    - s3_input_mode (str, optional): Input mode ("File" or "Pipe")
    - s3_data_distribution_type (str, optional): Data distribution type
    - features_attribute (str, optional): Attribute containing features
    - inference_attribute (str, optional): Attribute containing predictions
    - probability_attribute (str, optional): Attribute containing probabilities
    - probability_threshold_attribute (float, optional): Probability threshold
    - start_time_offset (str, optional): Start time offset for data
    - end_time_offset (str, optional): End time offset for data
    """
    def __init__(self, endpoint_name: str, destination: str, **kwargs): ...

class MonitoringOutput:
    """
    Configuration for monitoring job output.

    Parameters:
    - source (str): Local path where monitoring output is generated
    - destination (str): S3 URI for monitoring results storage
    - s3_upload_mode (str, optional): Upload mode ("Continuous" or "EndOfJob")
    """
    def __init__(self, source: str, destination: str, **kwargs): ...
```

### Baseline Jobs and Statistics

Classes for creating baseline statistics and constraints from training data for comparison with production data.

```python { .api }
class BaseliningJob:
    """
    Job for creating baseline statistics and constraints from training data.

    Parameters:
    - job_name (str): Name of the baselining job
    - inputs (List[ProcessingInput]): Input data configuration
    - outputs (List[ProcessingOutput]): Output configuration
    - All processing job parameters
    """
    def __init__(self, job_name: str, **kwargs): ...

    def wait(self): ...
    def describe(self) -> dict: ...

class Statistics:
    """
    Statistical baseline for monitoring comparisons.

    Parameters:
    - body_dict (dict, optional): Statistics content as dictionary
    - file_s3_uri (str, optional): S3 URI of statistics file
    - kms_key (str, optional): KMS key for encryption
    """
    def __init__(self, body_dict: dict = None, file_s3_uri: str = None, **kwargs): ...

class Constraints:
    """
    Constraint definitions for monitoring thresholds and alerting.

    Parameters:
    - body_dict (dict, optional): Constraints content as dictionary
    - file_s3_uri (str, optional): S3 URI of constraints file
    - kms_key (str, optional): KMS key for encryption
    """
    def __init__(self, body_dict: dict = None, file_s3_uri: str = None, **kwargs): ...

class ConstraintViolations:
    """
    Results of constraint violation analysis from monitoring executions.

    Parameters:
    - body_dict (dict, optional): Violations content as dictionary
    - file_s3_uri (str, optional): S3 URI of violations file
    - kms_key (str, optional): KMS key for encryption
    """
    def __init__(self, body_dict: dict = None, file_s3_uri: str = None, **kwargs): ...
```

### Monitoring Execution Management

Classes for managing and analyzing monitoring job executions and results.

```python { .api }
class MonitoringExecution:
    """
    Individual monitoring job execution with results and status.

    Parameters:
    - sagemaker_session (Session, optional): SageMaker session
    - job_name (str): Monitoring execution job name
    - inputs (List[dict], optional): Input configuration
    - output (dict, optional): Output configuration
    """
    def __init__(self, sagemaker_session: Session = None, **kwargs): ...

    def wait(self, logs: bool = True): ...
    def describe(self) -> dict: ...
    def list_steps(self) -> List[dict]: ...
    def stop(self): ...
```

## Usage Examples

### Basic Data Quality Monitoring

```python
from sagemaker.model_monitor import DefaultModelMonitor, DataCaptureConfig

# Enable data capture on endpoint
data_capture_config = DataCaptureConfig(
    enable_capture=True,
    sampling_percentage=100,
    destination_s3_uri="s3://bucket/monitoring-data"
)

# Create default monitor
monitor = DefaultModelMonitor(
    role=role,
    instance_count=1,
    instance_type="ml.m5.xlarge"
)

# Create baseline from training data
baseline_job = monitor.suggest_baseline(
    baseline_dataset="s3://bucket/training-data.csv",
    dataset_format="csv",
    output_s3_uri="s3://bucket/baseline"
)

# Create monitoring schedule
monitor.create_monitoring_schedule(
    monitor_schedule_name="data-quality-schedule",
    endpoint_input=EndpointInput(
        endpoint_name="my-endpoint",
        destination="/opt/ml/processing/endpoint"
    ),
    output_s3_uri="s3://bucket/monitoring-results",
    schedule_cron_expression="cron(0 * * * ? *)"  # Hourly
)
```

### Model Bias Monitoring

```python
from sagemaker.model_monitor import ModelBiasMonitor

# Create bias monitor
bias_monitor = ModelBiasMonitor(
    role=role,
    config_uri="s3://bucket/bias-config.json",
    instance_count=1,
    instance_type="ml.m5.xlarge"
)

# Create bias monitoring schedule
bias_monitor.create_monitoring_schedule(
    monitor_schedule_name="bias-detection-schedule",
    endpoint_input=EndpointInput(
        endpoint_name="my-endpoint",
        destination="/opt/ml/processing/endpoint",
        features_attribute="features",
        inference_attribute="prediction",
        probability_attribute="probability"
    ),
    output_s3_uri="s3://bucket/bias-results",
    schedule_cron_expression="cron(0 0 * * ? *)"  # Daily
)
```