Fit interpretable models and explain blackbox machine learning with comprehensive interpretability tools.
npx @tessl/cli install tessl/pypi-interpret@0.7.0

A comprehensive machine learning interpretability library that provides tools for training interpretable models and explaining both glassbox and blackbox machine learning systems. Interpret incorporates state-of-the-art techniques including Explainable Boosting Machine (EBM), SHAP, LIME, and sensitivity analysis, with built-in interactive visualization capabilities.
pip install interpret

import interpret

Common imports for interpretable models:
from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor

Common imports for explaining blackbox models:
from interpret.blackbox import LimeTabular, ShapKernel, PartialDependence

Common imports for visualization:
from interpret import show, preserve

from interpret.glassbox import ExplainableBoostingClassifier
from interpret.blackbox import LimeTabular
from interpret import show
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# Create sample data
X, y = make_classification(n_samples=1000, n_features=20, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Train an interpretable model
ebm = ExplainableBoostingClassifier(random_state=42)
ebm.fit(X_train, y_train)
# Get global explanations
global_explanation = ebm.explain_global()
show(global_explanation)
# Get local explanations
local_explanation = ebm.explain_local(X_test[:5], y_test[:5])
show(local_explanation)
# Explain a blackbox model
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
lime = LimeTabular(predict_fn=rf.predict_proba, data=X_train)
lime_explanation = lime.explain_local(X_test[:5], y_test[:5])
show(lime_explanation)

Interpret follows a consistent architectural pattern organized around explanation types and provider systems:
Explainers inherit from ExplainerMixin and implement fit() and explain_*() methods; explanations inherit from ExplanationMixin, providing consistent data access.

Inherently interpretable machine learning models that provide transparency by design, including Explainable Boosting Machine (EBM), linear models, decision trees, and decision lists.
class ExplainableBoostingClassifier:
def __init__(self, **kwargs): ...
def fit(self, X, y): ...
def predict(self, X): ...
def explain_global(self): ...
def explain_local(self, X, y=None): ...
class ExplainableBoostingRegressor:
def __init__(self, **kwargs): ...
def fit(self, X, y): ...
def predict(self, X): ...
def explain_global(self): ...
def explain_local(self, X, y=None): ...Model-agnostic explanation methods for any machine learning model, including LIME, SHAP, partial dependence plots, and sensitivity analysis.
class LimeTabular:
def __init__(self, predict_fn, data, **kwargs): ...
def explain_local(self, X, y=None): ...
class ShapKernel:
def __init__(self, predict_fn, data, **kwargs): ...
def explain_local(self, X, y=None): ...
class PartialDependence:
def __init__(self, predict_fn, data, **kwargs): ...
def explain_global(self): ...Specialized explanation methods optimized for tree-based models, providing efficient and accurate explanations for decision trees, random forests, and gradient boosting models.
class ShapTree:
def __init__(self, model, data, **kwargs): ...
def explain_local(self, X): ...
def explain_global(self): ...Tools for understanding dataset characteristics and feature distributions to inform model selection and feature engineering decisions.
class ClassHistogram:
def __init__(self): ...
def explain_data(self, X, y): ...
class Marginal:
def __init__(self): ...
def explain_data(self, X, y=None): ...Comprehensive model performance analysis tools including ROC curves, PR curves, and regression metrics with interactive visualizations.
class ROC:
def __init__(self): ...
def explain_perf(self, y_true, y_prob): ...
class PR:
def __init__(self): ...
def explain_perf(self, y_true, y_prob): ...
class RegressionPerf:
def __init__(self): ...
def explain_perf(self, y_true, y_pred): ...Interactive visualization system with multiple backends, preservation capabilities, and server management for dashboard applications.
def show(explanation): ...
def preserve(explanation): ...
def set_visualize_provider(provider): ...
def init_show_server(): ...
def shutdown_show_server(): ...Differentially private machine learning models that provide formal privacy guarantees while maintaining interpretability.
class DPExplainableBoostingClassifier:
def __init__(self, epsilon=1.0, **kwargs): ...
def fit(self, X, y): ...
def explain_global(self): ...Utility functions for data preprocessing, feature interaction analysis, synthetic data generation, and development tools.
def measure_interactions(X, y): ...
def make_synthetic(n_samples): ...
class EBMPreprocessor:
def __init__(self): ...
def fit_transform(self, X): ...class ExplainerMixin:
"""Abstract base class for all explainers."""
def fit(self, X, y): ...
def explain_global(self): ...
def explain_local(self, X, y=None): ...
class ExplanationMixin:
"""Abstract base class for all explanations."""
def data(self): ...
def visualize(self): ...