CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-interpret

Fit interpretable models and explain blackbox machine learning with comprehensive interpretability tools.

Pending
Overview
Eval results
Files

docs/glassbox.md

Interpretable Models (Glassbox)

Machine learning models that are inherently interpretable by design. These models provide transparency and explainability without requiring post-hoc explanation methods, making them ideal for high-stakes applications where understanding model behavior is critical.

Capabilities

Explainable Boosting Machine (EBM)

State-of-the-art interpretable machine learning algorithm that uses gradient boosting with intelligibility constraints. EBM provides accuracy competitive with blackbox models while maintaining full interpretability through additive feature contributions.

class ExplainableBoostingClassifier:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        max_bins=1024,
        max_interaction_bins=64,
        interactions="5x",
        exclude=None,
        validation_size=0.15,
        outer_bags=14,
        inner_bags=0,
        learning_rate=0.04,
        greedy_ratio=10.0,
        cyclic_progress=False,
        smoothing_rounds=500,
        interaction_smoothing_rounds=100,
        max_rounds=50000,
        early_stopping_rounds=100,
        early_stopping_tolerance=1e-5,
        callback=None,
        min_samples_leaf=4,
        min_hessian=0.0,
        reg_alpha=0.0,
        reg_lambda=0.0,
        max_delta_step=0.0,
        gain_scale=5.0,
        min_cat_samples=10,
        cat_smooth=10.0,
        missing="separate",
        max_leaves=2,
        monotone_constraints=None,
        # FIX: the classification default is "log_loss"; "rmse" is the
        # regression objective and had been swapped with the regressor's default.
        objective="log_loss",
        n_jobs=-2,
        random_state=42
    ):
        """
        Explainable Boosting Machine classifier.
        
        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features ('continuous', 'ordinal', 'nominal')
            max_bins (int): Maximum bins for continuous features
            max_interaction_bins (int): Maximum bins for interaction features
            interactions (int, float, str, or list): Number of interactions to detect or specific pairs ("5x" = 5 times the number of features)
            exclude (list, optional): Features to exclude from interactions
            validation_size (float): Proportion for validation set
            outer_bags (int): Number of outer bags for training
            inner_bags (int): Number of inner bags for training
            learning_rate (float): Learning rate for boosting
            greedy_ratio (float): Ratio for greedy vs cyclic boosting
            cyclic_progress (bool, float, int): Use cyclic boosting progress
            smoothing_rounds (int): Rounds of smoothing
            interaction_smoothing_rounds (int): Rounds for interaction smoothing
            max_rounds (int): Maximum boosting rounds
            early_stopping_rounds (int): Early stopping patience
            early_stopping_tolerance (float): Early stopping tolerance
            callback (callable, optional): Callback function for training progress
            min_samples_leaf (int): Minimum samples per leaf
            min_hessian (float): Minimum hessian for node splitting
            reg_alpha (float): L1 regularization term
            reg_lambda (float): L2 regularization term
            max_delta_step (float): Maximum delta step for weight estimation
            gain_scale (float): Gain scaling factor
            min_cat_samples (int): Minimum samples for categorical splitting
            cat_smooth (float): Smoothing factor for categorical features
            missing (str): How to handle missing values ("separate", "none")
            max_leaves (int): Maximum leaves per tree
            monotone_constraints (list, optional): Monotonic constraints for features
            objective (str): Loss function objective ("log_loss", etc.)
            n_jobs (int): Parallel jobs (-2 for all but one CPU)
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y, sample_weight=None):
        """Fit the EBM classifier on training data, with optional per-sample weights."""
    
    def predict(self, X):
        """Predict class labels for X."""
    
    def predict_proba(self, X):
        """Predict class probabilities for X."""
    
    def explain_global(self, name=None):
        """Get global feature importance explanation."""
    
    def explain_local(self, X, y=None, name=None):
        """Get local explanations for specific instances."""

class ExplainableBoostingRegressor:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        max_bins=1024,
        max_interaction_bins=64,
        interactions="3x",
        exclude=None,
        validation_size=0.15,
        outer_bags=14,
        inner_bags=0,
        learning_rate=0.015,
        greedy_ratio=10.0,
        cyclic_progress=False,
        smoothing_rounds=75,
        interaction_smoothing_rounds=75,
        max_rounds=50000,
        early_stopping_rounds=100,
        early_stopping_tolerance=1e-5,
        callback=None,
        min_samples_leaf=4,
        min_hessian=1e-4,
        reg_alpha=0.0,
        reg_lambda=0.0,
        max_delta_step=0.0,
        gain_scale=5.0,
        min_cat_samples=10,
        cat_smooth=10.0,
        missing="separate",
        max_leaves=2,
        monotone_constraints=None,
        # FIX: the regression default is "rmse"; "log_loss" is the
        # classification objective and had been swapped with the classifier's default.
        objective="rmse",
        n_jobs=-2,
        random_state=42
    ):
        """
        Explainable Boosting Machine regressor.
        
        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features ('continuous', 'ordinal', 'nominal')
            max_bins (int): Maximum bins for continuous features
            max_interaction_bins (int): Maximum bins for interaction features
            interactions (int, float, str, or list): Number of interactions to detect or specific pairs ("3x" = 3 times the number of features)
            exclude (list, optional): Features to exclude from interactions
            validation_size (float): Proportion for validation set
            outer_bags (int): Number of outer bags for training
            inner_bags (int): Number of inner bags for training
            learning_rate (float): Learning rate for boosting
            greedy_ratio (float): Ratio for greedy vs cyclic boosting
            cyclic_progress (bool, float, int): Use cyclic boosting progress
            smoothing_rounds (int): Rounds of smoothing
            interaction_smoothing_rounds (int): Rounds for interaction smoothing
            max_rounds (int): Maximum boosting rounds
            early_stopping_rounds (int): Early stopping patience
            early_stopping_tolerance (float): Early stopping tolerance
            callback (callable, optional): Callback function for training progress
            min_samples_leaf (int): Minimum samples per leaf
            min_hessian (float): Minimum hessian for node splitting
            reg_alpha (float): L1 regularization term
            reg_lambda (float): L2 regularization term
            max_delta_step (float): Maximum delta step for weight estimation
            gain_scale (float): Gain scaling factor
            min_cat_samples (int): Minimum samples for categorical splitting
            cat_smooth (float): Smoothing factor for categorical features
            missing (str): How to handle missing values ("separate", "none")
            max_leaves (int): Maximum leaves per tree
            monotone_constraints (list, optional): Monotonic constraints for features
            objective (str): Loss function objective ("rmse", etc.)
            n_jobs (int): Parallel jobs (-2 for all but one CPU)
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y, sample_weight=None):
        """Fit the EBM regressor on training data, with optional per-sample weights."""
    
    def predict(self, X):
        """Predict target values for X."""
    
    def explain_global(self, name=None):
        """Get global feature importance explanation."""
    
    def explain_local(self, X, y=None, name=None):
        """Get local explanations for specific instances."""

def merge_ebms(ebms):
    """Combine several fitted EBM models into a single model.

    Intended for EBMs trained on different datasets (e.g. separate data
    partitions), whose additive terms are merged into one model.

    Parameters:
        ebms (list): Trained EBM models to merge.

    Returns:
        The merged EBM model.
    """

Linear Models

Traditional linear regression and logistic regression models with full interpretability through coefficient inspection.

class LinearRegression:
    def __init__(self, feature_names=None, feature_types=None):
        """Interpretable linear regression model.

        Transparency comes directly from inspecting the fitted coefficients.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
        """
    
    def fit(self, X, y):
        """Estimate the linear model coefficients from training data."""
    
    def predict(self, X):
        """Return predicted target values for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation built from the fitted coefficients."""

class LogisticRegression:
    def __init__(self, feature_names=None, feature_types=None):
        """Interpretable logistic regression classifier.

        Transparency comes directly from inspecting the fitted coefficients.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
        """
    
    def fit(self, X, y):
        """Estimate the logistic model coefficients from training data."""
    
    def predict(self, X):
        """Return predicted class labels for X."""
    
    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation built from the fitted coefficients."""

Decision Trees

Interpretable decision tree models for classification and regression with built-in explanation capabilities.

class ClassificationTree:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        random_state=None
    ):
        """Interpretable decision tree classifier.

        Explanations are derived from the learned tree structure and the
        decision path followed by each instance.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
            max_depth (int, optional): Maximum tree depth
            min_samples_split (int): Minimum samples to split
            min_samples_leaf (int): Minimum samples per leaf
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y):
        """Grow the decision tree from training data."""
    
    def predict(self, X):
        """Return predicted class labels for X."""
    
    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation of the tree structure."""
    
    def explain_local(self, X, y=None, name=None):
        """Return decision-path explanations for individual instances."""

class RegressionTree:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        random_state=None
    ):
        """Interpretable decision tree regressor.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
            max_depth (int, optional): Maximum tree depth
            min_samples_split (int): Minimum samples to split
            min_samples_leaf (int): Minimum samples per leaf
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y):
        """Grow the regression tree from training data."""
    
    def predict(self, X):
        """Return predicted target values for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation of the tree structure."""
    
    def explain_local(self, X, y=None, name=None):
        """Return decision-path explanations for individual instances."""

Decision Lists

Interpretable rule-based classification models that provide easy-to-understand if-then rules.

class DecisionListClassifier:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        max_depth=5,
        min_samples_leaf=10,
        random_state=None
    ):
        """Rule-based decision list classifier.

        Learns an ordered set of if-then rules, each of which can be read
        and explained directly.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
            max_depth (int): Maximum rule depth
            min_samples_leaf (int): Minimum samples per rule
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y):
        """Learn the ordered rule list from training data."""
    
    def predict(self, X):
        """Return predicted class labels for X."""
    
    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation listing the learned rules."""
    
    def explain_local(self, X, y=None, name=None):
        """Return the matched-rule explanation for individual instances."""

APLR Models

Additive Piecewise Linear Regression models that combine interpretability with the ability to capture non-linear relationships.

class APLRClassifier:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        random_state=None
    ):
        """Additive Piecewise Linear Regression classifier.

        Captures non-linear relationships while remaining additive and
        therefore interpretable per feature.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y):
        """Fit the piecewise-linear additive model on training data."""
    
    def predict(self, X):
        """Return predicted class labels for X."""
    
    def predict_proba(self, X):
        """Return predicted class probabilities for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation of the piecewise-linear terms."""
    
    def explain_local(self, X, y=None, name=None):
        """Return local explanations for individual instances."""

class APLRRegressor:
    def __init__(
        self,
        feature_names=None,
        feature_types=None,
        random_state=None
    ):
        """Additive Piecewise Linear Regression regressor.

        Parameters:
            feature_names (list, optional): Names for features
            feature_types (list, optional): Types for features
            random_state (int, optional): Random seed
        """
    
    def fit(self, X, y):
        """Fit the piecewise-linear additive model on training data."""
    
    def predict(self, X):
        """Return predicted target values for X."""
    
    def explain_global(self, name=None):
        """Return a global explanation of the piecewise-linear terms."""
    
    def explain_local(self, X, y=None, name=None):
        """Return local explanations for individual instances."""

Usage Examples

Training an EBM Model

from interpret.glassbox import ExplainableBoostingClassifier
from interpret import show
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

# Load the breast-cancer dataset and hold out 20% for testing.
dataset = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.2, random_state=42
)

# Fit an EBM that also searches for 5 pairwise interactions.
model = ExplainableBoostingClassifier(
    feature_names=dataset.feature_names,
    interactions=5,
    random_state=42
)
model.fit(X_train, y_train)

# Visualize the global feature contributions...
show(model.explain_global())

# ...and per-instance explanations for a handful of test rows.
show(model.explain_local(X_test[:5], y_test[:5]))

Comparing Multiple Interpretable Models

from interpret.glassbox import (
    ExplainableBoostingClassifier, 
    LogisticRegression, 
    ClassificationTree
)
from sklearn.metrics import accuracy_score

# Three interpretable models to compare on the same split.
models = {
    'EBM': ExplainableBoostingClassifier(random_state=42),
    'Logistic': LogisticRegression(),
    'Tree': ClassificationTree(max_depth=5, random_state=42)
}

for name, model in models.items():
    # Fit, score, and report test-set accuracy for each model.
    model.fit(X_train, y_train)
    acc = accuracy_score(y_test, model.predict(X_test))
    print(f"{name} Accuracy: {acc:.4f}")
    
    # Show global explanations
    show(model.explain_global(name=f"{name} Global"))

Install with Tessl CLI

npx tessl i tessl/pypi-interpret

docs

blackbox.md

data.md

glassbox.md

greybox.md

index.md

performance.md

privacy.md

utils.md

visualization.md

tile.json