A suite of visual analysis and diagnostic tools for machine learning.
Tools for feature selection, analysis, and visualization to understand data characteristics, identify important features, and guide feature engineering decisions. These visualizers support both supervised and unsupervised feature analysis techniques.
Univariate and bivariate feature ranking visualizers for identifying the most informative features using various statistical measures and algorithms.
class Rank1D(Visualizer):
    """
    1D feature ranking visualizer using univariate statistical measures.

    Scores each feature independently with a single-feature statistic and
    draws the scores as a bar chart.

    Parameters
    ----------
    algorithm : str, default='shapiro'
        Univariate ranking algorithm ('shapiro' scores each feature with a
        normality test; other algorithms available).
    features : list, optional
        Feature names used for display labels.
    orient : str, default='h'
        Bar orientation: 'h' for horizontal, 'v' for vertical.
    """
    def __init__(self, algorithm='shapiro', features=None, orient='h', **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...
class Rank2D(Visualizer):
    """
    2D feature ranking visualizer using bivariate statistical measures.

    Scores every pair of features with the chosen metric and renders the
    resulting pairwise matrix as a heatmap.

    Parameters
    ----------
    algorithm : str, default='pearson'
        Pairwise ranking algorithm: 'pearson', 'covariance', 'spearman',
        or 'kendalltau'.
    features : list, optional
        Feature names used for display labels.
    colormap : str, default='RdBu_r'
        Matplotlib colormap for the heatmap.
    """
    def __init__(self, algorithm='pearson', features=None, colormap='RdBu_r', **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...
def rank1d(X, y=None, algorithm='shapiro', features=None, **kwargs):
    """
    Functional API for 1D feature ranking visualization.

    One-shot helper around the Rank1D visualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    algorithm : str
        Univariate ranking algorithm (default 'shapiro').
    features : list, optional
        Feature names for display.

    Returns
    -------
    Rank1D visualizer instance
    """
def rank2d(X, y=None, algorithm='pearson', features=None, **kwargs):
    """
    Functional API for 2D feature ranking visualization.

    One-shot helper around the Rank2D visualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    algorithm : str
        Pairwise ranking algorithm (default 'pearson').
    features : list, optional
        Feature names for display.

    Returns
    -------
    Rank2D visualizer instance
"""

Usage Example:
# Usage example: univariate (Rank1D) and bivariate (Rank2D) feature ranking
# on the scikit-learn wine dataset, via both the class and functional APIs.
from yellowbrick.features import Rank1D, Rank2D, rank1d, rank2d
from sklearn.datasets import load_wine

# Load sample data
wine = load_wine()
X, y = wine.data, wine.target
features = wine.feature_names

# 1D feature ranking (per-feature Shapiro normality score)
rank1d_viz = Rank1D(algorithm='shapiro', features=features)
rank1d_viz.fit(X, y)
rank1d_viz.show()

# 2D feature correlation heatmap
rank2d_viz = Rank2D(algorithm='pearson', features=features)
rank2d_viz.fit(X, y)
rank2d_viz.show()

# Functional API: fit and display in a single call
rank1d(X, y, features=features, algorithm='shapiro')
rank2d(X, y, features=features, algorithm='spearman')

Parallel coordinates visualization for multivariate data analysis, showing relationships between features and target classes across multiple dimensions.
class ParallelCoordinates(Visualizer):
    """
    Parallel coordinates plot for multivariate data visualization.

    Draws one vertical axis per feature and one polyline per sample,
    colored by class, to expose class separability across dimensions.

    Parameters
    ----------
    classes : list, optional
        Class labels for the target.
    features : list, optional
        Feature names for display.
    normalize : str, optional
        Normalization method: 'standard', 'minmax', 'robust', or None
        (no normalization).
    sample : float or int, default=1.0
        Sampling strategy for large datasets (presumably a float is a
        fraction of points and an int a count — confirm against docs).
    shuffle : bool, default=False
        Whether to shuffle the data before sampling.
    random_state : int, optional
        Random state for reproducible sampling.
    """
    def __init__(self, classes=None, features=None, normalize=None, sample=1.0, shuffle=False, random_state=None, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...
def parallel_coordinates(X, y=None, classes=None, features=None, **kwargs):
    """
    Functional API for parallel coordinates visualization.

    One-shot helper around the ParallelCoordinates visualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    classes : list, optional
        Class labels.
    features : list, optional
        Feature names.

    Returns
    -------
    ParallelCoordinates visualizer instance
"""

Radial visualization for projecting multidimensional data onto a 2D plane, useful for identifying clusters and class separability.
class RadialVisualizer(Visualizer):
    """
    Radial visualization (RadViz) for multidimensional data projection.

    Places one anchor per feature around a circle and projects each sample
    into the circle, helping to identify clusters and class separability.

    Parameters
    ----------
    classes : list, optional
        Class labels for the target.
    features : list, optional
        Feature names used for the circle anchors.
    alpha : float, default=0.75
        Transparency of the plotted data points.
    """
    def __init__(self, classes=None, features=None, alpha=0.75, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...

# Alias kept for compatibility with older import paths.
RadViz = RadialVisualizer
def radviz(X, y=None, classes=None, features=None, **kwargs):
    """
    Functional API for radial visualization.

    One-shot helper around the RadialVisualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    classes : list, optional
        Class labels.
    features : list, optional
        Feature names.

    Returns
    -------
    RadialVisualizer instance
"""

Joint plots showing relationships between pairs of features with marginal distributions, useful for understanding feature interactions and distributions.
class JointPlot(Visualizer):
    """
    Joint plot visualization for feature pair analysis.

    Plots one feature against another with marginal distributions to show
    the interaction and distribution of the pair.

    Parameters
    ----------
    columns : tuple or list, optional
        Column indices or names for the x and y axes.
    classes : list, optional
        Class labels for the target.
    kind : str, default='scatter'
        Plot type: 'scatter', 'hex', or 'reg'.
    """
    def __init__(self, columns=None, classes=None, kind='scatter', **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...

# Alias kept for compatibility with older import paths.
JointPlotVisualizer = JointPlot
def joint_plot(X, y=None, columns=None, classes=None, **kwargs):
    """
    Functional API for joint plot visualization.

    One-shot helper around the JointPlot visualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    columns : tuple, optional
        Column indices or names for the two plotted axes.
    classes : list, optional
        Class labels.

    Returns
    -------
    JointPlot visualizer instance
"""

Principal Component Analysis visualization for dimensionality reduction, variance explanation, and feature transformation analysis.
class PCA(Visualizer):
    """
    PCA decomposition visualizer for dimensionality reduction analysis.

    Projects the data onto its principal components and plots the
    projection, optionally overlaying the original feature vectors.

    Parameters
    ----------
    scale : bool, default=True
        Whether to scale features before PCA.
    proj_features : bool, default=True
        Whether to project the original features onto the plot.
    biplot : bool, default=False
        Whether to draw a biplot with feature vectors.
    classes : list, optional
        Class labels for the target.
    """
    def __init__(self, scale=True, proj_features=True, biplot=False, classes=None, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...

# Alias kept for compatibility with older import paths.
PCADecomposition = PCA
def pca_decomposition(X, y=None, scale=True, proj_features=True, **kwargs):
    """
    Functional API for PCA decomposition visualization.

    One-shot helper around the PCA visualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    scale : bool, default=True
        Whether to scale features before PCA.
    proj_features : bool, default=True
        Whether to project the original features onto the plot.

    Returns
    -------
    PCA visualizer instance
"""

Manifold learning visualization for non-linear dimensionality reduction using various algorithms like t-SNE, ISOMAP, and Locally Linear Embedding.
class Manifold(Visualizer):
    """
    Manifold learning visualizer for non-linear dimensionality reduction.

    Computes a low-dimensional embedding of the data with the chosen
    manifold algorithm and plots it colored by the target.

    Parameters
    ----------
    manifold : str, default='lle'
        Manifold algorithm: 'lle', 'ltsa', 'hessian', 'modified',
        'isomap', 'mds', 'spectral', or 'tsne'.
    n_neighbors : int, optional
        Number of neighbors for locality-based methods.
    classes : list, optional
        Class labels for the target.
    target_type : str, default='auto'
        How to interpret y: 'discrete', 'continuous', 'single', or 'auto'.
    """
    def __init__(self, manifold='lle', n_neighbors=None, classes=None, target_type='auto', **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...
def manifold_embedding(X, y=None, manifold='lle', classes=None, **kwargs):
    """
    Functional API for manifold learning visualization.

    One-shot helper around the Manifold visualizer.

    Parameters
    ----------
    X : feature matrix
    y : target vector, optional
    manifold : str
        Manifold algorithm (default 'lle').
    classes : list, optional
        Class labels.

    Returns
    -------
    Manifold visualizer instance
"""

Feature importance visualization from model selection module, showing the relative importance of features as determined by tree-based models.
class FeatureImportances(ModelVisualizer):
    """
    Feature importances visualizer for tree-based models.

    Fits the wrapped estimator and draws a bar chart of its
    feature_importances_ values.

    Parameters
    ----------
    estimator : scikit-learn estimator
        Must expose a feature_importances_ attribute after fitting.
    labels : list, optional
        Feature labels for display.
    relative : bool, default=True
        Show relative importance (percentages).
    absolute : bool, default=False
        Show absolute importance values.
    """
    def __init__(self, estimator, labels=None, relative=True, absolute=False, **kwargs): ...
    def fit(self, X, y, **kwargs): ...
    def show(self, **kwargs): ...
def feature_importances(estimator, X, y, labels=None, **kwargs):
    """
    Functional API for feature importances visualization.

    One-shot helper around the FeatureImportances visualizer.

    Parameters
    ----------
    estimator : scikit-learn estimator
    X : feature matrix
    y : target vector
    labels : list, optional
        Feature labels.

    Returns
    -------
    FeatureImportances visualizer instance
"""

Recursive Feature Elimination with Cross-Validation (RFECV) for systematic feature selection using model performance feedback.
class RFECV(ModelVisualizer):
    """
    Recursive Feature Elimination with Cross-Validation visualizer.

    Repeatedly fits the estimator while pruning features, plotting the
    cross-validated score against the number of features retained.

    Parameters
    ----------
    estimator : scikit-learn estimator
    cv : int or cross-validation generator, optional
    scoring : str, optional
        Scoring metric name.
    step : int or float, default=1
        Number of features to remove at each step (a float presumably
        means a fraction, per the sklearn RFECV convention — confirm).
    """
    def __init__(self, estimator, cv=None, scoring=None, step=1, **kwargs): ...
    def fit(self, X, y, **kwargs): ...
    def show(self, **kwargs): ...
def rfecv(estimator, X, y, cv=None, scoring=None, **kwargs):
    """
    Functional API for RFECV visualization.

    One-shot helper around the RFECV visualizer.

    Parameters
    ----------
    estimator : scikit-learn estimator
    X : feature matrix
    y : target vector
    cv : int or cross-validation generator, optional
    scoring : str, optional
        Scoring metric name.

    Returns
    -------
    RFECV visualizer instance
"""

from enum import Enum
# Enumeration of target-variable types; used by visualizers (e.g. the
# Manifold target_type parameter) to decide how the target y is interpreted.
# NOTE: member lines appear unindented here because the document's
# indentation was stripped during extraction.
class TargetType(Enum):
AUTO = "auto"
SINGLE = "single"
DISCRETE = "discrete"
CONTINUOUS = "continuous"
UNKNOWN = "unknown"

from yellowbrick.features import Rank1D, Rank2D, ParallelCoordinates, RadViz, PCA
# Usage example: a 2x2 feature-analysis dashboard on the wine dataset
# (ranking, parallel coordinates, RadViz), followed by a standalone PCA plot.
from sklearn.datasets import load_wine
import matplotlib.pyplot as plt

# Load sample data
wine = load_wine()
X, y = wine.data, wine.target
features = wine.feature_names
classes = wine.target_names

# Feature ranking analysis: shared figure with four subplots
fig, axes = plt.subplots(2, 2, figsize=(15, 12))

# 1D feature ranking
rank1d_viz = Rank1D(features=features, ax=axes[0,0])
rank1d_viz.fit(X, y)
rank1d_viz.finalize()  # finalize() draws onto the given axes; one plt.show() follows below

# 2D feature correlation
rank2d_viz = Rank2D(features=features, ax=axes[0,1])
rank2d_viz.fit(X, y)
rank2d_viz.finalize()

# Parallel coordinates
pcoords_viz = ParallelCoordinates(classes=classes, ax=axes[1,0])
pcoords_viz.fit(X, y)
pcoords_viz.finalize()

# RadViz
radviz_viz = RadViz(classes=classes, ax=axes[1,1])
radviz_viz.fit(X, y)
radviz_viz.finalize()

plt.tight_layout()
plt.show()

# PCA analysis with biplot overlay
pca_viz = PCA(scale=True, biplot=True, classes=classes)
pca_viz.fit(X, y)
pca_viz.show()

from yellowbrick.features import PCA, Manifold
# Usage example: comparing linear (PCA) and non-linear (manifold) projection
# techniques side by side on the high-dimensional digits dataset.
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt

# Load high-dimensional data
digits = load_digits()
X, y = digits.data, digits.target

# Compare different dimensionality reduction techniques in a 2x3 grid
fig, axes = plt.subplots(2, 3, figsize=(18, 12))
axes = axes.ravel()
techniques = [
    ('PCA', PCA(scale=True)),
    ('t-SNE', Manifold(manifold='tsne')),
    ('ISOMAP', Manifold(manifold='isomap')),
    ('LLE', Manifold(manifold='lle')),
    ('Spectral', Manifold(manifold='spectral')),
    ('MDS', Manifold(manifold='mds'))
]
for idx, (name, viz) in enumerate(techniques):
    # Attach each visualizer to its own subplot before fitting
    viz.ax = axes[idx]
    viz.fit(X, y)
    viz.finalize()
    axes[idx].set_title(name)
plt.tight_layout()
plt.show()

from yellowbrick.features import RFECV, FeatureImportances
# Usage example: model-based feature selection workflow — importance ranking
# followed by recursive feature elimination with cross-validation.
# NOTE(review): this snippet relies on X, y and `features` defined by
# earlier snippets, and imports LearningCurve without using it here.
from yellowbrick.model_selection import LearningCurve
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Hold out a test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Step 1: Feature importance analysis with a random forest
rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
fi_viz = FeatureImportances(rf_model, labels=features)
fi_viz.fit(X_train, y_train)
fi_viz.show()

# Step 2: Recursive feature elimination with 5-fold cross-validation
rfecv_viz = RFECV(rf_model, cv=5, scoring='accuracy')
rfecv_viz.fit(X_train, y_train)
rfecv_viz.show()

# Keep only the selected features (support_ is a boolean mask)
optimal_features = rfecv_viz.support_
X_train_selected = X_train[:, optimal_features]
X_test_selected = X_test[:, optimal_features]
print(f"Selected {optimal_features.sum()} out of {len(optimal_features)} features")

from yellowbrick.features import Rank2D
# Usage example: comparing Rank2D correlation algorithms on synthetic data
# with a known informative/redundant feature structure.
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt

# Generate sample data: 10 informative + 10 redundant features
X, y = make_classification(n_samples=1000, n_features=20, n_informative=10,
                           n_redundant=10, random_state=42)

# Compare different ranking algorithms in a 2x2 grid
algorithms = ['pearson', 'covariance', 'spearman', 'kendalltau']
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
axes = axes.ravel()
for idx, algorithm in enumerate(algorithms):
    viz = Rank2D(algorithm=algorithm, ax=axes[idx])
    viz.fit(X, y)
    viz.finalize()
    axes[idx].set_title(f'{algorithm.title()} Correlation')
plt.tight_layout()
plt.show()

from yellowbrick.features import JointPlot, ParallelCoordinates
# Usage example: pairwise joint plots and a parallel-coordinates
# normalization comparison on the iris dataset.
# NOTE(review): relies on matplotlib.pyplot being imported as plt by an
# earlier snippet.
from sklearn.datasets import load_iris

# Load data
iris = load_iris()
X, y = iris.data, iris.target
features = iris.feature_names
classes = iris.target_names

# Joint plot for selected feature pairs (tuples of column indices)
feature_pairs = [(0, 1), (0, 2), (1, 3), (2, 3)]
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
axes = axes.ravel()
for idx, (i, j) in enumerate(feature_pairs):
    viz = JointPlot(columns=(i, j), classes=classes, ax=axes[idx])
    viz.fit(X, y)
    viz.finalize()
    axes[idx].set_title(f'{features[i]} vs {features[j]}')
plt.tight_layout()
plt.show()

# Parallel coordinates with each supported normalization (None = raw values)
normalizations = [None, 'standard', 'minmax', 'robust']
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
axes = axes.ravel()
for idx, norm in enumerate(normalizations):
    viz = ParallelCoordinates(classes=classes, normalize=norm, ax=axes[idx])
    viz.fit(X, y)
    viz.finalize()
    title = f'Normalization: {norm}' if norm else 'No Normalization'
    axes[idx].set_title(title)
plt.tight_layout()
plt.show()

Install with Tessl CLI
npx tessl i tessl/pypi-yellowbrick