CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-yellowbrick

A suite of visual analysis and diagnostic tools for machine learning.

Overview
Eval results
Files

docs/clustering.md

Clustering Analysis

Visualizers for unsupervised clustering evaluation and analysis, providing tools to determine optimal cluster numbers, assess clustering quality, and understand cluster relationships. These tools support various clustering algorithms and distance metrics.

Capabilities

Elbow Method Analysis

K-Elbow visualizer for determining the optimal number of clusters using the elbow method. Supports multiple scoring metrics including distortion, silhouette score, and calinski-harabasz index.

class KElbow(ClusteringScoreVisualizer):
    """
    K-Elbow visualizer for optimal cluster number selection.

    Fits the estimator once per candidate K and plots the chosen scoring
    metric against K so the "elbow" (point of diminishing returns) can be
    identified.

    Parameters:
    - estimator: scikit-learn clustering estimator (KMeans, etc.)
    - k: int or tuple, range of K values to test (default: 10, per the
      signature below; pass a tuple such as (2, 12) for an explicit
      (start, end) range)
    - metric: str, scoring metric ('distortion', 'silhouette', 'calinski_harabasz')
    - timings: bool, whether to show fitting time for each K
    - locate_elbow: bool, whether to automatically locate elbow point
      (when enabled, the located K is exposed as ``elbow_value_`` —
      see the usage example)
    """
    def __init__(self, estimator, k=10, metric='distortion', timings=True, locate_elbow=True, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...

# Backwards-compatible alias: the class is also importable under the
# name KElbowVisualizer.
KElbowVisualizer = KElbow

def kelbow_visualizer(estimator, X, k=10, metric='distortion', **kwargs):
    """
    Functional API for K-elbow visualization.

    One-call convenience wrapper around the KElbow class: the usage
    example below invokes it without any explicit fit()/show() calls,
    so it runs the full fit-and-display workflow internally.

    Parameters:
    - estimator: scikit-learn clustering estimator
    - X: feature matrix
    - k: int or tuple, range of K values to test (default: 10)
    - metric: str, scoring metric ('distortion', 'silhouette',
      'calinski_harabasz')

    Returns:
    KElbow visualizer instance
    """

def distortion_score(estimator, X):
    """
    Compute distortion score (sum of squared distances to centroids).

    This is the scorer behind KElbow's default metric='distortion';
    lower values indicate tighter clusters.

    Parameters:
    - estimator: fitted clustering estimator with cluster_centers_ attribute
    - X: feature matrix

    Returns:
    float: distortion score
    """

Usage Example:

from yellowbrick.cluster import KElbow, kelbow_visualizer
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# Generate sample data
# NOTE: this X is reused by the later usage examples on this page.
X, _ = make_blobs(n_samples=1000, centers=4, n_features=12, random_state=42)

# Class-based API
model = KMeans()
visualizer = KElbow(model, k=(2, 12), metric='distortion', timings=False)
visualizer.fit(X)
visualizer.show()

# Get optimal K
# elbow_value_ is populated by fit() when locate_elbow=True (the default)
optimal_k = visualizer.elbow_value_

# Functional API (fits and displays in one call)
kelbow_visualizer(KMeans(), X, k=(2, 12), metric='silhouette')

Silhouette Analysis

Silhouette analysis for evaluating clustering quality and cluster cohesion. Provides detailed view of how well each sample fits within its assigned cluster.

class SilhouetteVisualizer(ClusteringScoreVisualizer):
    """
    Silhouette analysis visualizer for clustering evaluation.

    Plots per-sample silhouette coefficients grouped by cluster, showing
    how well each sample fits within its assigned cluster.

    Parameters:
    - estimator: scikit-learn clustering estimator
    - colors: str or list, colors for different clusters (the default
      'yellowbrick' selects the library's palette)
    - is_fitted: bool, whether estimator is already fitted
      (presumably lets fit() skip refitting — confirm against upstream)

    Fitted attributes used in the example below: silhouette_samples_,
    silhouette_score_.
    """
    def __init__(self, estimator, colors='yellowbrick', is_fitted=False, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...

def silhouette_visualizer(estimator, X, colors='yellowbrick', **kwargs):
    """
    Functional API for silhouette analysis visualization.

    One-call convenience wrapper around SilhouetteVisualizer (the usage
    example below calls it without explicit fit()/show()).

    Parameters:
    - estimator: scikit-learn clustering estimator
    - X: feature matrix
    - colors: str or list, colors for clusters

    Returns:
    SilhouetteVisualizer instance
    """

Usage Example:

from yellowbrick.cluster import SilhouetteVisualizer, silhouette_visualizer
from sklearn.cluster import KMeans

# NOTE: X is the make_blobs data defined in the elbow-method example above.

# Class-based API
model = KMeans(n_clusters=4, random_state=42)
visualizer = SilhouetteVisualizer(model, colors='yellowbrick')
visualizer.fit(X)
visualizer.show()

# Access silhouette scores (attributes populated during fit)
silhouette_scores = visualizer.silhouette_samples_
avg_silhouette = visualizer.silhouette_score_

# Functional API
silhouette_visualizer(KMeans(n_clusters=4), X)

Intercluster Distance Maps

Intercluster distance visualization showing relationships between cluster centers using dimensionality reduction techniques like MDS or t-SNE.

class InterclusterDistance(ClusteringScoreVisualizer):
    """
    Intercluster distance map visualizer.

    Projects cluster centers into two dimensions with an embedding method
    to show the relationships between clusters.

    Parameters:
    - estimator: scikit-learn clustering estimator
    - embedding: str, embedding method ('mds', 'tsne'); see
      VALID_EMBEDDING below
    - random_state: int, random state for reproducibility of the
      (stochastic) embedding
    """
    def __init__(self, estimator, embedding='mds', random_state=None, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...
    def show(self, **kwargs): ...

def intercluster_distance(estimator, X, embedding='mds', **kwargs):
    """
    Functional API for intercluster distance visualization.

    One-call convenience wrapper around InterclusterDistance (the usage
    example below calls it without explicit fit()/show()).

    Parameters:
    - estimator: scikit-learn clustering estimator
    - X: feature matrix
    - embedding: str, embedding method ('mds' or 'tsne')

    Returns:
    InterclusterDistance visualizer instance
    """

# Embedding methods accepted by InterclusterDistance/intercluster_distance
# (the `embedding` parameter above).
VALID_EMBEDDING = ['mds', 'tsne']

Usage Example:

from yellowbrick.cluster import InterclusterDistance, intercluster_distance
from sklearn.cluster import KMeans

# NOTE: X is the make_blobs data defined in the elbow-method example above.

# Class-based API with MDS embedding
model = KMeans(n_clusters=6, random_state=42)
visualizer = InterclusterDistance(model, embedding='mds')
visualizer.fit(X)
visualizer.show()

# Class-based API with t-SNE embedding
# random_state makes the stochastic embedding layout reproducible
tsne_visualizer = InterclusterDistance(model, embedding='tsne', random_state=42)
tsne_visualizer.fit(X)
tsne_visualizer.show()

# Functional API
intercluster_distance(KMeans(n_clusters=6), X, embedding='mds')

Base Classes

class ClusteringScoreVisualizer(ScoreVisualizer):
    """
    Base class for clustering scoring visualizers.

    Parent of KElbow, SilhouetteVisualizer, and InterclusterDistance;
    provides common functionality for clustering model evaluation.
    """
    def __init__(self, estimator, **kwargs): ...
    def fit(self, X, y=None, **kwargs): ...

Usage Patterns

Complete Clustering Analysis Workflow

from yellowbrick.cluster import KElbow, SilhouetteVisualizer, InterclusterDistance
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

# Generate sample data (self-contained: re-creates the same X as above)
X, _ = make_blobs(n_samples=1000, centers=4, n_features=12, random_state=42)

# Step 1: Determine optimal number of clusters
print("Step 1: Finding optimal K using elbow method")
elbow_viz = KElbow(KMeans(), k=(2, 12), metric='distortion')
elbow_viz.fit(X)
elbow_viz.show()
# NOTE(review): elbow_value_ may be None when no clear elbow is detected —
# a robust script should guard before reusing it below.
optimal_k = elbow_viz.elbow_value_
print(f"Optimal K: {optimal_k}")

# Step 2: Evaluate clustering quality with silhouette analysis
print(f"Step 2: Silhouette analysis with K={optimal_k}")
model = KMeans(n_clusters=optimal_k, random_state=42)
silhouette_viz = SilhouetteVisualizer(model)
silhouette_viz.fit(X)
silhouette_viz.show()
print(f"Average silhouette score: {silhouette_viz.silhouette_score_:.3f}")

# Step 3: Visualize cluster relationships
print("Step 3: Intercluster distance analysis")
distance_viz = InterclusterDistance(model, embedding='mds')
distance_viz.fit(X)
distance_viz.show()

Comparing Multiple Clustering Algorithms

from yellowbrick.cluster import SilhouetteVisualizer
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt

# NOTE(review): DBSCAN is imported but never used below (it has no fixed
# cluster count, so it does not fit this fixed-K comparison) — confirm
# whether the import should be removed from the example.

# Define clustering algorithms (one per subplot below)
algorithms = {
    'K-Means': KMeans(n_clusters=4, random_state=42),
    'Agglomerative': AgglomerativeClustering(n_clusters=4),
    'Gaussian Mixture': GaussianMixture(n_components=4, random_state=42)
}

# Compare silhouette analysis across algorithms
# (X is the make_blobs data from the earlier elbow-method example)
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

for idx, (name, algorithm) in enumerate(algorithms.items()):
    # ax= routes each visualizer to its own subplot; finalize() (instead
    # of show()) renders into the shared figure so one plt.show() at the
    # end displays all three panels
    viz = SilhouetteVisualizer(algorithm, ax=axes[idx])
    viz.fit(X)
    viz.finalize()
    axes[idx].set_title(f'{name}\nAvg Score: {viz.silhouette_score_:.3f}')

plt.tight_layout()
plt.show()

Parameter Tuning with Multiple Metrics

from yellowbrick.cluster import KElbow
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# Compare different scoring metrics
# (X is the make_blobs data from the earlier elbow-method example)
metrics = ['distortion', 'silhouette', 'calinski_harabasz']
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

for idx, metric in enumerate(metrics):
    # finalize() (rather than show()) lets each visualizer render into
    # its own subplot; the single plt.show() below displays the figure
    viz = KElbow(KMeans(), k=(2, 12), metric=metric, ax=axes[idx])
    viz.fit(X)
    viz.finalize()
    axes[idx].set_title(f'Elbow Method - {metric.title()}')

plt.tight_layout()
plt.show()

Advanced Clustering Evaluation

from yellowbrick.cluster import KElbow, SilhouetteVisualizer
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

# Clustering with preprocessing pipeline (scaling happens inside every fit)
pipeline = Pipeline([
    ('scaler', StandardScaler()),
    ('kmeans', KMeans())
])

# Elbow analysis with pipeline
# (X is the make_blobs data from the earlier elbow-method example)
elbow_viz = KElbow(pipeline, k=(2, 12), metric='silhouette')
elbow_viz.fit(X)
elbow_viz.show()

# Silhouette analysis with optimal K
# NOTE(review): elbow_value_ may be None when no clear elbow is found —
# guard before passing it to set_params.
optimal_k = elbow_viz.elbow_value_
pipeline.set_params(kmeans__n_clusters=optimal_k)
silhouette_viz = SilhouetteVisualizer(pipeline)
silhouette_viz.fit(X)
silhouette_viz.show()

Custom Distance Metrics

from yellowbrick.cluster import KElbow
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances

# Custom scoring function example
def custom_score(estimator, X):
    """Custom scoring function using average intra-cluster distance.

    Sums, over all clusters, the mean Euclidean distance from each
    member sample to its cluster center. Lower values mean tighter
    clusters. Empty clusters contribute nothing.
    """
    labels = estimator.labels_
    centers = estimator.cluster_centers_

    total = 0
    for cluster_idx, center in enumerate(centers):
        members = X[labels == cluster_idx]
        if len(members):  # skip empty clusters
            total += pairwise_distances(members, [center]).mean()

    return total

# Use custom scoring with manual evaluation
# (X is the make_blobs data from the earlier elbow-method example)
k_values = range(2, 12)
scores = []

for k in k_values:
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(X)
    score = custom_score(kmeans, X)
    scores.append(score)

# Plot custom scores (lower = tighter clusters for this metric)
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
plt.plot(k_values, scores, 'bo-')
plt.xlabel('Number of Clusters (K)')
plt.ylabel('Custom Score')
plt.title('Custom Clustering Evaluation')
plt.grid(True)
plt.show()

Install with Tessl CLI

npx tessl i tessl/pypi-yellowbrick

docs

classification.md

clustering.md

data-utilities.md

features.md

index.md

model-selection.md

regression.md

text.md

tile.json