CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-scikit-learn-intelex

Intel Extension for Scikit-learn providing hardware-accelerated implementations of scikit-learn algorithms optimized for Intel CPUs and GPUs.

Pending
Overview
Eval results
Files

decomposition.mddocs/

Decomposition

Principal Component Analysis with Intel acceleration for efficient dimensionality reduction on large datasets. Provides significant performance improvements through optimized matrix decomposition algorithms.

Capabilities

Principal Component Analysis

Intel-accelerated PCA with optimized singular value decomposition for fast dimensionality reduction.

class PCA:
    """
    Principal Component Analysis with Intel optimization.
    
    Efficient dimensionality reduction using optimized SVD algorithms
    and Intel Math Kernel Library integration.
    
    NOTE: this is an interface stub — method bodies document the contract
    only; the accelerated implementation is provided by the sklearnex
    package at runtime.
    """
    
    def __init__(
        self,
        n_components=None,
        copy=True,
        whiten=False,
        svd_solver='auto',
        tol=0.0,
        iterated_power='auto',
        n_oversamples=10,
        power_iteration_normalizer='auto',
        random_state=None
    ):
        """
        Initialize PCA.
        
        Parameters:
            n_components (int, float or None): Number of components to keep;
                None keeps all components
            copy (bool): Whether to copy data rather than overwrite it
                during fitting
            whiten (bool): Whether to whiten components (scale to unit
                variance)
            svd_solver (str): SVD solver algorithm (default 'auto')
            tol (float): Tolerance for singular values
            iterated_power (int or 'auto'): Number of iterations for
                randomized SVD (default 'auto')
            n_oversamples (int): Additional samples for randomized SVD
            power_iteration_normalizer (str): Normalization method for the
                randomized solver's power iterations
            random_state (int or None): Random state for reproducibility
        """
    
    def fit(self, X, y=None):
        """
        Fit PCA model.
        
        Parameters:
            X (array-like): Training data of shape (n_samples, n_features)
            y: Ignored, present for API consistency
            
        Returns:
            self: Fitted estimator
        """
    
    def transform(self, X):
        """
        Transform data to lower dimensional space.
        
        Parameters:
            X (array-like): Data to transform; must have the same number
                of features seen during fit
            
        Returns:
            array: Transformed data of shape (n_samples, n_components)
        """
    
    def fit_transform(self, X, y=None):
        """
        Fit model and transform data in a single pass.
        
        Parameters:
            X (array-like): Training data
            y: Ignored
            
        Returns:
            array: Transformed data of shape (n_samples, n_components)
        """
    
    def inverse_transform(self, X):
        """
        Transform data back to original space.
        
        Reconstruction is lossy when n_components < n_features.
        
        Parameters:
            X (array-like): Data in PCA space
            
        Returns:
            array: Data in original space of shape (n_samples, n_features)
        """
    
    def score(self, X, y=None):
        """
        Return average log-likelihood of the samples.
        
        Parameters:
            X (array-like): Test data
            y: Ignored
            
        Returns:
            float: Average log-likelihood
        """
    
    # Attributes available after fitting
    components_: ...           # Principal axes in feature space
    explained_variance_: ...   # Variance explained by each component
    explained_variance_ratio_: ... # Fraction of total variance per component
    singular_values_: ...      # Singular values of the centered data
    mean_: ...                # Per-feature empirical mean
    n_components_: ...        # Number of components retained
    n_features_in_: ...       # Number of features seen during fit
    noise_variance_: ...      # Estimated noise covariance (probabilistic PCA model)

Usage Examples

Basic PCA for Dimensionality Reduction

from sklearnex.decomposition import PCA
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier

# Generate a synthetic high-dimensional classification dataset.
X, y = make_classification(
    n_samples=1000, n_features=50, n_informative=30,
    n_redundant=20, random_state=42
)

# Project the 50 original features onto the top 10 principal components.
pca = PCA(n_components=10, random_state=42)
X_reduced = pca.fit_transform(X)

print(f"Original shape: {X.shape}")
print(f"Reduced shape: {X_reduced.shape}")
print(f"Explained variance ratio: {pca.explained_variance_ratio_.sum():.3f}")

# Train and evaluate a classifier on the reduced representation to show
# the compressed features still carry the class signal.
X_train, X_test, y_train, y_test = train_test_split(
    X_reduced, y, test_size=0.2, random_state=42
)

rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
accuracy = rf.score(X_test, y_test)

print(f"Classification accuracy with PCA: {accuracy:.3f}")

Explained Variance Analysis

import numpy as np
from sklearnex.decomposition import PCA
from sklearn.datasets import load_digits

# Load the 8x8 handwritten-digits dataset (64 features per sample).
digits = load_digits()
X, y = digits.data, digits.target

# Fit PCA keeping every component so the full variance spectrum is available.
pca_full = PCA()
pca_full.fit(X)

# Cumulative share of variance captured by the first k components.
cumsum_var = np.cumsum(pca_full.explained_variance_ratio_)

# Smallest number of components preserving at least 95% of the variance;
# argmax returns the first index where the condition holds.
n_components_95 = int(np.argmax(cumsum_var >= 0.95)) + 1
print(f"Components for 95% variance: {n_components_95}/{len(cumsum_var)}")

# Re-fit with just that many components.
pca = PCA(n_components=n_components_95)
X_transformed = pca.fit_transform(X)

print(f"Original dimensions: {X.shape}")
print(f"Reduced dimensions: {X_transformed.shape}")
print(f"Variance preserved: {pca.explained_variance_ratio_.sum():.3f}")

# Show how much each of the leading components contributes.
print("Top 5 components variance:")
for i in range(min(5, len(pca.explained_variance_ratio_))):
    print(f"  PC{i+1}: {pca.explained_variance_ratio_[i]:.4f}")

Data Reconstruction and Noise Reduction

import numpy as np
from sklearnex.decomposition import PCA
from sklearn.datasets import make_blobs

# Generate clean cluster data, then corrupt it with Gaussian noise.
# The noise generator is seeded explicitly so the example is fully
# reproducible (make_blobs alone was seeded via random_state, but the
# noise was not).
X_clean, _ = make_blobs(n_samples=500, centers=3, n_features=20, random_state=42)
rng = np.random.default_rng(42)
noise = rng.normal(0, 0.5, X_clean.shape)
X_noisy = X_clean + noise

# Project onto the leading components and map back: dropping the trailing
# components discards much of the isotropic noise.
pca = PCA(n_components=10)  # Keep only top 10 components
X_pca = pca.fit_transform(X_noisy)
X_reconstructed = pca.inverse_transform(X_pca)

# Mean squared distance between the noisy input and its reconstruction...
reconstruction_error = np.mean((X_noisy - X_reconstructed) ** 2)
# ...and how much closer the reconstruction is to the clean signal
# than the noisy data was (positive = denoising helped).
denoising_improvement = np.mean((X_clean - X_noisy) ** 2) - np.mean((X_clean - X_reconstructed) ** 2)

print(f"Original data shape: {X_noisy.shape}")
print(f"PCA components: {X_pca.shape[1]}")
print(f"Reconstruction error: {reconstruction_error:.4f}")
print(f"Denoising improvement: {denoising_improvement:.4f}")
print(f"Explained variance: {pca.explained_variance_ratio_.sum():.3f}")

Performance Comparison

import time
import numpy as np
from sklearn.datasets import make_classification

# Large synthetic dataset so the timing difference is measurable.
X, y = make_classification(
    n_samples=5000, n_features=200, n_informative=100,
    random_state=42
)

# Intel-optimized PCA
from sklearnex.decomposition import PCA as IntelPCA

# perf_counter is the right clock for measuring elapsed intervals:
# it is monotonic and high-resolution, unlike time.time which can
# jump when the system clock is adjusted.
start_time = time.perf_counter()
intel_pca = IntelPCA(n_components=50)
X_intel = intel_pca.fit_transform(X)
intel_time = time.perf_counter() - start_time

print("Intel PCA:")
print(f"  Time: {intel_time:.2f} seconds")
print(f"  Shape: {X_intel.shape}")
print(f"  Explained variance: {intel_pca.explained_variance_ratio_.sum():.3f}")

# Standard scikit-learn PCA (for comparison)
from sklearn.decomposition import PCA as StandardPCA

start_time = time.perf_counter()
standard_pca = StandardPCA(n_components=50)
X_standard = standard_pca.fit_transform(X)
standard_time = time.perf_counter() - start_time

print("\nStandard PCA:")
print(f"  Time: {standard_time:.2f} seconds")
print(f"  Shape: {X_standard.shape}")
print(f"  Explained variance: {standard_pca.explained_variance_ratio_.sum():.3f}")
print(f"  Speedup: {standard_time / intel_time:.1f}x")

# Principal components are sign-ambiguous between implementations,
# so compare absolute values.
results_close = np.allclose(
    np.abs(X_intel), np.abs(X_standard), rtol=1e-3
)
print(f"  Results equivalent: {results_close}")

Performance Notes

  • Significant speedups on datasets with >1000 samples and >50 features
  • SVD computation is highly optimized with Intel MKL
  • Memory usage is comparable to standard scikit-learn
  • Randomized SVD solver provides additional performance benefits for large datasets
  • Numerical stability is maintained, equivalent to the standard scikit-learn implementation

Install with Tessl CLI

npx tessl i tessl/pypi-scikit-learn-intelex

docs

advanced.md

clustering.md

daal4py-mb.md

decomposition.md

ensemble.md

index.md

linear-models.md

metrics-model-selection.md

neighbors.md

patching-config.md

stats-manifold.md

svm.md

tile.json