CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-mne

MNE-Python provides comprehensive tools for analyzing MEG, EEG, and other neuroimaging data with advanced source estimation and connectivity analysis.

Pending
Overview
Eval results
Files

docs/machine-learning.md

Machine Learning

Machine learning tools for decoding neural signals including common spatial patterns (CSP), temporal decoding, encoding models, and cross-validation utilities designed for neuroimaging data.

Capabilities

Spatial Filtering Methods

Learn spatial filters to extract discriminative patterns from multichannel neuroimaging data.

class CSP:
    """Common Spatial Patterns for EEG/MEG classification.

    Supervised spatial filtering: learns filters that separate classes by
    variance, producing low-dimensional discriminative features commonly fed
    to a linear classifier (see the usage example below).
    """
    
    def __init__(self, n_components: int = 4, reg: Optional[Union[str, float]] = None,
                log: Optional[bool] = None, cov_est: str = 'concat', transform_into: str = 'average_power',
                norm_trace: bool = False, cov_method_params: Optional[Dict] = None,
                rank: Optional[Union[str, int, Dict]] = None, component_order: str = 'mutual_info'):
        """
        Initialize CSP transformer.
        
        Parameters:
        - n_components: Number of CSP components to keep
        - reg: Regularization parameter for covariance estimation
        - log: Apply log transformation to features
        - cov_est: Covariance estimation method
        - transform_into: Transform method ('average_power' or 'csp_space')
        - norm_trace: Normalize covariance trace
        - cov_method_params: Covariance estimation parameters
        - rank: Data rank specification
        - component_order: Component ordering method
        """
    
    def fit(self, X: ArrayLike, y: ArrayLike) -> 'CSP':
        """
        Fit CSP spatial filters.
        
        Parameters:
        - X: Training data (n_epochs, n_channels, n_times)
        - y: Class labels (n_epochs,)
        
        Returns:
        Fitted CSP object
        """
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """
        Apply CSP transformation.
        
        Parameters:
        - X: Data to transform (n_epochs, n_channels, n_times)
        
        Returns:
        Transformed features (n_epochs, n_components)
        
        NOTE(review): the (n_epochs, n_components) shape applies to the
        default transform_into='average_power'; with 'csp_space' the time
        axis is presumably retained — confirm against upstream MNE docs.
        """
    
    def fit_transform(self, X: ArrayLike, y: ArrayLike) -> ArrayLike:
        """
        Fit CSP and transform data.
        
        Parameters:
        - X: Training data (n_epochs, n_channels, n_times)
        - y: Class labels (n_epochs,)
        
        Returns:
        CSP features (n_epochs, n_components)
        """
    
    def plot_patterns(self, info: Info, components: Optional[Union[int, List[int]]] = None,
                     ch_type: Optional[str] = None, layout: Optional[str] = None,
                     vmin: Optional[float] = None, vmax: Optional[float] = None,
                     cmap: Optional[str] = None, sensors: Union[bool, str] = True,
                     colorbar: bool = True, scale: Optional[float] = None,
                     scale_time: Optional[float] = None, unit: Optional[str] = None,
                     size: Union[int, Tuple] = 1, show_names: Union[bool, str] = False,
                     title: Optional[str] = None, show: bool = True,
                     outlines: str = 'head', image_interp: str = 'bilinear',
                     average: Optional[float] = None, sphere: Optional[float] = None) -> Figure:
        """
        Plot topographic maps of the learned CSP spatial patterns.
        
        ``info`` supplies sensor positions; the remaining arguments control
        component selection and appearance (they mirror MNE's topomap
        plotting options). Requires matplotlib.
        
        Returns:
        Figure object
        """

class SPoC:
    """Source Power Comodulation.

    Supervised spatial filtering for a *continuous* target: extracts
    components whose power comodulates with the target variable passed
    to :meth:`fit` (contrast with CSP, which uses class labels).
    """
    
    def __init__(self, n_components: int = 4, reg: Optional[Union[str, float]] = None,
                log: Optional[bool] = None, cov_est: str = 'concat',
                norm_trace: bool = False, cov_method_params: Optional[Dict] = None,
                rank: Optional[Union[str, int, Dict]] = None):
        """
        Initialize SPoC transformer.
        
        Parameters:
        - n_components: Number of components
        - reg: Regularization parameter
        - log: Apply log transformation
        - cov_est: Covariance estimation method
        - norm_trace: Normalize covariance trace
        - cov_method_params: Covariance parameters
        - rank: Data rank specification
        """
    
    def fit(self, X: ArrayLike, y: ArrayLike) -> 'SPoC':
        """
        Fit SPoC spatial filters.
        
        Parameters:
        - X: Training data (n_epochs, n_channels, n_times)
        - y: Continuous target variable (n_epochs,)
        
        Returns:
        Fitted SPoC object
        """
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """
        Apply SPoC transformation.
        
        Parameters:
        - X: Data to transform (n_epochs, n_channels, n_times)
        
        Returns:
        SPoC features (n_epochs, n_components)
        """

class SSD:
    """Spatio-Spectral Decomposition.

    Unsupervised spatial filtering that contrasts a signal frequency band
    against flanking noise bands (see ``filt_params_signal`` /
    ``filt_params_noise``) to enhance oscillatory activity.
    """
    
    def __init__(self, info: Info, filt_params_signal: Dict, filt_params_noise: Dict,
                reg: Optional[Union[str, float]] = None, n_components: Optional[int] = None,
                picks: Optional[Union[str, List]] = None, sort_by_spectral_ratio: bool = True,
                return_filtered: bool = False, n_fft: Optional[int] = None,
                cov_method_params: Optional[Dict] = None, rank: Optional[Union[str, int, Dict]] = None):
        """
        Initialize SSD transformer.
        
        Parameters:
        - info: Measurement info
        - filt_params_signal: Signal band filter parameters
        - filt_params_noise: Noise band filter parameters
        - reg: Regularization parameter
        - n_components: Number of components
        - picks: Channel selection
        - sort_by_spectral_ratio: Sort by spectral ratio
        - return_filtered: Return filtered data
        - n_fft: FFT length for PSD computation
        - cov_method_params: Covariance parameters
        - rank: Data rank specification
        """
    
    def fit(self, X: ArrayLike) -> 'SSD':
        """Fit SSD spatial filters.
        
        Parameters:
        - X: Data to decompose; presumably (n_epochs, n_channels, n_times)
          or continuous (n_channels, n_times) — confirm with upstream docs.
        
        Returns:
        Fitted SSD object
        """
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """Apply SSD transformation.
        
        Parameters:
        - X: Data to transform (same layout as passed to fit)
        
        Returns:
        SSD components; if ``return_filtered=True`` was requested at
        construction, presumably the band-filtered data instead — verify.
        """

Temporal Decoding

Decode information from neural signals with time-resolved analysis.

class SlidingEstimator:
    """Time-resolved decoding with sliding window.

    Fits one instance of ``base_estimator`` per time point, yielding a
    score/prediction time course.
    """
    
    # NOTE: annotation uses typing.Callable (imported in the Types section);
    # the builtin ``callable`` is not valid inside typing.Union at runtime.
    def __init__(self, base_estimator, scoring: Optional[Union[str, Callable]] = None,
                n_jobs: int = 1, verbose: Optional[Union[bool, str, int]] = None):
        """
        Initialize sliding estimator.
        
        Parameters:
        - base_estimator: Base classifier/regressor
        - scoring: Scoring method
        - n_jobs: Number of parallel jobs
        - verbose: Verbosity level
        """
    
    def fit(self, X: ArrayLike, y: ArrayLike) -> 'SlidingEstimator':
        """
        Fit estimator at each time point.
        
        Parameters:
        - X: Training data (n_epochs, n_features, n_times)
        - y: Target labels (n_epochs,)
        
        Returns:
        Fitted SlidingEstimator
        """
    
    def predict(self, X: ArrayLike) -> ArrayLike:
        """
        Predict at each time point.
        
        Parameters:
        - X: Test data (n_epochs, n_features, n_times)
        
        Returns:
        Predictions (n_epochs, n_times)
        """
    
    def score(self, X: ArrayLike, y: ArrayLike) -> ArrayLike:
        """
        Score at each time point.
        
        Parameters:
        - X: Test data (n_epochs, n_features, n_times)
        - y: True labels (n_epochs,)
        
        Returns:
        Scores at each time point (n_times,)
        """

class GeneralizingEstimator:
    """Cross-temporal decoding (temporal generalization).

    Like SlidingEstimator, but :meth:`score` evaluates every estimator
    trained at time *t* on every test time *t'*, producing an
    (n_times, n_times) generalization matrix.
    """
    
    # NOTE: annotation uses typing.Callable (imported in the Types section);
    # the builtin ``callable`` is not valid inside typing.Union at runtime.
    def __init__(self, base_estimator, scoring: Optional[Union[str, Callable]] = None,
                n_jobs: int = 1, verbose: Optional[Union[bool, str, int]] = None):
        """
        Initialize generalizing estimator.
        
        Parameters:
        - base_estimator: Base classifier/regressor
        - scoring: Scoring method
        - n_jobs: Number of parallel jobs
        - verbose: Verbosity level
        """
    
    def fit(self, X: ArrayLike, y: ArrayLike) -> 'GeneralizingEstimator':
        """
        Fit estimator at each time point.
        
        Parameters:
        - X: Training data (n_epochs, n_features, n_times)
        - y: Target labels (n_epochs,)
        
        Returns:
        Fitted GeneralizingEstimator
        """
    
    def score(self, X: ArrayLike, y: ArrayLike) -> ArrayLike:
        """
        Score on all time point combinations.
        
        Parameters:
        - X: Test data (n_epochs, n_features, n_times)
        - y: True labels (n_epochs,)
        
        Returns:
        Cross-temporal decoding scores (n_times, n_times)
        """

class TimeDelayingRidge:
    """Ridge regression with time delays for encoding models.

    Regresses targets on time-lagged copies of the features, with lags
    spanning [tmin, tmax] (presumably in seconds, given ``sfreq`` — confirm).
    """
    
    def __init__(self, tmin: float, tmax: float, sfreq: float, alpha: float = 0.0,
                reg_type: Optional[str] = None, n_jobs: int = 1, edge_correction: bool = True):
        """
        Initialize time-delaying ridge regression.
        
        Parameters:
        - tmin: Minimum time delay
        - tmax: Maximum time delay
        - sfreq: Sampling frequency
        - alpha: Regularization parameter (0.0 = ordinary least squares)
        - reg_type: Regularization type
        - n_jobs: Number of parallel jobs
        - edge_correction: Apply edge correction
        """
    
    def fit(self, X: ArrayLike, y: ArrayLike) -> 'TimeDelayingRidge':
        """
        Fit time-delayed ridge regression.
        
        Parameters:
        - X: Features (n_times, n_features)
        - y: Target (n_times, n_targets)
        
        Returns:
        Fitted TimeDelayingRidge
        """
    
    def predict(self, X: ArrayLike) -> ArrayLike:
        """
        Predict using fitted model.
        
        Parameters:
        - X: Features (n_times, n_features)
        
        Returns:
        Predictions (n_times, n_targets)
        """

Feature Extraction and Preprocessing

Transform raw neural data into features suitable for machine learning.

class Vectorizer:
    """Flatten multidimensional features.

    Stateless scikit-learn-style transformer: ``fit`` is a no-op and
    ``transform`` collapses the trailing (channels, times) axes into one
    feature axis, as required by most sklearn estimators.
    """
    
    def __init__(self):
        """Initialize vectorizer (no parameters)."""
    
    def fit(self, X: ArrayLike, y: Optional[ArrayLike] = None) -> 'Vectorizer':
        """Fit vectorizer (no-op; returns self for pipeline chaining)."""
        return self
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """
        Flatten last two dimensions.
        
        Parameters:
        - X: Input data (..., n_channels, n_times)
        
        Returns:
        Flattened data (..., n_channels * n_times)
        """

class Scaler:
    """Feature scaling for neural data.

    Standardizes data per channel type, using channel-type information
    from ``info`` (or explicit ``scalings``).
    """
    
    def __init__(self, info: Info, scalings: Optional[Dict] = None, with_mean: bool = True,
                with_std: bool = True):
        """
        Initialize scaler.
        
        Parameters:
        - info: Measurement info
        - scalings: Scaling factors by channel type
        - with_mean: Center data
        - with_std: Scale to unit variance
        """
    
    def fit(self, X: ArrayLike, y: Optional[ArrayLike] = None) -> 'Scaler':
        """Fit scaling parameters.
        
        Parameters:
        - X: Training data; presumably (n_epochs, n_channels, n_times) — confirm
        - y: Ignored (present for sklearn pipeline compatibility)
        
        Returns:
        Fitted Scaler object
        """
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """Apply scaling transformation.
        
        Parameters:
        - X: Data to scale (same layout as passed to fit)
        
        Returns:
        Scaled data with the same shape as X
        """

class PSDEstimator:
    """Power spectral density features.

    Transformer computing PSD features (multitaper parameters such as
    ``bandwidth``/``adaptive``/``low_bias`` suggest a multitaper method).
    """
    
    def __init__(self, sfreq: float = 2 * np.pi, fmin: float = 0, fmax: float = np.inf,
                bandwidth: Optional[float] = None, adaptive: bool = False, low_bias: bool = True,
                n_jobs: int = 1, normalization: str = 'length', verbose: Optional[Union[bool, str, int]] = None):
        """
        Initialize PSD estimator.
        
        Parameters:
        - sfreq: Sampling frequency (default 2*pi treats frequencies as
          normalized angular frequency — pass the real sampling rate in Hz)
        - fmin: Minimum frequency
        - fmax: Maximum frequency
        - bandwidth: Multitaper bandwidth
        - adaptive: Use adaptive weighting
        - low_bias: Reduce bias
        - n_jobs: Number of parallel jobs
        - normalization: Normalization method
        - verbose: Verbosity level
        """
    
    def fit(self, X: ArrayLike, y: Optional[ArrayLike] = None) -> 'PSDEstimator':
        """Fit PSD estimator.
        
        Parameters:
        - X: Training data
        - y: Ignored (present for sklearn pipeline compatibility)
        
        Returns:
        Fitted PSDEstimator object
        """
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """Compute PSD features.
        
        Parameters:
        - X: Input data
        
        Returns:
        PSD features in the [fmin, fmax] band
        """

class FilterEstimator:
    """Frequency filtering as preprocessing step.

    Wraps band-pass/high-pass/low-pass filtering (``l_freq``/``h_freq``)
    as a stateless sklearn-style transformer so it can sit in a Pipeline.
    """
    
    def __init__(self, info: Info, l_freq: Optional[float], h_freq: Optional[float],
                l_trans_bandwidth: str = 'auto', h_trans_bandwidth: str = 'auto',
                filter_length: str = 'auto', method: str = 'fir', n_jobs: int = 1):
        """
        Initialize filter estimator.
        
        Parameters:
        - info: Measurement info
        - l_freq: Low frequency cutoff (None = no high-pass)
        - h_freq: High frequency cutoff (None = no low-pass)
        - l_trans_bandwidth: Low transition bandwidth
        - h_trans_bandwidth: High transition bandwidth
        - filter_length: Filter length
        - method: Filter method
        - n_jobs: Number of parallel jobs
        """
    
    def fit(self, X: ArrayLike, y: Optional[ArrayLike] = None) -> 'FilterEstimator':
        """Fit filter (no-op; returns self for pipeline chaining)."""
        return self
    
    def transform(self, X: ArrayLike) -> ArrayLike:
        """Apply filtering.
        
        Parameters:
        - X: Data to filter
        
        Returns:
        Filtered data with the same shape as X
        """

Cross-Validation and Model Evaluation

Specialized cross-validation and evaluation methods for neuroimaging data.

# NOTE: ``cv`` annotation uses typing.Callable (imported in the Types
# section); the builtin ``callable`` is not valid inside typing.Union.
def cross_val_multiscore(estimator, X: ArrayLike, y: Optional[ArrayLike] = None,
                        groups: Optional[ArrayLike] = None, 
                        scoring: Optional[Union[str, List[str], Dict]] = None,
                        cv: Optional[Union[int, Callable]] = None,
                        n_jobs: int = 1, verbose: int = 0,
                        fit_params: Optional[Dict] = None,
                        pre_dispatch: Union[int, str] = '2*n_jobs') -> ArrayLike:
    """
    Cross-validation with multiple scoring metrics.
    
    Unlike sklearn's cross_val_score, supports estimators whose ``score``
    returns an array (e.g. SlidingEstimator's per-time-point scores).
    
    Parameters:
    - estimator: Estimator to evaluate
    - X: Feature data
    - y: Target data
    - groups: Group labels for grouped CV
    - scoring: Scoring method(s)
    - cv: Cross-validation strategy
    - n_jobs: Number of parallel jobs
    - verbose: Verbosity level
    - fit_params: Parameters to pass to fit
    - pre_dispatch: Controls job dispatching
    
    Returns:
    Cross-validation scores; presumably one row per CV split (the usage
    example below averages over axis 0) — confirm exact shape upstream.
    """

def get_coef(estimator, attr: str = 'filters_', inverse_transform: bool = False) -> ArrayLike:
    """
    Extract coefficients from fitted estimator.
    
    Parameters:
    - estimator: Fitted estimator
    - attr: Attribute name containing coefficients (e.g. 'filters_' or
      'patterns_' for the spatial decoders above)
    - inverse_transform: Apply inverse transformation (map coefficients
      back through preceding pipeline steps)
    
    Returns:
    Coefficient array
    """

Encoding Models

Model how stimulus features are encoded in neural responses.

class ReceptiveField:
    """Temporal response function estimation.

    Encoding model: maps time-lagged stimulus features (lags in
    [tmin, tmax]) onto per-channel neural responses, scored per channel.
    """
    
    def __init__(self, tmin: float, tmax: float, sfreq: float, feature_names: Optional[List[str]] = None,
                estimator: Optional[str] = None, scoring: str = 'corrcoef', n_jobs: int = 1,
                edge_correction: bool = True, verbose: Optional[Union[bool, str, int]] = None):
        """
        Initialize receptive field estimator.
        
        Parameters:
        - tmin: Minimum time lag
        - tmax: Maximum time lag
        - sfreq: Sampling frequency
        - feature_names: Names of stimulus features
        - estimator: Regression estimator to use
        - scoring: Scoring method
        - n_jobs: Number of parallel jobs
        - edge_correction: Apply edge correction
        - verbose: Verbosity level
        """
    
    def fit(self, X: ArrayLike, y: ArrayLike) -> 'ReceptiveField':
        """
        Fit receptive field model.
        
        Parameters:
        - X: Stimulus features (n_times, n_features)
        - y: Neural responses (n_times, n_channels)
        
        Returns:
        Fitted ReceptiveField
        """
    
    def predict(self, X: ArrayLike) -> ArrayLike:
        """
        Predict neural responses.
        
        Parameters:
        - X: Stimulus features (n_times, n_features)
        
        Returns:
        Predicted responses (n_times, n_channels)
        """
    
    def score(self, X: ArrayLike, y: ArrayLike) -> ArrayLike:
        """
        Score model performance.
        
        Parameters:
        - X: Stimulus features
        - y: True neural responses
        
        Returns:
        Scores for each channel (one value per channel, using ``scoring``)
        """

class EMS:
    """EEG Motor Imagery Separability.
    
    NOTE(review): in released MNE-Python, ``mne.decoding.EMS`` stands for
    "Effect-Matched Spatial" filtering and has a different constructor and
    fit signature; the name expansion and parameters documented here do not
    match that API — verify against upstream MNE before relying on this.
    """
    
    def __init__(self, fmin: float = 8, fmax: float = 35, n_freq: int = 9,
                n_steps: int = 8, return_spectrum: bool = False):
        """
        Initialize EMS computation.
        
        Parameters:
        - fmin: Minimum frequency
        - fmax: Maximum frequency
        - n_freq: Number of frequencies
        - n_steps: Number of time steps
        - return_spectrum: Return full spectrum
        """
    
    def fit(self, epochs_1: Epochs, epochs_2: Epochs) -> 'EMS':
        """
        Compute EMS between two conditions.
        
        Parameters:
        - epochs_1: First condition epochs
        - epochs_2: Second condition epochs
        
        Returns:
        Fitted EMS object
        """

def compute_ems(epochs_1: Epochs, epochs_2: Epochs, fmin: float = 8, fmax: float = 35,
               n_freq: int = 9, n_steps: int = 8, return_spectrum: bool = False) -> Union[float, Tuple]:
    """
    Compute EEG Motor Imagery Separability metric.
    
    NOTE(review): upstream MNE's ``compute_ems`` implements Effect-Matched
    Spatial filtering with a different signature; this spec's expansion and
    parameters do not match that API — verify before use.
    
    Parameters:
    - epochs_1: First condition epochs
    - epochs_2: Second condition epochs
    - fmin: Minimum frequency
    - fmax: Maximum frequency
    - n_freq: Number of frequencies
    - n_steps: Number of time steps
    - return_spectrum: Return full spectrum
    
    Returns:
    EMS value, or a tuple including the full spectrum when
    return_spectrum=True
    """

Usage Examples

CSP for Motor Imagery Classification

import mne
from mne.decoding import CSP
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline

# Load motor imagery epochs (MNE epochs files use the .fif extension,
# conventionally suffixed '-epo.fif')
epochs = mne.read_epochs('motor_imagery-epo.fif')

# Get data and labels
X = epochs.get_data()  # (n_epochs, n_channels, n_times)
y = epochs.events[:, 2]  # Event IDs as labels

# Create CSP + LDA pipeline: spatial filtering followed by a linear classifier
csp = CSP(n_components=4, reg=None, log=True)
lda = LinearDiscriminantAnalysis()
pipeline = Pipeline([('csp', csp), ('lda', lda)])

# Fit and evaluate with 5-fold cross-validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(pipeline, X, y, cv=5, scoring='accuracy')
print(f"Accuracy: {scores.mean():.3f} ± {scores.std():.3f}")

# Plot CSP patterns (refit on all data so the patterns reflect the full set)
csp.fit(X, y)
csp.plot_patterns(epochs.info, ch_type='eeg', size=1.5)

Time-Resolved Decoding

import mne
from mne.decoding import SlidingEstimator, cross_val_multiscore
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt  # needed for the plotting below

# Load epochs (MNE epochs files use the .fif extension)
epochs = mne.read_epochs('sample-epo.fif')

# Prepare data
X = epochs.get_data()  # (n_epochs, n_channels, n_times)
y = epochs.events[:, 2]

# Create sliding estimator: one classifier fit per time point
clf = LogisticRegression(solver='liblinear')
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')

# Perform cross-validation at each time point
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)

# Plot decoding accuracy over time (mean ± std across CV folds)
times = epochs.times
plt.figure(figsize=(10, 4))
plt.plot(times, scores.mean(axis=0))
plt.fill_between(times, scores.mean(axis=0) - scores.std(axis=0),
                scores.mean(axis=0) + scores.std(axis=0), alpha=0.3)
plt.xlabel('Time (s)')
plt.ylabel('ROC AUC')
plt.title('Time-resolved decoding')
plt.show()

Cross-Temporal Decoding

import mne
from mne.decoding import GeneralizingEstimator
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt  # needed for the plotting below

# Load epochs (MNE epochs files use the .fif extension)
epochs = mne.read_epochs('sample-epo.fif')
X = epochs.get_data()
y = epochs.events[:, 2]

# Create generalizing estimator: train at each time point, test at all others
clf = LogisticRegression(solver='liblinear')
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc')

# Fit and score (scores matrix is n_times x n_times)
time_gen.fit(X, y)
scores = time_gen.score(X, y)

# Plot cross-temporal decoding matrix
times = epochs.times
fig, ax = plt.subplots(figsize=(8, 8))
im = ax.imshow(scores, extent=[times[0], times[-1], times[0], times[-1]],
              cmap='RdYlBu_r', vmin=0.4, vmax=0.6, origin='lower')
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Cross-temporal decoding')
plt.colorbar(im, ax=ax)
plt.show()

Receptive Field Modeling

import mne
from mne.decoding import ReceptiveField
import numpy as np

# Load continuous data (MNE raw files use the .fif extension)
raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
raw.filter(1, 8)  # Filter for slow components

# Create synthetic stimulus (e.g., audio envelope)
n_times = len(raw.times)
stimulus = np.random.randn(n_times, 3)  # 3 stimulus features

# Get MEG data
picks = mne.pick_types(raw.info, meg='grad')
meg_data = raw.get_data(picks=picks).T  # (n_times, n_channels)

# Fit receptive field model
rf = ReceptiveField(tmin=-0.1, tmax=0.4, sfreq=raw.info['sfreq'], 
                   scoring='corrcoef')
rf.fit(stimulus, meg_data)

# Get model scores (one correlation per channel)
scores = rf.score(stimulus, meg_data)
print(f"Mean correlation: {scores.mean():.3f}")

# Report the best-predicted channel. (A ``plot`` method is not part of the
# ReceptiveField API documented above; inspect scores / fitted attributes
# rather than calling rf.plot.)
best = int(np.argmax(scores))
print(f"Best channel: index {best} (r={scores[best]:.3f})")

Feature Engineering Pipeline

import mne
from mne.decoding import Scaler, Vectorizer, PSDEstimator
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier

# Load epochs (MNE epochs files use the .fif extension)
epochs = mne.read_epochs('sample-epo.fif')
X = epochs.get_data()
y = epochs.events[:, 2]

# Create feature extraction pipeline:
# scale per channel type -> PSD features -> flatten -> classify
pipeline = Pipeline([
    ('scaler', Scaler(epochs.info)),  # Scale by channel type
    ('psd', PSDEstimator(sfreq=epochs.info['sfreq'], 
                        fmin=1, fmax=40)),  # PSD features
    ('vectorizer', Vectorizer()),  # Flatten features
    ('classifier', RandomForestClassifier(n_estimators=100))
])

# Evaluate pipeline with 5-fold cross-validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(pipeline, X, y, cv=5)
print(f"Accuracy: {scores.mean():.3f} ± {scores.std():.3f}")

Types

import numpy as np
from typing import Union, Optional, List, Dict, Tuple, Any, Callable

# Any array-like container accepted by the APIs in this document.
ArrayLike = Union[np.ndarray, List, Tuple]
# Stand-in type so matplotlib is not a hard dependency of the spec.
Figure = Any  # matplotlib.figure.Figure

Install with Tessl CLI

npx tessl i tessl/pypi-mne

docs

data-io.md

datasets.md

index.md

machine-learning.md

preprocessing.md

source-analysis.md

statistics.md

time-frequency.md

visualization.md

tile.json