MNE-Python provides comprehensive tools for analyzing MEG, EEG, and other neuroimaging data with advanced source estimation and connectivity analysis.
Statistical analysis tools (mne.stats)
Statistical methods including cluster-based permutation testing, multiple comparisons correction, and specialized analysis tools designed for neuroimaging data with high temporal and spatial resolution.
Correct for multiple comparisons across time, frequency, and spatial dimensions.
def bonferroni_correction(pvals: ArrayLike, alpha: float = 0.05) -> Tuple[ArrayLike, float]:
    """
    Bonferroni correction for multiple comparisons.

    The family-wise error rate is split evenly across all tests, so each
    individual test is evaluated at ``alpha / n_tests``.

    Parameters:
    - pvals: P-values to correct (any shape; corrected elementwise)
    - alpha: Family-wise error rate to control
    Returns:
    Tuple of (boolean reject array, corrected alpha level)
    """
    import numpy as np  # local import keeps this module import-light
    pvals = np.asarray(pvals)
    # Guard against empty input so we never divide by zero.
    n_tests = max(pvals.size, 1)
    alpha_corrected = alpha / n_tests
    reject = pvals < alpha_corrected
    return reject, alpha_corrected
def fdr_correction(pvals: ArrayLike, alpha: float = 0.05, method: str = 'indep') -> Tuple[ArrayLike, ArrayLike]:
    """
    False discovery rate (FDR) correction for multiple comparisons.

    Implements the Benjamini-Hochberg ('indep') and Benjamini-Yekutieli
    ('negcorr') step-up procedures.

    Parameters:
    - pvals: P-values to correct (any shape; corrected elementwise)
    - alpha: False discovery rate to control
    - method: 'indep' for independent or positively correlated tests,
      'negcorr' for arbitrary dependence (more conservative)
    Returns:
    Tuple of (boolean reject array, corrected p-values), both shaped
    like ``pvals``
    Raises:
    ValueError if ``method`` is not 'indep' or 'negcorr'
    """
    import numpy as np
    pvals = np.asarray(pvals)
    shape = pvals.shape
    flat = pvals.ravel()
    n_tests = flat.size
    order = np.argsort(flat)
    ranked = flat[order]
    # Step-up critical factors i/n; scaled by the harmonic sum for 'negcorr'.
    factor = np.arange(1, n_tests + 1) / float(max(n_tests, 1))
    if method == 'negcorr':
        factor = factor / np.sum(1.0 / np.arange(1, n_tests + 1))
    elif method != 'indep':
        raise ValueError("method must be 'indep' or 'negcorr', got %r" % (method,))
    corrected = ranked / factor
    # Enforce monotonicity from the largest p-value down, then cap at 1.
    corrected = np.minimum.accumulate(corrected[::-1])[::-1]
    corrected = np.clip(corrected, 0, 1)
    pvals_corrected = np.empty_like(corrected)
    pvals_corrected[order] = corrected
    pvals_corrected = pvals_corrected.reshape(shape)
    reject = pvals_corrected <= alpha
    return reject, pvals_corrected
# Non-parametric statistical testing that accounts for multiple comparisons by clustering adjacent significant tests.
def permutation_cluster_test(X: ArrayLike, threshold: Optional[float] = None, n_permutations: int = 1024,
                             tail: int = 0, stat_fun: Optional[callable] = None,
                             adjacency: Optional[ArrayLike] = None, n_jobs: int = 1,
                             seed: Optional[int] = None, max_step: int = 1, exclude: Optional[ArrayLike] = None,
                             step_down_p: float = 0, t_power: float = 1, out_type: str = 'indices',
                             check_disjoint: bool = False, buffer_size: Optional[int] = None,
                             verbose: Optional[Union[bool, str, int]] = None) -> Tuple:
    """
    Cluster-level permutation test comparing two or more conditions.

    Samples whose statistic exceeds ``threshold`` are grouped into
    clusters (guided by ``adjacency`` when given), and the cluster-level
    statistics are compared against a permutation null distribution.

    Parameters:
    - X: Data to compare; a list of per-condition arrays or a single array
    - threshold: Cluster-forming threshold applied to the statistic
    - n_permutations: How many permutations build the null distribution
    - tail: Test direction (0=two-tailed, 1=greater, -1=less)
    - stat_fun: Statistic computed for each permutation (library default if None)
    - adjacency: Adjacency matrix defining which samples may cluster together
    - n_jobs: Number of parallel jobs
    - seed: Random seed for reproducibility
    - max_step: Maximum step between samples joined into one cluster
    - exclude: Mask of samples to leave out of clustering
    - step_down_p: P-value threshold for the step-down refinement
    - t_power: Exponent applied to statistics when summing within clusters
    - out_type: Cluster output format, 'indices' or 'mask'
    - check_disjoint: Whether to split the adjacency into disjoint sets first
    - buffer_size: Permutation buffer size (memory/speed trade-off)
    - verbose: Verbosity level
    Returns:
    Tuple of (T_obs, clusters, cluster_p_values, H0)
    """
def permutation_cluster_1samp_test(X: ArrayLike, threshold: Optional[float] = None, n_permutations: int = 1024,
                                   tail: int = 0, stat_fun: Optional[callable] = None,
                                   adjacency: Optional[ArrayLike] = None, n_jobs: int = 1,
                                   seed: Optional[int] = None, max_step: int = 1,
                                   exclude: Optional[ArrayLike] = None, step_down_p: float = 0,
                                   t_power: float = 1, out_type: str = 'indices',
                                   check_disjoint: bool = False, buffer_size: Optional[int] = None,
                                   verbose: Optional[Union[bool, str, int]] = None) -> Tuple:
    """
    One-sample (or paired) cluster-level permutation test.

    Sign flips of the observations provide the permutation null; suprathreshold
    samples are clustered and evaluated at the cluster level.

    Parameters:
    - X: Data array of shape (n_observations, n_features)
    - threshold: Cluster-forming threshold applied to the statistic
    - n_permutations: How many sign-flip permutations to draw
    - tail: Test direction (0=two-tailed, 1=greater, -1=less)
    - stat_fun: Statistic computed per permutation (library default if None)
    - adjacency: Adjacency matrix defining which samples may cluster together
    - n_jobs: Number of parallel jobs
    - seed: Random seed for reproducibility
    - max_step: Maximum step between samples joined into one cluster
    - exclude: Mask of samples to leave out of clustering
    - step_down_p: P-value threshold for the step-down refinement
    - t_power: Exponent applied to statistics when summing within clusters
    - out_type: Cluster output format, 'indices' or 'mask'
    - check_disjoint: Whether to split the adjacency into disjoint sets first
    - buffer_size: Permutation buffer size (memory/speed trade-off)
    - verbose: Verbosity level
    Returns:
    Tuple of (T_obs, clusters, cluster_p_values, H0)
    """
def spatio_temporal_cluster_test(X: ArrayLike, adjacency: ArrayLike, threshold: Optional[float] = None,
                                 n_permutations: int = 1024, tail: int = 0, stat_fun: Optional[callable] = None,
                                 n_jobs: int = 1, seed: Optional[int] = None, max_step: int = 1,
                                 exclude: Optional[ArrayLike] = None, step_down_p: float = 0,
                                 t_power: float = 1, out_type: str = 'indices',
                                 check_disjoint: bool = False, buffer_size: Optional[int] = None,
                                 verbose: Optional[Union[bool, str, int]] = None) -> Tuple:
    """
    Cluster-level permutation test over space and time, multiple conditions.

    Parameters:
    - X: Per-condition data arrays
      (each n_observations x n_vertices x n_times per this API reference)
    - adjacency: Spatial adjacency matrix used to form clusters
    - threshold: Cluster-forming threshold applied to the statistic
    - n_permutations: How many permutations build the null distribution
    - tail: Test direction (0=two-tailed, 1=greater, -1=less)
    - stat_fun: Statistic computed per permutation (library default if None)
    - n_jobs: Number of parallel jobs
    - seed: Random seed for reproducibility
    - max_step: Maximum temporal step joined into one cluster
    - exclude: Mask of samples to leave out of clustering
    - step_down_p: P-value threshold for the step-down refinement
    - t_power: Exponent applied to statistics when summing within clusters
    - out_type: Cluster output format, 'indices' or 'mask'
    - check_disjoint: Whether to split the adjacency into disjoint sets first
    - buffer_size: Permutation buffer size (memory/speed trade-off)
    - verbose: Verbosity level
    Returns:
    Tuple of (T_obs, clusters, cluster_p_values, H0)
    """
def spatio_temporal_cluster_1samp_test(X: ArrayLike, adjacency: ArrayLike, threshold: Optional[float] = None,
                                       n_permutations: int = 1024, tail: int = 0,
                                       stat_fun: Optional[callable] = None, n_jobs: int = 1,
                                       seed: Optional[int] = None, max_step: int = 1,
                                       exclude: Optional[ArrayLike] = None, step_down_p: float = 0,
                                       t_power: float = 1, out_type: str = 'indices',
                                       check_disjoint: bool = False, buffer_size: Optional[int] = None,
                                       verbose: Optional[Union[bool, str, int]] = None) -> Tuple:
    """
    One-sample cluster-level permutation test over space and time.

    Parameters:
    - X: Data array (n_observations x n_vertices x n_times per this
      API reference)
    - adjacency: Spatial adjacency matrix used to form clusters
    - threshold: Cluster-forming threshold applied to the statistic
    - n_permutations: How many sign-flip permutations to draw
    - tail: Test direction (0=two-tailed, 1=greater, -1=less)
    - stat_fun: Statistic computed per permutation (library default if None)
    - n_jobs: Number of parallel jobs
    - seed: Random seed for reproducibility
    - max_step: Maximum temporal step joined into one cluster
    - exclude: Mask of samples to leave out of clustering
    - step_down_p: P-value threshold for the step-down refinement
    - t_power: Exponent applied to statistics when summing within clusters
    - out_type: Cluster output format, 'indices' or 'mask'
    - check_disjoint: Whether to split the adjacency into disjoint sets first
    - buffer_size: Permutation buffer size (memory/speed trade-off)
    - verbose: Verbosity level
    Returns:
    Tuple of (T_obs, clusters, cluster_p_values, H0)
    """
# Classical parametric tests adapted for neuroimaging data structures.
def f_oneway(*args: ArrayLike) -> Tuple[ArrayLike, ArrayLike]:
    """
    One-way ANOVA F-test, computed elementwise along the first axis.

    Parameters:
    - *args: Two or more data arrays, one per group, each of shape
      (n_observations, ...) with matching trailing dimensions
    Returns:
    Tuple of (F-statistics, p-values)
    Raises:
    ValueError if fewer than two groups are supplied
    """
    import numpy as np
    from scipy import stats
    if len(args) < 2:
        raise ValueError('f_oneway requires at least two groups')
    groups = [np.asarray(g, dtype=float) for g in args]
    sizes = [g.shape[0] for g in groups]
    n_total = sum(sizes)
    grand_mean = np.concatenate(groups, axis=0).mean(axis=0)
    # Between-group and within-group sums of squares.
    ss_between = sum(n_g * (g.mean(axis=0) - grand_mean) ** 2
                     for n_g, g in zip(sizes, groups))
    ss_within = sum(((g - g.mean(axis=0)) ** 2).sum(axis=0) for g in groups)
    df_between = len(groups) - 1
    df_within = n_total - len(groups)
    f_stat = (ss_between / df_between) / (ss_within / df_within)
    p_val = stats.f.sf(f_stat, df_between, df_within)
    return f_stat, p_val
def f_mway_rm(data: ArrayLike, factor_levels: ArrayLike, effects: str = 'A*B',
              correction: bool = False, alpha: float = 0.05) -> Dict:
    """
    Multi-way repeated-measures ANOVA.

    Parameters:
    - data: Data array (n_subjects x n_conditions x n_times)
    - factor_levels: Number of levels for each within-subject factor
    - effects: Which effects to test, e.g. 'A', 'B', or 'A*B'
    - correction: Whether to apply a sphericity correction
    - alpha: Significance level
    Returns:
    Dictionary containing F-statistics, p-values, and effect information
    """
def ttest_1samp_no_p(X: ArrayLike, popmean: float = 0, sigma: Optional[float] = None) -> ArrayLike:
    """
    One-sample t-test without p-value computation (for speed).

    Parameters:
    - X: Data array of shape (n_observations, ...); the test runs along
      the first axis
    - popmean: Population mean under the null hypothesis
    - sigma: Known population standard deviation; when None, the sample
      standard deviation (ddof=1) is used
    Returns:
    T-statistics array of shape X.shape[1:]
    """
    import numpy as np
    X = np.asarray(X, dtype=float)
    n_obs = X.shape[0]
    # Use the supplied sigma if known; otherwise estimate from the sample.
    sd = np.std(X, axis=0, ddof=1) if sigma is None else sigma
    return (X.mean(axis=0) - popmean) / (sd / np.sqrt(n_obs))
def ttest_ind_no_p(X1: ArrayLike, X2: ArrayLike, equal_var: bool = True) -> ArrayLike:
    """
    Independent-samples t-test without p-value computation (for speed).

    Parameters:
    - X1: First group, shape (n1, ...); the test runs along the first axis
    - X2: Second group, shape (n2, ...) with matching trailing dimensions
    - equal_var: If True, pool the variances (Student's t); otherwise use
      Welch's unequal-variance form
    Returns:
    T-statistics array of shape X1.shape[1:]
    """
    import numpy as np
    X1 = np.asarray(X1, dtype=float)
    X2 = np.asarray(X2, dtype=float)
    n1, n2 = X1.shape[0], X2.shape[0]
    diff = X1.mean(axis=0) - X2.mean(axis=0)
    v1 = X1.var(axis=0, ddof=1)
    v2 = X2.var(axis=0, ddof=1)
    if equal_var:
        pooled = ((n1 - 1) * v1 + (n2 - 1) * v2) / (n1 + n2 - 2)
        se = np.sqrt(pooled * (1.0 / n1 + 1.0 / n2))
    else:
        se = np.sqrt(v1 / n1 + v2 / n2)
    return diff / se
# Non-parametric alternatives and bootstrap methods for statistical inference.
def permutation_t_test(X: ArrayLike, n_permutations: int = 10000, tail: int = 0,
                       n_jobs: int = 1, seed: Optional[int] = None,
                       verbose: Optional[Union[bool, str, int]] = None) -> Tuple[ArrayLike, ArrayLike]:
    """
    One-sample t-test with permutation-based (sign-flip) p-values.

    Parameters:
    - X: Data array (n_observations, n_features)
    - n_permutations: Number of sign-flip permutations to draw
    - tail: Test direction (0=two-tailed, 1=greater, -1=less)
    - n_jobs: Number of parallel jobs
    - seed: Random seed for reproducibility
    - verbose: Verbosity level
    Returns:
    Tuple of (T-statistics, permutation p-values)
    """
def bootstrap_confidence_interval(arr: ArrayLike, ci: float = 0.95, n_bootstraps: int = 10000,
                                  stat_fun: str = 'mean', seed: Optional[int] = None,
                                  n_jobs: int = 1, verbose: Optional[Union[bool, str, int]] = None) -> ArrayLike:
    """
    Bootstrap confidence interval for a summary statistic.

    Observations are resampled with replacement along the first axis and
    the percentile interval of the resampled statistics is returned.

    Parameters:
    - arr: Data array of shape (n_observations, ...)
    - ci: Confidence level, e.g. 0.95 for a 95% interval
    - n_bootstraps: Number of bootstrap resamples
    - stat_fun: Statistic to compute: 'mean', 'median', or 'std'
    - seed: Random seed for reproducibility
    - n_jobs: Number of parallel jobs (accepted for API compatibility;
      this implementation is serial)
    - verbose: Verbosity level (unused here)
    Returns:
    Array of shape (2, ...) holding the lower and upper bounds
    Raises:
    ValueError if ``stat_fun`` is not one of the supported names
    """
    import numpy as np
    stat_funs = {'mean': np.mean, 'median': np.median, 'std': np.std}
    if stat_fun not in stat_funs:
        raise ValueError("stat_fun must be 'mean', 'median' or 'std', got %r" % (stat_fun,))
    stat = stat_funs[stat_fun]
    arr = np.asarray(arr, dtype=float)
    rng = np.random.RandomState(seed)
    n_obs = arr.shape[0]
    boot = np.empty((n_bootstraps,) + arr.shape[1:])
    for it in range(n_bootstraps):
        # Resample observations with replacement, keep trailing dims intact.
        boot[it] = stat(arr[rng.randint(0, n_obs, n_obs)], axis=0)
    low = (1.0 - ci) / 2.0 * 100.0
    return np.percentile(boot, [low, 100.0 - low], axis=0)
# Regression methods for relating neural signals to experimental variables.
def linear_regression(design_matrix: ArrayLike, data: ArrayLike, names: Optional[List[str]] = None) -> Dict:
    """
    Ordinary least-squares regression of ``data`` on ``design_matrix``.

    Parameters:
    - design_matrix: Design matrix (n_observations x n_regressors)
    - data: Data matrix (n_observations x n_features); a 1-D vector is
      treated as a single feature
    - names: Regressor names; defaults to 'x0', 'x1', ...
    Returns:
    Dictionary with 'beta' (n_regressors x n_features), 'stderr',
    't_val', 'p_val' (same shape as 'beta'), 'names', and 'df'
    """
    import numpy as np
    from scipy import stats
    X = np.asarray(design_matrix, dtype=float)
    Y = np.asarray(data, dtype=float)
    if Y.ndim == 1:
        Y = Y[:, np.newaxis]  # promote a vector target to one feature column
    n_obs, n_reg = X.shape
    if names is None:
        names = ['x%d' % k for k in range(n_reg)]
    beta, _, _, _ = np.linalg.lstsq(X, Y, rcond=None)
    resid = Y - X.dot(beta)
    df = n_obs - n_reg
    mse = (resid ** 2).sum(axis=0) / df
    # Var(beta_j) = mse * [(X'X)^-1]_jj; pinv tolerates rank deficiency.
    unscaled_var = np.diag(np.linalg.pinv(X.T.dot(X)))
    with np.errstate(divide='ignore', invalid='ignore'):
        stderr = np.sqrt(unscaled_var[:, np.newaxis] * mse[np.newaxis, :])
        t_val = beta / stderr
    p_val = 2 * stats.t.sf(np.abs(t_val), df)
    return {'beta': beta, 'stderr': stderr, 't_val': t_val,
            'p_val': p_val, 'names': list(names), 'df': df}
def linear_regression_raw(raw: Raw, design_matrix: ArrayLike, names: Optional[List[str]] = None,
                          tmin: float = 0.0, tmax: Optional[float] = None,
                          reject_by_annotation: bool = True, picks: Optional[Union[str, List]] = None) -> Dict:
    """
    Linear regression on continuous (raw) data.

    Parameters:
    - raw: Raw data instance to regress on
    - design_matrix: Design matrix describing the regressors
    - names: Regressor names
    - tmin: Start time of the window, in seconds
    - tmax: End time of the window, in seconds (None = end of data)
    - reject_by_annotation: Whether to drop annotated ("bad") segments
    - picks: Channel selection
    Returns:
    Dictionary with the regression results
    """
# Construct spatial adjacency matrices for cluster-based statistics.
def combine_adjacency(adjacency_list: List[ArrayLike]) -> ArrayLike:
    """
    Merge several adjacency matrices into one combined matrix.

    Parameters:
    - adjacency_list: The adjacency matrices to combine
    Returns:
    The combined adjacency matrix
    """
def spatial_src_adjacency(src: SourceSpaces, verbose: Optional[Union[bool, str, int]] = None) -> ArrayLike:
    """
    Build the spatial adjacency matrix of a source space.

    Parameters:
    - src: The source space to derive adjacency from
    - verbose: Verbosity level
    Returns:
    Adjacency matrix over source-space vertices
    """
def spatial_tris_adjacency(tris: ArrayLike, remap_vertices: bool = False) -> ArrayLike:
    """
    Build a vertex adjacency matrix from a triangulation.

    Two vertices are adjacent when they share a triangle edge. The result
    is a symmetric sparse matrix with binary entries and no self-loops.

    Parameters:
    - tris: Triangle array (n_tris x 3) of vertex indices
    - remap_vertices: If True, remap the referenced vertex indices to a
      dense 0..n-1 range before building the matrix
    Returns:
    Sparse (CSR) adjacency matrix of shape (n_vertices, n_vertices)
    """
    import numpy as np
    from scipy import sparse
    tris = np.asarray(tris)
    if remap_vertices:
        # Collapse arbitrary vertex ids onto consecutive indices.
        _, inverse = np.unique(tris, return_inverse=True)
        tris = inverse.reshape(tris.shape)
    n_vertices = int(tris.max()) + 1
    # Each triangle contributes its three undirected edges.
    edges = np.concatenate([tris[:, [0, 1]], tris[:, [1, 2]], tris[:, [2, 0]]], axis=0)
    row = np.concatenate([edges[:, 0], edges[:, 1]])
    col = np.concatenate([edges[:, 1], edges[:, 0]])
    data = np.ones(row.shape[0])
    adjacency = sparse.coo_matrix((data, (row, col)),
                                  shape=(n_vertices, n_vertices)).tocsr()
    adjacency.data[:] = 1  # collapse duplicate edge entries to binary
    return adjacency
def spatial_dist_adjacency(src: SourceSpaces, dist: float, verbose: Optional[Union[bool, str, int]] = None) -> ArrayLike:
    """
    Build a distance-based adjacency matrix for a source space.

    Parameters:
    - src: Source space that carries distance information
    - dist: Maximum distance (in meters) for two sources to count as adjacent
    - verbose: Verbosity level
    Returns:
    Distance-based adjacency matrix
    """
def spatial_inter_hemi_adjacency(src: SourceSpaces, dist: float,
                                 verbose: Optional[Union[bool, str, int]] = None) -> ArrayLike:
    """
    Build an adjacency matrix connecting sources across hemispheres.

    Parameters:
    - src: The source space
    - dist: Maximum inter-hemisphere distance (in meters) for adjacency
    - verbose: Verbosity level
    Returns:
    Inter-hemisphere adjacency matrix
    """
# Analyze and summarize results from cluster-based permutation tests.
def summarize_clusters_stc(clu: Tuple, tstep: float, tmin: float, subject: str,
                           vertices: Optional[List[ArrayLike]] = None,
                           verbose: Optional[Union[bool, str, int]] = None) -> List[Dict]:
    """
    Summarize spatio-temporal cluster-test results in source space.

    Parameters:
    - clu: Cluster-test result tuple (T_obs, clusters, cluster_p_values, H0)
    - tstep: Time step between samples, in seconds
    - tmin: Time of the first sample, in seconds
    - subject: Subject the source space belongs to
    - vertices: Vertex arrays to use when different from those in ``clu``
    - verbose: Verbosity level
    Returns:
    List of per-cluster summary dictionaries
    """
import mne
from mne.stats import permutation_cluster_test
import numpy as np

# Example: compare two conditions with a cluster-based permutation test.
# Load epochs for the two conditions (MNE FIF files use the .fif extension).
epochs1 = mne.read_epochs('condition1-epo.fif')
epochs2 = mne.read_epochs('condition2-epo.fif')
# Get the per-condition data arrays
X = [epochs1.get_data(), epochs2.get_data()]
# Run cluster-based permutation test
T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
    X, n_permutations=1000, threshold=None, tail=0, n_jobs=1)
# Keep clusters whose permutation p-value is below 0.05
significant_clusters = [c for c, p in zip(clusters, cluster_p_values) if p < 0.05]
print(f"Found {len(significant_clusters)} significant clusters")
import mne
from mne.stats import spatio_temporal_cluster_1samp_test

# Example: one-sample spatio-temporal cluster test on sensor data.
# Load epochs (MNE FIF files use the .fif extension).
epochs = mne.read_epochs('sample-epo.fif')
# Center the data: subtract each epoch's mean over time
X = epochs.get_data() - epochs.get_data().mean(axis=-1, keepdims=True)
# Create adjacency matrix for channels
adjacency, ch_names = mne.channels.find_ch_adjacency(epochs.info, ch_type='eeg')
# Run spatio-temporal cluster test
# NOTE(review): MNE's function expects observations x times x channels;
# confirm whether X needs transposing before this call.
T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_1samp_test(
    X, adjacency=adjacency, n_permutations=1000, threshold=None, tail=1)
# Visualize the observed statistics as an Evoked topomap
evoked = mne.EvokedArray(T_obs, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(times=np.linspace(0.05, 0.3, 6))
import mne
from mne.stats import spatio_temporal_cluster_1samp_test, summarize_clusters_stc

# Example: group-level cluster test in source space.
# NOTE(review): assumes `subjects` and `subjects_dir` are defined earlier.
stcs = []
for subject in subjects:
    # Source estimates saved via stc.save(..., ftype='h5') are read back here.
    stc = mne.read_source_estimate(f'{subject}-stc.h5')
    stcs.append(stc)
# Convert to array (n_subjects x n_vertices x n_times)
X = np.array([stc.data for stc in stcs])
# Get source space adjacency (source spaces are stored as .fif files)
src = mne.read_source_spaces('fsaverage-oct6-src.fif')
adjacency = mne.spatial_src_adjacency(src)
# Run cluster test
T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_1samp_test(
    X, adjacency=adjacency, n_permutations=1000, threshold=None, tail=1)
# Summarize significant clusters
significant_clusters = summarize_clusters_stc(
    (T_obs, clusters, cluster_p_values, H0),
    tstep=stcs[0].tstep, tmin=stcs[0].tmin, subject='fsaverage')
# Create source estimate with T-statistics and plot it
stc_T = mne.SourceEstimate(T_obs, vertices=stcs[0].vertices,
                           tmin=stcs[0].tmin, tstep=stcs[0].tstep,
                           subject='fsaverage')
stc_T.plot(subject='fsaverage', subjects_dir=subjects_dir)
import mne
from mne.stats import fdr_correction, bonferroni_correction
import numpy as np

# Example: compare FDR and Bonferroni on simulated p-values.
n_tests = 1000
rng = np.random.RandomState(0)  # fixed seed so the demo is reproducible
p_values = rng.uniform(0, 1, n_tests)
p_values[:50] = rng.uniform(0, 0.01, 50)  # inject some truly "significant" values
# Apply FDR correction
reject_fdr, p_fdr = fdr_correction(p_values, alpha=0.05)
print(f"FDR: {np.sum(reject_fdr)} significant tests")
# Apply Bonferroni correction
reject_bonf, alpha_bonf = bonferroni_correction(p_values, alpha=0.05)
print(f"Bonferroni: {np.sum(reject_bonf)} significant tests")
print(f"Bonferroni corrected alpha: {alpha_bonf}")
import mne
from mne.stats import linear_regression
import numpy as np

# Example: regress epoch data on a continuous predictor.
# Load epochs (MNE FIF files use the .fif extension).
epochs = mne.read_epochs('sample-epo.fif')
# Create design matrix (intercept plus e.g. stimulus intensity)
n_epochs = len(epochs)
stimulus_intensity = np.random.randn(n_epochs)
design_matrix = np.column_stack([np.ones(n_epochs), stimulus_intensity])
# Get data, flattening channels and time into one feature axis
data = epochs.get_data().reshape(n_epochs, -1)
# Run regression
results = linear_regression(design_matrix, data,
                            names=['Intercept', 'Stimulus_Intensity'])
# Extract results
beta = results['beta']    # regression coefficients
t_val = results['t_val']  # t-statistics
p_val = results['p_val']  # p-values
# Reshape back to the original (n_channels, n_times) dimensions
n_channels, n_times = epochs.get_data().shape[1:]
t_val_reshaped = t_val[1].reshape(n_channels, n_times)  # stimulus effect
# Create evoked with T-statistics and plot
evoked_t = mne.EvokedArray(t_val_reshaped, epochs.info, tmin=epochs.tmin)
evoked_t.plot_topomap(times=[0.1, 0.2, 0.3])
import numpy as np
from typing import Union, Optional, List, Dict, Tuple, Any, Callable

# Type alias used in the signatures above: anything convertible to an array.
# NOTE(review): these definitions appear after their first use above; move
# them to the top of the file if this module is meant to be imported.
ArrayLike = Union[np.ndarray, List, Tuple]

# Install with Tessl CLI:
#   npx tessl i tessl/pypi-mne