DIPY: a comprehensive Python library for diffusion MRI analysis, including tensor imaging, tractography, and visualization.

Statistical analysis tools for diffusion metrics, tractometry, and group-level comparisons of white matter properties. DIPY provides methods for analyzing tract-specific measurements and performing statistical comparisons, including the Automated Fiber Quantification (AFQ) approach for tract-based analysis along streamline bundles.
def afq_profile(data, bundle, affine=None, n_points=100, weights=None):
    """Calculate an AFQ-style tract profile of a scalar map along a bundle.

    Parameters:
        data (array): 3D scalar volume (e.g., FA or MD map).
        bundle (list): streamlines defining the tract.
        affine (array): voxel-to-world transformation matrix.
        n_points (int): number of points at which the profile is sampled.
        weights (array): per-streamline weights; unweighted if None.

    Returns:
        array: tract profile values at ``n_points`` locations along the bundle.
    """
def gaussian_weights(bundle, n_points=100, return_mahalnobis=False, sphere=None):
    """Calculate Gaussian weights for streamlines based on their trajectory.

    Parameters:
        bundle (list): streamlines in the bundle.
        n_points (int): number of sampling points along each streamline.
        return_mahalnobis (bool): if True, return Mahalanobis distances
            instead of weights.  NOTE: the parameter spelling ("mahalnobis")
            follows the published API and must not be corrected here.
        sphere (Sphere): sphere used for directional weighting.

    Returns:
        array: Gaussian weight for each streamline (or Mahalanobis distances
        when ``return_mahalnobis`` is True).
    """
def values_from_volume(data, streamlines, affine=None, order=1):
    """Extract scalar values from a volume along streamlines.

    The original declaration was an unimplemented stub that returned None
    despite documenting a return value; this implements the documented
    contract.

    Parameters:
        data (array): 3D volume data.
        streamlines (list): streamlines, each an (N, 3) array of point
            coordinates. If ``affine`` is given, coordinates are in world
            space; otherwise they are voxel indices.
        affine (array): 4x4 voxel-to-world transformation matrix; its
            inverse maps streamline points back to voxel space.
        order (int): interpolation order (0=nearest, 1=linear).

    Returns:
        list: one array of interpolated values per streamline.
    """
    import numpy as np
    from scipy.ndimage import map_coordinates

    volume = np.asarray(data, dtype=float)
    # Invert the affine once; every streamline reuses it.
    if affine is not None:
        inv = np.linalg.inv(np.asarray(affine, dtype=float))
    values = []
    for sl in streamlines:
        pts = np.asarray(sl, dtype=float)
        if affine is not None:
            # World -> voxel: rotation/scaling part, then translation.
            pts = pts @ inv[:3, :3].T + inv[:3, 3]
        # map_coordinates expects coordinates with shape (3, N).
        values.append(map_coordinates(volume, pts.T, order=order))
    return values


# Tools for analyzing tract profiles and performing statistical comparisons
# across subjects or groups.
class TractProfile:
    """Container for tract profile data and analysis."""

    def __init__(self, data, bundle, affine=None, n_points=100):
        """Initialize the tract profile analysis.

        Parameters:
            data (array): scalar diffusion data (e.g., an FA volume).
            bundle (list): tract streamlines.
            affine (array): coordinate transformation matrix.
            n_points (int): number of profile sampling points.
        """

    def get_profile(self, weights=None):
        """Calculate the tract profile.

        Parameters:
            weights (array): optional streamline weights.

        Returns:
            array: profile values along the tract.
        """

    def bootstrap_profile(self, n_bootstrap=1000):
        """Generate bootstrap confidence intervals for the profile.

        Parameters:
            n_bootstrap (int): number of bootstrap samples.

        Returns:
            tuple: (mean_profile, confidence_intervals).
        """
def mean_profile(profiles):
    """Calculate the mean profile across subjects.

    The original declaration was an unimplemented stub returning None;
    this implements the documented pointwise mean.

    Parameters:
        profiles (list): individual tract profiles, all sampled at the
            same number of points.

    Returns:
        array: pointwise mean profile values.

    Raises:
        ValueError: if ``profiles`` is empty.
    """
    import numpy as np

    if len(profiles) == 0:
        raise ValueError("profiles must contain at least one profile")
    return np.mean(np.asarray(profiles, dtype=float), axis=0)
def profile_variance(profiles):
    """Calculate the variance of profiles across subjects.

    The original declaration was an unimplemented stub returning None;
    this implements the documented pointwise (population, ddof=0) variance.

    Parameters:
        profiles (list): individual tract profiles, all sampled at the
            same number of points.

    Returns:
        array: variance at each point along the tract.

    Raises:
        ValueError: if ``profiles`` is empty.
    """
    import numpy as np

    if len(profiles) == 0:
        raise ValueError("profiles must contain at least one profile")
    return np.var(np.asarray(profiles, dtype=float), axis=0)


# Statistical measures for analyzing bundle properties and tract
# characteristics.
def bundle_analysis(bundle, affine=None):
    """Comprehensive bundle analysis including geometric properties.

    Parameters:
        bundle (list): streamlines in the bundle.
        affine (array): coordinate transformation matrix.

    Returns:
        dict: bundle statistics (length, volume, density, etc.).
    """
def principal_components_analysis(streamlines):
    """Perform PCA on a streamline bundle.

    Parameters:
        streamlines (list): bundle streamlines.

    Returns:
        tuple: (eigenvalues, eigenvectors, mean_streamline).
    """
def streamline_variance(streamlines):
    """Calculate variance within a streamline bundle.

    Parameters:
        streamlines (list): bundle streamlines.

    Returns:
        array: variance along the streamline length.
    """
def bundle_coherence(bundle, metric='angular'):
    """Measure bundle coherence using directional consistency.

    Parameters:
        bundle (list): streamlines.
        metric (str): coherence metric, 'angular' or 'spatial'.

    Returns:
        float: coherence measure in the range 0-1.
    """


# Tools for group-level statistical analysis and comparison of tract
# properties.
class GroupAnalysis:
    """Group-level tract analysis."""

    def __init__(self, profiles_list, group_labels=None):
        """Initialize group analysis.

        Parameters:
            profiles_list (list): tract profiles, one per subject.
            group_labels (array): group membership label for each subject.
        """

    def ttest(self, group1_indices, group2_indices):
        """Perform a t-test between two groups.

        Parameters:
            group1_indices (array): subject indices for group 1.
            group2_indices (array): subject indices for group 2.

        Returns:
            tuple: (t_statistics, p_values).
        """

    def anova(self):
        """Perform ANOVA across groups.

        Returns:
            tuple: (F_statistics, p_values).
        """
def permutation_test(group1_profiles, group2_profiles, n_permutations=10000, metric='mean_diff'):
    """Non-parametric permutation test for group differences.

    Parameters:
        group1_profiles (list): profiles for group 1.
        group2_profiles (list): profiles for group 2.
        n_permutations (int): number of random permutations.
        metric (str): test statistic metric (e.g., 'mean_diff').

    Returns:
        tuple: (observed_statistic, p_value, null_distribution).
    """
def effect_size(group1_profiles, group2_profiles, metric='cohens_d'):
    """Calculate pointwise effect size between two groups of profiles.

    The original declaration was an unimplemented stub returning None;
    this implements Cohen's d with a pooled standard deviation.

    Parameters:
        group1_profiles (list): group 1 data (subjects x points).
        group2_profiles (list): group 2 data (subjects x points).
        metric (str): effect size metric; only 'cohens_d' is supported.

    Returns:
        array: effect sizes at each point along the tract.

    Raises:
        ValueError: if ``metric`` is unknown or either group is empty.
    """
    import numpy as np

    if metric != 'cohens_d':
        raise ValueError(f"Unknown effect size metric: {metric}")
    g1 = np.asarray(group1_profiles, dtype=float)
    g2 = np.asarray(group2_profiles, dtype=float)
    if g1.size == 0 or g2.size == 0:
        raise ValueError("both groups must be non-empty")
    n1, n2 = g1.shape[0], g2.shape[0]
    # Unbiased (ddof=1) per-group variances; zero if a group has one subject.
    v1 = g1.var(axis=0, ddof=1) if n1 > 1 else np.zeros(g1.shape[1])
    v2 = g2.var(axis=0, ddof=1) if n2 > 1 else np.zeros(g2.shape[1])
    pooled_sd = np.sqrt(((n1 - 1) * v1 + (n2 - 1) * v2) / max(n1 + n2 - 2, 1))
    # Where pooled_sd is 0, the division yields inf/nan by design.
    with np.errstate(divide='ignore', invalid='ignore'):
        return (g1.mean(axis=0) - g2.mean(axis=0)) / pooled_sd


# Tools for analyzing relationships between tract properties and
# behavioral measures.
def tract_behavior_correlation(tract_profiles, behavior_scores, method='pearson'):
    """Correlate tract profile values with behavioral measures at each point.

    The original declaration was an unimplemented stub returning None;
    this computes a pointwise correlation along the tract.

    Parameters:
        tract_profiles (array): tract profile data (subjects x points).
        behavior_scores (array): one behavioral score per subject.
        method (str): correlation method, 'pearson' or 'spearman'.

    Returns:
        tuple: (correlation_coefficients, p_values), each of length
        ``n_points``.

    Raises:
        ValueError: if ``method`` is not 'pearson' or 'spearman'.
    """
    import numpy as np
    from scipy import stats

    profiles = np.asarray(tract_profiles, dtype=float)
    scores = np.asarray(behavior_scores, dtype=float)
    if method == 'pearson':
        corr_func = stats.pearsonr
    elif method == 'spearman':
        corr_func = stats.spearmanr
    else:
        raise ValueError(f"Unknown correlation method: {method}")
    # One correlation per profile point, across subjects.
    results = [corr_func(profiles[:, i], scores) for i in range(profiles.shape[1])]
    r = np.array([res[0] for res in results])
    p = np.array([res[1] for res in results])
    return r, p
def partial_correlation(tract_profiles, behavior_scores, covariates):
    """Partial correlation between tract data and behavior, controlling for covariates.

    Parameters:
        tract_profiles (array): tract data (subjects x points).
        behavior_scores (array): behavioral measures per subject.
        covariates (array): covariate data to control for.

    Returns:
        tuple: (partial_correlations, p_values).
    """
class LinearModel:
    """General linear model (GLM) for tract analysis."""

    def __init__(self, design_matrix):
        """Initialize the GLM with a design matrix.

        Parameters:
            design_matrix (array): design matrix (subjects x predictors).
        """

    def fit(self, tract_data):
        """Fit the model to tract data.

        Parameters:
            tract_data (array): tract profiles (subjects x points).

        Returns:
            dict: fit results with coefficients and statistics.
        """


# Tools for assessing measurement reliability and reproducibility across
# sessions.
def test_retest_reliability(profiles_session1, profiles_session2, metric='icc'):
    """Calculate test-retest reliability between two sessions.

    Parameters:
        profiles_session1 (list): profiles from session 1.
        profiles_session2 (list): profiles from session 2 (same subjects,
            same order — presumably; verify against caller).
        metric (str): reliability metric, 'icc' or 'pearson'.

    Returns:
        array: reliability coefficients along the tract.
    """
def measurement_error(repeated_profiles):
    """Estimate measurement error from repeated measurements.

    Parameters:
        repeated_profiles (list): repeated profile measurements.

    Returns:
        tuple: (measurement_error, confidence_intervals).
    """
def bland_altman_analysis(method1_profiles, method2_profiles):
    """Bland-Altman analysis comparing two measurement methods.

    Parameters:
        method1_profiles (array): profiles from method 1.
        method2_profiles (array): profiles from method 2.

    Returns:
        dict: bias, limits of agreement, and statistics.
    """


# AFQ-style tractometry analysis
# Example usage of the statistics API (requires dipy, statsmodels, and the
# DIPY example datasets; not runnable standalone).
from dipy.stats.analysis import afq_profile, gaussian_weights
from dipy.data import read_stanford_hardi, read_bundles_2_subjects
from dipy.reconst.dti import TensorModel
import numpy as np

# Load diffusion data and its gradient table.
img, gtab = read_stanford_hardi()
data = img.get_fdata()

# Fit the diffusion tensor model and compute fractional anisotropy (FA).
tensor_model = TensorModel(gtab)
tensor_fit = tensor_model.fit(data)
fa = tensor_fit.fa

# Load example bundles.
bundle1, bundle2 = read_bundles_2_subjects()

# Calculate the FA tract profile along the first bundle.
profile = afq_profile(fa, bundle1, affine=img.affine, n_points=100)
print(f"Tract profile shape: {profile.shape}")
print(f"Mean FA along tract: {profile.mean():.3f}")

# Calculate Gaussian weights emphasizing the bundle core, then reprofile.
weights = gaussian_weights(bundle1, n_points=100)
weighted_profile = afq_profile(fa, bundle1, affine=img.affine, weights=weights)

# Group analysis example.
from dipy.stats.analysis import GroupAnalysis, permutation_test

# Simulate two groups of subject profiles (group 2 shifted by +0.02).
n_subjects_group1 = 20
n_subjects_group2 = 18
group1_profiles = [profile + np.random.normal(0, 0.01, len(profile))
for _ in range(n_subjects_group1)]
group2_profiles = [profile + np.random.normal(0.02, 0.01, len(profile))
for _ in range(n_subjects_group2)]

# Non-parametric permutation test for group differences.
observed_stat, p_value, null_dist = permutation_test(
group1_profiles, group2_profiles, n_permutations=5000
)
print(f"Group difference p-value: {p_value:.4f}")

# Correlate tract profiles with (simulated) behavioral scores.
behavior_scores = np.random.normal(100, 15, n_subjects_group1 + n_subjects_group2)
all_profiles = np.array(group1_profiles + group2_profiles)
from dipy.stats.analysis import tract_behavior_correlation
correlations, p_vals = tract_behavior_correlation(all_profiles, behavior_scores)

# Find significant correlations (FDR corrected).
from statsmodels.stats.multitest import fdrcorrection
significant_mask, p_corrected = fdrcorrection(p_vals, alpha=0.05)
print(f"Significant correlations: {significant_mask.sum()} points out of {len(p_vals)}")

# Test-retest reliability from simulated repeat-session profiles.
session2_profiles = [prof + np.random.normal(0, 0.005, len(prof))
for prof in group1_profiles[:10]]
from dipy.stats.analysis import test_retest_reliability
reliability = test_retest_reliability(group1_profiles[:10], session2_profiles)
print(f"Mean test-retest reliability: {reliability.mean():.3f}")

# Bundle geometry and coherence statistics.
from dipy.stats.analysis import bundle_analysis, bundle_coherence
bundle_stats = bundle_analysis(bundle1)
coherence = bundle_coherence(bundle1, metric='angular')
print(f"Bundle coherence: {coherence:.3f}")
print(f"Bundle statistics: {bundle_stats}")

# Install with the Tessl CLI:
#   npx tessl i tessl/pypi-dipy