Skeletonize densely labeled image volumes using TEASAR-derived algorithms for neuroscience and connectomics research.

Advanced analysis functions for extracting morphological measurements from skeletons, creating oversegmented labels for proofreading workflows, and working with binary skeleton images.
from typing import Union, Dict, List, Tuple
import numpy as np
from osteoid import Skeleton

Computes cross-sectional areas along skeleton paths by analyzing the perpendicular cross-sections at each skeleton vertex.
def cross_sectional_area(
    all_labels: np.ndarray,
    skeletons: Union[Dict[int, Skeleton], List[Skeleton], Skeleton],
    # NOTE(review): ndarray default is created once at import time; treat as read-only.
    anisotropy: np.ndarray = np.array([1,1,1], dtype=np.float32),
    smoothing_window: int = 1,
    progress: bool = False,
    in_place: bool = False,
    fill_holes: bool = False,
    repair_contacts: bool = False,
    visualize_section_planes: bool = False,
    step: int = 1
) -> Union[Dict[int, Skeleton], List[Skeleton], Skeleton]:
    """
    Compute cross-sectional areas along skeleton paths.

    Cross-section planes are defined by normal vectors derived from
    differences between adjacent vertices. The area is measured by
    intersecting the object with planes perpendicular to the skeleton.

    Parameters:
    - all_labels (numpy.ndarray): Original labeled volume
    - skeletons (dict/list/Skeleton): Skeleton(s) to analyze
    - anisotropy (numpy.ndarray): Physical voxel dimensions (default: [1,1,1])
    - smoothing_window (int): Rolling average window for plane normals (default: 1)
    - progress (bool): Show progress bar (default: False)
    - in_place (bool): Modify input skeletons in place (default: False)
    - fill_holes (bool): Fill holes in shapes before analysis (default: False)
    - repair_contacts (bool): Repair boundary contacts (default: False)
    - visualize_section_planes (bool): Debug visualization (default: False)
    - step (int): Sample every nth vertex (default: 1)

    Returns:
    dict/list/Skeleton: Skeletons with cross_sectional_area property added

    Notes:
    - Adds 'cross_sectional_area' property to skeleton vertices
    - Adds 'cross_sectional_area_contacts' indicating boundary contacts
    """
import kimimaro
import numpy as np

# Generate skeletons
labels = np.load("neurons.npy")
skeletons = kimimaro.skeletonize(labels, anisotropy=(16, 16, 40))

# Compute cross-sectional areas
analyzed_skeletons = kimimaro.cross_sectional_area(
    labels,
    skeletons,
    anisotropy=np.array([16, 16, 40], dtype=np.float32),
    smoothing_window=5,  # Smooth normal vectors over 5 vertices
    progress=True,
    fill_holes=True,     # Fill holes before measuring
    step=2               # Sample every 2nd vertex for speed
)

# Access cross-sectional data
# NOTE(review): `neuron_id` must be a label present in `analyzed_skeletons`;
# it is not defined in this snippet.
skeleton = analyzed_skeletons[neuron_id]
areas = skeleton.cross_sectional_area
contacts = skeleton.cross_sectional_area_contacts
print(f"Cross-sectional areas: {areas}")
print(f"Mean area: {np.mean(areas):.2f} nm²")
print(f"Boundary contacts: {np.sum(contacts)} vertices")

# Find thickest point
max_area_idx = np.argmax(areas)
thickest_vertex = skeleton.vertices[max_area_idx]
print(f"Thickest point at {thickest_vertex} with area {areas[max_area_idx]:.2f} nm²")

Extracts skeleton from a binary image that has already been processed by a thinning algorithm, converting it to Kimimaro's skeleton format.
def extract_skeleton_from_binary_image(image: np.ndarray) -> Skeleton:
    """
    Extract skeleton from binary image using edge detection.

    Converts binary skeleton images (e.g., from Zhang-Suen thinning)
    into Kimimaro Skeleton objects with vertices and edges.

    Parameters:
    - image (numpy.ndarray): Binary image with skeleton pixels = True/1

    Returns:
    Skeleton: Skeleton object with vertices and edges extracted from binary image
    """
import kimimaro
import numpy as np
from skimage.morphology import skeletonize

# Create binary skeleton using scikit-image
# NOTE(review): `labels` and `target_label` must be defined by the caller;
# they are not defined in this snippet.
binary_object = (labels == target_label)
binary_skeleton = skeletonize(binary_object)

# Convert to Kimimaro skeleton format
skeleton = kimimaro.extract_skeleton_from_binary_image(binary_skeleton)
print(f"Extracted {len(skeleton.vertices)} vertices")
print(f"Extracted {len(skeleton.edges)} edges")
# Can now use with other Kimimaro functions
processed_skeleton = kimimaro.postprocess(skeleton)

Creates oversegmented labels from skeletons, useful for integrating with proofreading systems that work by merging atomic labels.
def oversegment(
    all_labels: np.ndarray,
    skeletons: Union[Dict[int, Skeleton], List[Skeleton], Skeleton],
    # NOTE(review): ndarray default is created once at import time; treat as read-only.
    anisotropy: np.ndarray = np.array([1,1,1], dtype=np.float32),
    progress: bool = False,
    fill_holes: bool = False,
    in_place: bool = False,
    downsample: int = 0
) -> Tuple[np.ndarray, Union[Dict[int, Skeleton], List[Skeleton], Skeleton]]:
    """
    Oversegment existing segmentations using skeleton data.

    Creates atomic labels along skeleton paths that can be merged
    during proofreading workflows. Each segment corresponds to a
    portion of the skeleton path.

    Parameters:
    - all_labels (numpy.ndarray): Original labeled volume
    - skeletons (Union[Dict[int, Skeleton], List[Skeleton], Skeleton]): Skeletons defining segmentation boundaries
    - anisotropy (numpy.ndarray): Physical voxel dimensions (default: [1,1,1])
    - progress (bool): Show progress bar (default: False)
    - fill_holes (bool): Fill holes in shapes before processing (default: False)
    - in_place (bool): Modify input arrays in place (default: False)
    - downsample (int): Downsampling factor for skeleton sampling (default: 0)

    Returns:
    tuple: (oversegmented_labels, updated_skeletons)
        - oversegmented_labels: New label array with atomic segments
        - updated_skeletons: Skeletons with 'segments' property mapping vertices to labels
    """
import kimimaro
import numpy as np

# Generate skeletons for proofreading workflow
labels = np.load("ai_segmentation.npy")
skeletons = kimimaro.skeletonize(
    labels,
    anisotropy=(16, 16, 40),
    parallel=4
)

# Create oversegmented labels
oversegmented_labels, segmented_skeletons = kimimaro.oversegment(
    labels,
    skeletons,
    anisotropy=(16, 16, 40),
    downsample=5  # Create segments every 5 vertices
)

# The oversegmented labels now contain atomic segments
print(f"Original labels: {len(np.unique(labels))} objects")
print(f"Oversegmented labels: {len(np.unique(oversegmented_labels))} segments")

# Each skeleton now has segment information
# NOTE(review): `neuron_id` must be a label present in `segmented_skeletons`;
# it is not defined in this snippet.
skeleton = segmented_skeletons[neuron_id]
segment_labels = skeleton.segments
print(f"Skeleton spans {len(np.unique(segment_labels))} segments")

# Save for proofreading system
np.save("oversegmented_for_proofreading.npy", oversegmented_labels)
import kimimaro
import numpy as np
import matplotlib.pyplot as plt

# Complete morphological analysis
labels = np.load("neurons.npy")
anisotropy = (16, 16, 40)  # nm per voxel

# 1. Skeletonization
skeletons = kimimaro.skeletonize(
    labels,
    anisotropy=anisotropy,
    teasar_params={
        "scale": 1.5,
        "const": 300,
        "soma_detection_threshold": 750,
    },
    parallel=4
)

# 2. Post-processing
cleaned_skeletons = {}
for label_id, skeleton in skeletons.items():
    cleaned_skeletons[label_id] = kimimaro.postprocess(
        skeleton,
        dust_threshold=1500,
        tick_threshold=3000
    )

# 3. Cross-sectional analysis
analyzed_skeletons = kimimaro.cross_sectional_area(
    labels,
    cleaned_skeletons,
    anisotropy=np.array(anisotropy, dtype=np.float32),
    smoothing_window=5,
    progress=True
)

# 4. Extract morphological measurements
morphology_data = {}
for label_id, skeleton in analyzed_skeletons.items():
    areas = skeleton.cross_sectional_area
    # A branch point is a vertex of degree > 2. Count how often each vertex
    # index occurs in the Nx2 edge list; indexing `skeleton.edges` by vertex
    # coordinates (as originally written) is not meaningful.
    _, degree = np.unique(skeleton.edges, return_counts=True)
    morphology_data[label_id] = {
        'cable_length': skeleton.cable_length(),
        'n_vertices': len(skeleton.vertices),
        'n_branches': int(np.sum(degree > 2)),
        'mean_cross_section': np.mean(areas),
        'max_cross_section': np.max(areas),
        # Rough estimate: total area scaled by a mean voxel pitch.
        'volume_estimate': np.sum(areas) * np.mean(anisotropy),
    }

# 5. Visualization and analysis
for label_id, metrics in morphology_data.items():
    print(f"Neuron {label_id}:")
    print(f" Cable length: {metrics['cable_length']:.1f} nm")
    print(f" Branch points: {metrics['n_branches']}")
    print(f" Mean diameter: {2 * np.sqrt(metrics['mean_cross_section']/np.pi):.1f} nm")
    print(f" Volume estimate: {metrics['volume_estimate']:.0f} nm³")
import kimimaro
import numpy as np
def validate_skeleton_quality(skeleton, labels, label_id):
    """Quality control checks for generated skeletons.

    Prints a warning for each suspicious property found: multiple
    connected components, cycles, low coverage, or implausibly high
    branch density. Returns None.

    Parameters:
    - skeleton: Skeleton object exposing n_components, cycles(),
      cable_length(), vertices, edges, and optionally cross_sectional_area.
    - labels (numpy.ndarray): Labeled volume the skeleton was derived from.
    - label_id (int): Label of the object being checked.
    """
    # Check connectivity
    n_components = skeleton.n_components
    if n_components > 1:
        print(f"Warning: Skeleton {label_id} has {n_components} components")
    # Check for loops
    cycles = skeleton.cycles()
    if len(cycles) > 0:
        print(f"Warning: Skeleton {label_id} has {len(cycles)} loops")
    # Check coverage. NOTE(review): total cross-sectional area vs. voxel
    # count mixes units; the ratio is a heuristic only.
    skeleton_volume = np.sum(skeleton.cross_sectional_area) if hasattr(skeleton, 'cross_sectional_area') else None
    object_volume = np.sum(labels == label_id)
    # Compare against None explicitly (a total area of 0 is valid data,
    # and it *should* trigger the low-coverage warning) and avoid
    # dividing by zero for empty objects.
    if skeleton_volume is not None and object_volume > 0:
        coverage_ratio = skeleton_volume / object_volume
        if coverage_ratio < 0.1:
            print(f"Warning: Low coverage ratio {coverage_ratio:.3f} for skeleton {label_id}")
    # Check for reasonable branch density. A branch point is a vertex of
    # degree > 2; count vertex occurrences in the Nx2 edge list instead of
    # indexing `skeleton.edges` with vertex coordinates (original bug).
    cable_length = skeleton.cable_length()
    _, degree = np.unique(skeleton.edges, return_counts=True)
    n_branches = int(np.sum(degree > 2))
    if cable_length > 0:
        branch_density = n_branches / cable_length * 1000  # branches per micron
        if branch_density > 10:
            print(f"Warning: High branch density {branch_density:.2f}/μm for skeleton {label_id}")
# Apply quality control to every analyzed skeleton.
for label_id, skeleton in analyzed_skeletons.items():
    validate_skeleton_quality(skeleton, labels, label_id)
# Export skeletons in various formats
# Hoisted out of the loop: importing once is enough.
import json

for label_id, skeleton in analyzed_skeletons.items():
    # SWC format for neuromorphology tools
    with open(f"neuron_{label_id}.swc", "w") as f:
        f.write(skeleton.to_swc())
    # JSON format for web visualization
    skeleton_data = {
        'vertices': skeleton.vertices.tolist(),
        # edges is an ndarray; json.dump raises TypeError on ndarrays,
        # so convert it just like vertices.
        'edges': skeleton.edges.tolist(),
        'cross_sectional_area': skeleton.cross_sectional_area.tolist()
            if hasattr(skeleton, 'cross_sectional_area') else None,
    }
    with open(f"neuron_{label_id}.json", "w") as f:
        json.dump(skeleton_data, f)
# Prepare data for connectomics analysis
import pandas as pd
# Create connectivity matrix based on skeleton proximity
def compute_connectivity_matrix(skeletons, proximity_threshold=1000):
    """Compute potential connections between skeletons.

    Parameters:
    - skeletons (dict): Mapping of label id -> Skeleton.
    - proximity_threshold (float): Maximum distance (same units as the
      skeleton vertices) at which two skeletons count as connected.

    Returns:
    pandas.DataFrame: Square matrix indexed by label id; entry (a, b)
    holds 1/distance for pairs closer than the threshold, else 0.

    NOTE(review): relies on `compute_skeleton_distance`, which must be
    defined elsewhere and return the minimum distance between two
    skeletons — confirm against the caller.
    """
    ids = list(skeletons.keys())
    n_neurons = len(ids)
    connectivity = np.zeros((n_neurons, n_neurons))
    # The distance is symmetric, so compute each pair once (upper triangle)
    # instead of twice as the original did.
    for i in range(n_neurons):
        for j in range(i + 1, n_neurons):
            min_dist = compute_skeleton_distance(skeletons[ids[i]], skeletons[ids[j]])
            # Skip zero distances (touching skeletons) to avoid a
            # ZeroDivisionError in the weight computation.
            if 0 < min_dist < proximity_threshold:
                connectivity[i, j] = connectivity[j, i] = 1.0 / min_dist
    return pd.DataFrame(connectivity, index=ids, columns=ids)
# Build the proximity-based connectivity matrix for all skeletons.
# NOTE(review): `analyzed_skeletons` comes from an earlier example; confirm
# it is in scope where this snippet runs.
connectivity_matrix = compute_connectivity_matrix(analyzed_skeletons)
print("Potential connections based on skeleton proximity:")
print(connectivity_matrix)

Install with the Tessl CLI:

    npx tessl i tessl/pypi-kimimaro