# Analysis and Utilities

Advanced analysis functions for extracting morphological measurements from skeletons, creating oversegmented labels for proofreading workflows, and working with binary skeleton images.

## Imports

```python
from typing import Union, Dict, List, Tuple
import numpy as np
from osteoid import Skeleton
```

## Capabilities

### Cross-sectional Area Analysis

Computes cross-sectional areas along skeleton paths by analyzing the perpendicular cross-sections at each skeleton vertex.

```python { .api }
def cross_sectional_area(
    all_labels: np.ndarray,
    skeletons: Union[Dict[int, Skeleton], List[Skeleton], Skeleton],
    anisotropy: np.ndarray = np.array([1,1,1], dtype=np.float32),
    smoothing_window: int = 1,
    progress: bool = False,
    in_place: bool = False,
    fill_holes: bool = False,
    repair_contacts: bool = False,
    visualize_section_planes: bool = False,
    step: int = 1
) -> Union[Dict[int, Skeleton], List[Skeleton], Skeleton]:
    """
    Compute cross-sectional areas along skeleton paths.

    Cross-section planes are defined by normal vectors derived from
    differences between adjacent vertices. The area is measured by
    intersecting the object with planes perpendicular to the skeleton.

    Parameters:
    - all_labels (numpy.ndarray): Original labeled volume
    - skeletons (dict/list/Skeleton): Skeleton(s) to analyze
    - anisotropy (numpy.ndarray): Physical voxel dimensions (default: [1,1,1])
    - smoothing_window (int): Rolling average window for plane normals (default: 1)
    - progress (bool): Show progress bar (default: False)
    - in_place (bool): Modify input skeletons in place (default: False)
    - fill_holes (bool): Fill holes in shapes before analysis (default: False)
    - repair_contacts (bool): Repair boundary contacts (default: False)
    - visualize_section_planes (bool): Debug visualization (default: False)
    - step (int): Sample every nth vertex (default: 1)

    Returns:
    dict/list/Skeleton: Skeletons with cross_sectional_area property added

    Notes:
    - Adds 'cross_sectional_area' property to skeleton vertices
    - Adds 'cross_sectional_area_contacts' indicating boundary contacts
    """
```

#### Usage Example

```python
import kimimaro
import numpy as np

# Generate skeletons
labels = np.load("neurons.npy")
skeletons = kimimaro.skeletonize(labels, anisotropy=(16, 16, 40))

# Compute cross-sectional areas
analyzed_skeletons = kimimaro.cross_sectional_area(
    labels,
    skeletons,
    anisotropy=np.array([16, 16, 40], dtype=np.float32),
    smoothing_window=5, # Smooth normal vectors over 5 vertices
    progress=True,
    fill_holes=True, # Fill holes before measuring
    step=2 # Sample every 2nd vertex for speed
)

# Access cross-sectional data
skeleton = analyzed_skeletons[neuron_id]
areas = skeleton.cross_sectional_area
contacts = skeleton.cross_sectional_area_contacts

print(f"Cross-sectional areas: {areas}")
print(f"Mean area: {np.mean(areas):.2f} nm²")
print(f"Boundary contacts: {np.sum(contacts)} vertices")

# Find thickest point
max_area_idx = np.argmax(areas)
thickest_vertex = skeleton.vertices[max_area_idx]
print(f"Thickest point at {thickest_vertex} with area {areas[max_area_idx]:.2f} nm²")
```

### Binary Skeleton Extraction

Extracts a skeleton from a binary image that has already been processed by a thinning algorithm, converting it to Kimimaro's skeleton format.

```python { .api }
def extract_skeleton_from_binary_image(image: np.ndarray) -> Skeleton:
    """
    Extract skeleton from binary image using edge detection.

    Converts binary skeleton images (e.g., from Zhang-Suen thinning)
    into Kimimaro Skeleton objects with vertices and edges.

    Parameters:
    - image (numpy.ndarray): Binary image with skeleton pixels = True/1

    Returns:
    Skeleton: Skeleton object with vertices and edges extracted from binary image
    """
```

#### Usage Example

```python
import kimimaro
import numpy as np
from skimage.morphology import skeletonize

# Create binary skeleton using scikit-image
binary_object = (labels == target_label)
binary_skeleton = skeletonize(binary_object)

# Convert to Kimimaro skeleton format
skeleton = kimimaro.extract_skeleton_from_binary_image(binary_skeleton)

print(f"Extracted {len(skeleton.vertices)} vertices")
print(f"Extracted {len(skeleton.edges)} edges")

# Can now use with other Kimimaro functions
processed_skeleton = kimimaro.postprocess(skeleton)
```

### Oversegmentation for Proofreading

Creates oversegmented labels from skeletons, useful for integrating with proofreading systems that work by merging atomic labels.

```python { .api }
def oversegment(
    all_labels: np.ndarray,
    skeletons: Union[Dict[int, Skeleton], List[Skeleton], Skeleton],
    anisotropy: np.ndarray = np.array([1,1,1], dtype=np.float32),
    progress: bool = False,
    fill_holes: bool = False,
    in_place: bool = False,
    downsample: int = 0
) -> Tuple[np.ndarray, Union[Dict[int, Skeleton], List[Skeleton], Skeleton]]:
    """
    Oversegment existing segmentations using skeleton data.

    Creates atomic labels along skeleton paths that can be merged
    during proofreading workflows. Each segment corresponds to a
    portion of the skeleton path.

    Parameters:
    - all_labels (numpy.ndarray): Original labeled volume
    - skeletons (Union[Dict[int, Skeleton], List[Skeleton], Skeleton]): Skeletons defining segmentation boundaries
    - anisotropy (numpy.ndarray): Physical voxel dimensions (default: [1,1,1])
    - progress (bool): Show progress bar (default: False)
    - fill_holes (bool): Fill holes in shapes before processing (default: False)
    - in_place (bool): Modify input arrays in place (default: False)
    - downsample (int): Downsampling factor for skeleton sampling (default: 0)

    Returns:
    tuple: (oversegmented_labels, updated_skeletons)
        - oversegmented_labels: New label array with atomic segments
        - updated_skeletons: Skeletons with 'segments' property mapping vertices to labels
    """
```

#### Usage Example

```python
import kimimaro
import numpy as np

# Generate skeletons for proofreading workflow
labels = np.load("ai_segmentation.npy")
skeletons = kimimaro.skeletonize(
    labels,
    anisotropy=(16, 16, 40),
    parallel=4
)

# Create oversegmented labels
oversegmented_labels, segmented_skeletons = kimimaro.oversegment(
    labels,
    skeletons,
    anisotropy=(16, 16, 40),
    downsample=5 # Create segments every 5 vertices
)

# The oversegmented labels now contain atomic segments
print(f"Original labels: {len(np.unique(labels))} objects")
print(f"Oversegmented labels: {len(np.unique(oversegmented_labels))} segments")

# Each skeleton now has segment information
skeleton = segmented_skeletons[neuron_id]
segment_labels = skeleton.segments
print(f"Skeleton spans {len(np.unique(segment_labels))} segments")

# Save for proofreading system
np.save("oversegmented_for_proofreading.npy", oversegmented_labels)
```

## Advanced Analysis Workflows

### Morphological Analysis Pipeline

```python
import kimimaro
import numpy as np
import matplotlib.pyplot as plt

# Complete morphological analysis
labels = np.load("neurons.npy")
anisotropy = (16, 16, 40) # nm per voxel

# 1. Skeletonization
skeletons = kimimaro.skeletonize(
    labels,
    anisotropy=anisotropy,
    teasar_params={
        "scale": 1.5,
        "const": 300,
        "soma_detection_threshold": 750,
    },
    parallel=4
)

# 2. Post-processing
cleaned_skeletons = {}
for label_id, skeleton in skeletons.items():
    cleaned_skeletons[label_id] = kimimaro.postprocess(
        skeleton,
        dust_threshold=1500,
        tick_threshold=3000
    )

# 3. Cross-sectional analysis
analyzed_skeletons = kimimaro.cross_sectional_area(
    labels,
    cleaned_skeletons,
    anisotropy=np.array(anisotropy, dtype=np.float32),
    smoothing_window=5,
    progress=True
)

# 4. Extract morphological measurements
morphology_data = {}
for label_id, skeleton in analyzed_skeletons.items():
    morphology_data[label_id] = {
        'cable_length': skeleton.cable_length(),
        'n_vertices': len(skeleton.vertices),
        'n_branches': len([v for v in skeleton.vertices
                           if len(skeleton.edges[v]) > 2]),
        'mean_cross_section': np.mean(skeleton.cross_sectional_area),
        'max_cross_section': np.max(skeleton.cross_sectional_area),
        'volume_estimate': np.sum(skeleton.cross_sectional_area) *
                           np.mean(anisotropy)
    }

# 5. Visualization and analysis
for label_id, metrics in morphology_data.items():
    print(f"Neuron {label_id}:")
    print(f"  Cable length: {metrics['cable_length']:.1f} nm")
    print(f"  Branch points: {metrics['n_branches']}")
    print(f"  Mean diameter: {2 * np.sqrt(metrics['mean_cross_section']/np.pi):.1f} nm")
    print(f"  Volume estimate: {metrics['volume_estimate']:.0f} nm³")
```

### Quality Control and Validation

```python
import kimimaro
import numpy as np

def validate_skeleton_quality(skeleton, labels, label_id):
    """Quality control checks for generated skeletons."""

    # Check connectivity
    n_components = skeleton.n_components
    if n_components > 1:
        print(f"Warning: Skeleton {label_id} has {n_components} components")

    # Check for loops
    cycles = skeleton.cycles()
    if len(cycles) > 0:
        print(f"Warning: Skeleton {label_id} has {len(cycles)} loops")

    # Check coverage
    skeleton_volume = np.sum(skeleton.cross_sectional_area) if hasattr(skeleton, 'cross_sectional_area') else None
    object_volume = np.sum(labels == label_id)
    if skeleton_volume:
        coverage_ratio = skeleton_volume / object_volume
        if coverage_ratio < 0.1:
            print(f"Warning: Low coverage ratio {coverage_ratio:.3f} for skeleton {label_id}")

    # Check for reasonable branch density
    cable_length = skeleton.cable_length()
    n_branches = len([v for v in skeleton.vertices if len(skeleton.edges[v]) > 2])
    if cable_length > 0:
        branch_density = n_branches / cable_length * 1000 # branches per micron
        if branch_density > 10:
            print(f"Warning: High branch density {branch_density:.2f}/μm for skeleton {label_id}")

# Apply quality control
for label_id, skeleton in analyzed_skeletons.items():
    validate_skeleton_quality(skeleton, labels, label_id)
```

## Integration with External Tools

### Export for Visualization

```python
# Export skeletons in various formats
for label_id, skeleton in analyzed_skeletons.items():
    # SWC format for neuromorphology tools
    with open(f"neuron_{label_id}.swc", "w") as f:
        f.write(skeleton.to_swc())

    # JSON format for web visualization
    skeleton_data = {
        'vertices': skeleton.vertices.tolist(),
        'edges': skeleton.edges,
        'cross_sectional_area': skeleton.cross_sectional_area.tolist()
            if hasattr(skeleton, 'cross_sectional_area') else None
    }

    import json
    with open(f"neuron_{label_id}.json", "w") as f:
        json.dump(skeleton_data, f)
```

### Integration with Connectomics Pipelines

```python
# Prepare data for connectomics analysis
import pandas as pd

# Create connectivity matrix based on skeleton proximity
def compute_connectivity_matrix(skeletons, proximity_threshold=1000):
    """Compute potential connections between skeletons."""
    labels = list(skeletons.keys())
    n_neurons = len(labels)
    connectivity = np.zeros((n_neurons, n_neurons))

    for i, label_a in enumerate(labels):
        for j, label_b in enumerate(labels):
            if i != j:
                skel_a = skeletons[label_a]
                skel_b = skeletons[label_b]
                # Compute minimum distance between skeletons
                min_dist = compute_skeleton_distance(skel_a, skel_b)
                if min_dist < proximity_threshold:
                    connectivity[i, j] = 1.0 / min_dist

    return pd.DataFrame(connectivity, index=labels, columns=labels)

connectivity_matrix = compute_connectivity_matrix(analyzed_skeletons)
print("Potential connections based on skeleton proximity:")
print(connectivity_matrix)
```