Comprehensive image processing and computer vision library for Python with algorithms for filtering, morphology, segmentation, and feature detection
—
Feature detection and description algorithms for computer vision applications. Includes corner detection, blob detection, texture analysis, keypoint descriptors, and feature matching capabilities.
Detect and analyze corner points using various corner detection algorithms.
def corner_harris(image, method: str = 'k', k: float = 0.05, eps: float = 1e-6, sigma: float = 1):
    """
    Apply Harris corner detection.

    Parameters:
        image : array_like
            Input grayscale image
        method : str, optional
            Method for corner response ('k' or 'eps')
        k : float, optional
            Harris detector sensitivity parameter (used when method='k')
        eps : float, optional
            Epsilon for method='eps'
        sigma : float, optional
            Standard deviation of Gaussian kernel used to smooth the
            structure tensor
    Returns:
        ndarray
            Harris corner response image
    """
def corner_shi_tomasi(image, sigma: float = 1):
    """
    Apply Shi-Tomasi corner detection.

    Parameters:
        image : array_like
            Input grayscale image
        sigma : float, optional
            Standard deviation of Gaussian kernel used to smooth the
            structure tensor
    Returns:
        ndarray
            Shi-Tomasi corner response image
    """
def corner_foerstner(image, sigma: float = 1):
    """
    Apply Foerstner corner detection.

    Parameters:
        image : array_like
            Input grayscale image
        sigma : float, optional
            Standard deviation of Gaussian kernel
    Returns:
        tuple
            (corner_response, roundness, accuracy)
    """
def corner_kitchen_rosenfeld(image, mode: str = 'constant', cval: float = 0):
    """
    Apply Kitchen-Rosenfeld corner detection.

    Parameters:
        image : array_like
            Input grayscale image
        mode : str, optional
            Boundary condition mode for values outside the image borders
        cval : float, optional
            Value used for pixels outside the image when mode='constant'
    Returns:
        ndarray
            Corner response image
    """
def corner_fast(image, n: int = 12, threshold: float = 0.15, nonmax_suppression: bool = True):
    """
    Apply FAST corner detection.

    Parameters:
        image : array_like
            Input grayscale image
        n : int, optional
            Number of consecutive circle pixels that must all be brighter
            or darker than the center for a corner to be reported
        threshold : float, optional
            Intensity threshold for corner detection
        nonmax_suppression : bool, optional
            Whether to apply non-maximum suppression
    Returns:
        ndarray
            Corner coordinates array
    """
def corner_moravec(image, window_size: int = 1):
    """
    Apply Moravec corner detection.

    Parameters:
        image : array_like
            Input grayscale image
        window_size : int, optional
            Size of sliding window
    Returns:
        ndarray
            Moravec corner response image
    """
def corner_peaks(image, min_distance: int = 1, threshold_abs=None, threshold_rel=None, exclude_border: bool = True, num_peaks=np.inf, footprint=None, labels=None):
    """
    Find corner peaks in corner response image.

    Parameters:
        image : array_like
            Corner response image (e.g. output of corner_harris)
        min_distance : int, optional
            Minimum distance between peaks
        threshold_abs : float, optional
            Minimum absolute intensity of peaks
        threshold_rel : float, optional
            Minimum relative intensity, as a fraction of the image maximum
        exclude_border : bool, optional
            Whether to exclude peaks near the image border
        num_peaks : int, optional
            Maximum number of peaks to return (default: unlimited)
        footprint : array_like, optional
            Footprint used for the local-maximum search
        labels : array_like, optional
            Labeled regions restricting the search
    Returns:
        ndarray
            Corner coordinates array
    """
def corner_subpix(image, corners, window_size: int = 11, alpha: float = 0.99):
    """
    Refine corner coordinates to subpixel accuracy.

    Parameters:
        image : array_like
            Input grayscale image
        corners : array_like
            Corner coordinates (e.g. output of corner_peaks)
        window_size : int, optional
            Size of the neighborhood window used for refinement
        alpha : float, optional
            Significance level for the statistical termination test
    Returns:
        ndarray
            Refined corner coordinates
    """
def corner_orientations(image, corners, mask):
"""
Compute corner orientations.
Parameters:
image : array_like
Input grayscale image
corners : array_like
Corner coordinates
mask : array_like
Circular mask for orientation computation
Returns:
ndarray
Corner orientations in radians
"""Detect blob-like structures using scale-space methods.
def blob_dog(image, min_sigma: float = 1, max_sigma: float = 50, sigma_ratio: float = 1.6, threshold: float = 0.1, overlap: float = 0.5, exclude_border: bool = True):
    """
    Apply Difference of Gaussian (DoG) blob detection.

    Parameters:
        image : array_like
            Input grayscale image
        min_sigma : float, optional
            Minimum standard deviation of the Gaussian kernels
        max_sigma : float, optional
            Maximum standard deviation of the Gaussian kernels
        sigma_ratio : float, optional
            Ratio between successive sigma values
        threshold : float, optional
            Detection threshold on the DoG response
        overlap : float, optional
            Maximum allowed overlap between two detected blobs
        exclude_border : bool, optional
            Whether to exclude blobs near the image border
    Returns:
        ndarray
            Blob coordinates and scales (row, col, sigma)
    """
def blob_log(image, min_sigma: float = 1, max_sigma: float = 50, num_sigma: int = 10, threshold: float = 0.1, overlap: float = 0.5, log_scale: bool = False, exclude_border: bool = True):
    """
    Apply Laplacian of Gaussian (LoG) blob detection.

    Parameters:
        image : array_like
            Input grayscale image
        min_sigma : float, optional
            Minimum standard deviation of the Gaussian kernels
        max_sigma : float, optional
            Maximum standard deviation of the Gaussian kernels
        num_sigma : int, optional
            Number of sigma values between min_sigma and max_sigma
        threshold : float, optional
            Detection threshold on the LoG response
        overlap : float, optional
            Maximum allowed overlap between two detected blobs
        log_scale : bool, optional
            Whether to space sigma values on a logarithmic scale
        exclude_border : bool, optional
            Whether to exclude blobs near the image border
    Returns:
        ndarray
            Blob coordinates and scales (row, col, sigma)
    """
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, overlap=0.5, log_scale=False, exclude_border=True):
"""
Apply Determinant of Hessian blob detection.
Parameters:
image : array_like
Input grayscale image
min_sigma : float, optional
Minimum standard deviation
max_sigma : float, optional
Maximum standard deviation
num_sigma : int, optional
Number of sigma values
threshold : float, optional
Detection threshold
overlap : float, optional
Maximum overlap between blobs
log_scale : bool, optional
Whether to use logarithmic scale
exclude_border : bool, optional
Whether to exclude border
Returns:
ndarray
Blob coordinates and scales (row, col, sigma)
"""Object-oriented interface for feature detection and description.
class ORB:
    """
    Oriented FAST and Rotated BRIEF (ORB) keypoint detector and binary
    descriptor extractor.

    Parameters:
        n_keypoints : int, optional
            Upper bound on the number of returned keypoints.
        fast_n : int, optional
            The ``n`` parameter of the underlying FAST detector.
        fast_threshold : float, optional
            Intensity threshold used by the FAST detector.
        harris_k : float, optional
            Sensitivity parameter of the Harris corner measure used to
            rank keypoints.
        pyramid_levels : int, optional
            Number of levels in the image pyramid.
        downscale : float, optional
            Scale factor between consecutive pyramid levels.
    """

    def __init__(self, n_keypoints=500, fast_n=9, fast_threshold=0.08, harris_k=0.04, pyramid_levels=8, downscale=1.2):
        pass

    def detect(self, image):
        """
        Detect keypoints in a grayscale image.

        Parameters:
            image : array_like
                Input grayscale image.
        Returns:
            ndarray
                Keypoint coordinates.
        """
        pass

    def extract(self, image, keypoints):
        """
        Extract binary descriptors at the given keypoint locations.

        Parameters:
            image : array_like
                Input grayscale image.
            keypoints : array_like
                Keypoint coordinates.
        Returns:
            ndarray
                Binary descriptors.
        """
        pass

    def detect_and_extract(self, image):
        """
        Run detection and description in one step.

        Parameters:
            image : array_like
                Input grayscale image.
        Returns:
            tuple
                (keypoints, descriptors).
        """
        pass
class BRIEF:
    """
    Binary Robust Independent Elementary Features (BRIEF) descriptor.

    Parameters:
        descriptor_size : int, optional
            Number of bits per descriptor.
        patch_size : int, optional
            Side length of the sampling patch around each keypoint.
        mode : str, optional
            Sampling-pattern mode.
        sample_seed : int, optional
            Seed for the random sampling pattern (fixes the pattern so
            descriptors are comparable across images).
        sigma : float, optional
            Standard deviation used for Gaussian sampling.
    """

    def __init__(self, descriptor_size=256, patch_size=49, mode='normal', sample_seed=1, sigma=1):
        pass

    def extract(self, image, keypoints):
        """
        Compute BRIEF descriptors at the given keypoint locations.

        Parameters:
            image : array_like
                Input grayscale image.
            keypoints : array_like
                Keypoint coordinates.
        Returns:
            ndarray
                Binary descriptors.
        """
        pass
class SIFT:
    """
    Scale-Invariant Feature Transform (SIFT) detector and descriptor.

    Parameters:
        upsampling : int, optional
            Upsampling factor applied to the input image before processing
        n_octaves : int, optional
            Maximum number of octaves in the scale space
        n_scales : int, optional
            Number of scales per octave
        sigma_min : float, optional
            Blur level of the seed (first) scale
        sigma_in : float, optional
            Assumed blur level of the input image
        c_dog : float, optional
            Threshold used to discard low-contrast DoG extrema
        c_edge : float, optional
            Threshold used to discard extrema located on edges
        n_bins : int, optional
            Number of bins in the orientation histogram
        lambda_ori : float, optional
            Size factor of the Gaussian window used for reference
            orientation assignment
        c_max : float, optional
            Fraction of the orientation-histogram maximum above which
            secondary orientation peaks are accepted
        lambda_descr : float, optional
            Size factor of the local window used to compute the descriptor
        n_hist : int, optional
            Number of histograms per axis in the descriptor grid
        n_ori : int, optional
            Number of orientation bins per descriptor histogram
    """

    def __init__(self, upsampling=2, n_octaves=8, n_scales=3, sigma_min=1.6, sigma_in=0.5, c_dog=0.013333333333333334, c_edge=10, n_bins=36, lambda_ori=1.5, c_max=0.8, lambda_descr=6, n_hist=4, n_ori=8):
        pass

    def detect_and_extract(self, image):
        """
        Detect keypoints and extract descriptors.

        Parameters:
            image : array_like
                Input grayscale image
        Returns:
            tuple
                (keypoints, descriptors)
        """
        pass
class CENSURE:
"""
CENter SURround Extremas feature detector.
Parameters:
min_scale : int, optional
Minimum octave scale
max_scale : int, optional
Maximum octave scale
mode : str, optional
Detection mode
threshold : float, optional
Feature response threshold
non_max_threshold : float, optional
Non-maximum suppression threshold
line_threshold : float, optional
Line response threshold
"""
def __init__(self, min_scale=1, max_scale=7, mode='DoB', threshold=0.15, non_max_threshold=0.15, line_threshold=10):
pass
def detect(self, image):
"""
Detect keypoints.
Parameters:
image : array_like
Input grayscale image
Returns:
ndarray
Keypoint coordinates and scales
"""
passMatch feature descriptors between images for correspondence and registration.
def match_descriptors(descriptors1, descriptors2, metric: str = 'euclidean', p: float = 2, max_distance=np.inf, cross_check: bool = True, max_ratio: float = 1.0):
    """
    Match feature descriptors between two images.

    Parameters:
        descriptors1 : array_like
            First set of descriptors
        descriptors2 : array_like
            Second set of descriptors
        metric : str, optional
            Distance metric ('euclidean', 'cityblock', 'minkowski', 'hamming')
        p : float, optional
            Parameter for the Minkowski metric
        max_distance : float, optional
            Maximum distance allowed for a match (default: unlimited)
        cross_check : bool, optional
            Whether to keep only matches that are mutual nearest neighbors
        max_ratio : float, optional
            Maximum ratio for Lowe's ratio test (1.0 disables the test)
    Returns:
        ndarray
            Match indices (pairs of indices into the two descriptor sets)
    """
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches, keypoints_color='k', matches_color=None, only_matches=False):
"""
Plot matched features between two images.
Parameters:
ax : matplotlib axis
Matplotlib axis for plotting
image1 : array_like
First image
image2 : array_like
Second image
keypoints1 : array_like
Keypoints from first image
keypoints2 : array_like
Keypoints from second image
matches : array_like
Match indices
keypoints_color : color, optional
Color for keypoint markers
matches_color : color, optional
Color for match lines
only_matches : bool, optional
Whether to show only matched keypoints
"""
passAnalyze texture patterns using statistical and structural methods.
def local_binary_pattern(image, P: int, R: float, method: str = 'default'):
    """
    Compute Local Binary Pattern (LBP) for texture analysis.

    Parameters:
        image : array_like
            Input grayscale image
        P : int
            Number of circularly symmetric sample points
        R : float
            Radius of the sample circle
        method : str, optional
            LBP method ('default', 'ror', 'uniform', 'nri_uniform', 'var')
    Returns:
        ndarray
            LBP pattern image
    """
def graycomatrix(image, distances, angles, levels=None, symmetric: bool = False, normed: bool = False):
    """
    Compute the Gray-Level Co-occurrence Matrix (GLCM).

    Parameters:
        image : array_like
            Input integer image
        distances : array_like
            Pixel pair distance offsets
        angles : array_like
            Pixel pair angles in radians
        levels : int, optional
            Number of gray levels counted in the matrix
        symmetric : bool, optional
            Whether to accumulate each pixel pair in both directions
        normed : bool, optional
            Whether to normalize each matrix to sum to 1
    Returns:
        ndarray
            Gray-level co-occurrence matrix
    """
def graycoprops(P, prop: str = 'contrast'):
    """
    Compute texture properties from a Gray-Level Co-occurrence Matrix.

    Parameters:
        P : array_like
            Gray-level co-occurrence matrix (e.g. output of graycomatrix)
        prop : str, optional
            Property to compute ('contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy', 'correlation')
    Returns:
        ndarray
            Texture property values, one per (distance, angle) pair
    """
def multiblock_lbp(image, r, c, width, height):
"""
Compute Multi-block Local Binary Pattern.
Parameters:
image : array_like
Input grayscale image
r : int
Row coordinate of top-left corner
c : int
Column coordinate of top-left corner
width : int
Width of blocks
height : int
Height of blocks
Returns:
int
Multi-block LBP value
"""Generate advanced feature descriptors for complex recognition tasks.
def daisy(image, step: int = 4, radius: int = 15, rings: int = 2, histograms: int = 8, orientations: int = 8, normalization: str = 'l1', sigmas=None, ring_radii=None, visualize: bool = False):
    """
    Extract DAISY feature descriptors densely over the image.

    Parameters:
        image : array_like
            Input grayscale image
        step : int, optional
            Distance between descriptor sampling centers
        radius : int, optional
            Radius of the outermost ring
        rings : int, optional
            Number of rings
        histograms : int, optional
            Number of histograms per ring
        orientations : int, optional
            Number of orientations per histogram
        normalization : str, optional
            Normalization method ('l1', 'l2', 'daisy', 'off')
        sigmas : array_like, optional
            Standard deviations for smoothing (overrides rings/radius)
        ring_radii : array_like, optional
            Ring radii (overrides rings/radius)
        visualize : bool, optional
            Whether to also return a visualization image
    Returns:
        ndarray
            DAISY descriptors
    """
def hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), block_norm='L2-Hys', visualize=False, transform_sqrt=False, feature_vector=True, multichannel=None, channel_axis=None):
"""
Extract Histogram of Oriented Gradients (HOG) features.
Parameters:
image : array_like
Input image
orientations : int, optional
Number of orientation bins
pixels_per_cell : tuple, optional
Size of cell in pixels
cells_per_block : tuple, optional
Number of cells per block
block_norm : str, optional
Block normalization method
visualize : bool, optional
Whether to return HOG visualization
transform_sqrt : bool, optional
Apply power law compression
feature_vector : bool, optional
Return feature vector
multichannel : bool, optional
Whether last axis is channels (deprecated)
channel_axis : int, optional
Axis for color channels
Returns:
ndarray or tuple
HOG features and optionally visualization
"""Detect local maxima for keypoint extraction and feature analysis.
def peak_local_maxima(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, num_peaks=np.inf, footprint=None, labels=None, num_peaks_per_label=np.inf, p_norm=np.inf):
"""
Find local maxima in an image.
Parameters:
image : array_like
Input image
min_distance : int, optional
Minimum distance between peaks
threshold_abs : float, optional
Minimum absolute intensity
threshold_rel : float, optional
Minimum relative intensity
exclude_border : bool, optional
Whether to exclude border
num_peaks : int, optional
Maximum number of peaks
footprint : array_like, optional
Footprint for peak detection
labels : array_like, optional
Labeled regions
num_peaks_per_label : int, optional
Maximum peaks per label
p_norm : float, optional
P-norm for distance calculation
Returns:
ndarray
Peak coordinates
"""Match template patterns within images for object detection.
def match_template(image, template, pad_input=False, mode='constant', constant_values=0):
    """
    Locate a template in an image via normalized cross-correlation.

    Parameters:
        image : array_like
            Image to search.
        template : array_like
            Template to search for.
        pad_input : bool, optional
            Pad the input so the result has the same shape as the image.
        mode : str, optional
            Padding mode used when pad_input is True.
        constant_values : scalar, optional
            Fill value used with mode='constant'.
    Returns:
        ndarray
            Normalized cross-correlation response.
    """
from skimage import data, feature
import matplotlib.pyplot as plt
import numpy as np
# Sample image with strong, regularly spaced corners.
img = data.checkerboard()
# Corner response maps from two classic detectors.
harris_resp = feature.corner_harris(img)
st_resp = feature.corner_shi_tomasi(img)
# Reduce each response map to discrete peak coordinates.
harris_pts = feature.corner_peaks(harris_resp, min_distance=5, threshold_rel=0.02)
st_pts = feature.corner_peaks(st_resp, min_distance=5, threshold_rel=0.02)
# Show the original, the Harris response, and both detected peak sets.
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
axes[0, 0].imshow(img, cmap='gray')
axes[0, 0].set_title('Original')
axes[0, 1].imshow(harris_resp, cmap='hot')
axes[0, 1].set_title('Harris Response')
axes[1, 0].imshow(img, cmap='gray')
axes[1, 0].plot(harris_pts[:, 1], harris_pts[:, 0], 'r+', markersize=10)
axes[1, 0].set_title(f'Harris Corners ({len(harris_pts)})')
axes[1, 1].imshow(img, cmap='gray')
axes[1, 1].plot(st_pts[:, 1], st_pts[:, 0], 'b+', markersize=10)
axes[1, 1].set_title(f'Shi-Tomasi Corners ({len(st_pts)})')
plt.show()
from skimage import data, feature
import matplotlib.pyplot as plt
import numpy as np  # was missing: np.sqrt is used below
# Load image
# NOTE(review): hubble_deep_field() is a color image; the blob detectors
# document grayscale input — confirm whether a grayscale conversion is intended.
image = data.hubble_deep_field()[0:500, 0:500]
# Apply different blob detection methods
blobs_log = feature.blob_log(image, max_sigma=30, num_sigma=10, threshold=0.1)
blobs_dog = feature.blob_dog(image, max_sigma=30, threshold=0.1)
blobs_doh = feature.blob_doh(image, max_sigma=30, threshold=0.01)
# Scale the sigma column to approximate radii for LoG/DoG visualization.
blobs_log[:, 2] = blobs_log[:, 2] * np.sqrt(2)
blobs_dog[:, 2] = blobs_dog[:, 2] * np.sqrt(2)
print(f"LoG detected {len(blobs_log)} blobs")
print(f"DoG detected {len(blobs_dog)} blobs")
print(f"DoH detected {len(blobs_doh)} blobs")
from skimage import data, feature, transform
import numpy as np
# Build a pair of images related by a known affine transform.
img_a = data.camera()
warp_tf = transform.AffineTransform(scale=(1.3, 1.1), rotation=0.5, translation=(0, -200))
img_b = transform.warp(img_a, warp_tf)
# Detect ORB keypoints and binary descriptors in both images.
orb = feature.ORB(n_keypoints=200)
kp_a, desc_a = orb.detect_and_extract(img_a)
kp_b, desc_b = orb.detect_and_extract(img_b)
# Match descriptors, keeping only mutual nearest neighbors.
pairs = feature.match_descriptors(desc_a, desc_b, cross_check=True)
print(f"Image 1: {len(kp_a)} keypoints")
print(f"Image 2: {len(kp_b)} keypoints")
print(f"Matches: {len(pairs)}")
# Recover the affine transform from the matched coordinates.
src = kp_a[pairs[:, 0]]
dst = kp_b[pairs[:, 1]]
tform_est = transform.estimate_transform('affine', src, dst)
print(f"Estimated transformation matrix:\n{tform_est.params}")
from skimage import data, feature
import numpy as np
# Sample texture image.
tex = data.brick()
# Rotation-tolerant LBP texture map.
lbp_map = feature.local_binary_pattern(tex, P=8, R=1, method='uniform')
# GLCM over several offsets and directions.
dists = [1, 2, 3]
angs = [0, np.pi/4, np.pi/2, 3*np.pi/4]
glcm = feature.graycomatrix(tex, distances=dists, angles=angs,
                            levels=256, symmetric=True, normed=True)
# Summarize the co-occurrence statistics as scalar texture properties.
contrast = feature.graycoprops(glcm, 'contrast')
energy = feature.graycoprops(glcm, 'energy')
homogeneity = feature.graycoprops(glcm, 'homogeneity')
print(f"LBP patterns: {len(np.unique(lbp_map))}")
print(f"Mean contrast: {np.mean(contrast):.3f}")
print(f"Mean energy: {np.mean(energy):.3f}")
print(f"Mean homogeneity: {np.mean(homogeneity):.3f}")
from typing import Union, Optional, Tuple, List
from numpy.typing import NDArray
import numpy as np
# Feature detection results
Keypoints = NDArray[np.floating]
Descriptors = NDArray[Union[np.floating, np.bool_]]
Corners = NDArray[np.integer]
Blobs = NDArray[np.floating]
# Feature matching
Matches = NDArray[np.integer]
MatchDistances = NDArray[np.floating]
# Texture analysis
LBPPattern = NDArray[np.integer]
GLCMatrix = NDArray[np.floating]
TextureProperties = NDArray[np.floating]
# Feature detector classes
FeatureDetector = Union[ORB, BRIEF, SIFT, CENSURE]
# Template matching
CorrelationMap = NDArray[np.floating]Install with Tessl CLI
npx tessl i tessl/pypi-scikit-image