# CuPy: NumPy & SciPy for GPU (CUDA 10.1 version)
#
# Statistical functions and array aggregation operations, including descriptive
# statistics, histograms, and correlation analysis. All operations are
# GPU-accelerated with NumPy-compatible interfaces for efficient data analysis.
#
# Core statistical measures for data analysis and summarization.
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """
    Compute the arithmetic mean along the specified axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce; None (default) averages
      over the flattened array, optional.
    - dtype: data type used for the computation and result, optional.
    - out: array, alternative output array, optional.
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Mean values on GPU.
    """
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """
    Compute the standard deviation along the specified axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce, optional.
    - dtype: data type used for the computation and result, optional.
    - out: array, alternative output array, optional.
    - ddof: int, delta degrees of freedom; the divisor is N - ddof.
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Standard deviation on GPU.
    """
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """
    Compute the variance along the specified axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce, optional.
    - dtype: data type used for the computation and result, optional.
    - out: array, alternative output array, optional.
    - ddof: int, delta degrees of freedom; the divisor is N - ddof.
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Variance on GPU.
    """
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce, optional.
    - out: array, alternative output array, optional.
    - overwrite_input: bool, allow modifying `a` in place to save memory.
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Median values on GPU.
    """
def percentile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the q-th percentile(s) along the specified axes.

    Parameters:
    - a: array-like, input array.
    - q: float or array, percentile(s) to compute, in [0, 100].
    - axis: int or tuple of ints, axes to reduce, optional.
    - out: array, alternative output array, optional.
    - overwrite_input: bool, allow modifying `a` in place to save memory.
    - interpolation: str, method used when the percentile falls between
      two data points ('linear', 'lower', 'higher', 'midpoint', 'nearest').
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Percentile values on GPU.
    """
def quantile(a, q, axis=None, out=None, overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the q-th quantile(s) along the specified axes.

    Parameters:
    - a: array-like, input array.
    - q: float or array, quantile(s) to compute, in [0, 1].
    - axis: int or tuple of ints, axes to reduce, optional.
    - out: array, alternative output array, optional.
    - overwrite_input: bool, allow modifying `a` in place to save memory.
    - interpolation: str, method used when the quantile falls between
      two data points ('linear', 'lower', 'higher', 'midpoint', 'nearest').
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Quantile values on GPU.
    """

# Functions for finding minimum, maximum, and order-based statistics.
def amax(a, axis=None, out=None, keepdims=False, initial=None, where=True):
    """
    Compute the maximum along the specified axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce, optional.
    - out: array, alternative output array, optional.
    - keepdims: bool, keep reduced axes with size one.
    - initial: scalar, minimum value of the output element, optional.
    - where: array of bool, elements to include in the reduction, optional.

    Returns:
        cupy.ndarray: Maximum values on GPU.
    """
def amin(a, axis=None, out=None, keepdims=False, initial=None, where=True):
    """
    Compute the minimum along the specified axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce, optional.
    - out: array, alternative output array, optional.
    - keepdims: bool, keep reduced axes with size one.
    - initial: scalar, maximum value of the output element, optional.
    - where: array of bool, elements to include in the reduction, optional.

    Returns:
        cupy.ndarray: Minimum values on GPU.
    """
def ptp(a, axis=None, out=None, keepdims=False):
    """
    Compute the peak-to-peak range (maximum - minimum) along the axes.

    Parameters:
    - a: array-like, input array.
    - axis: int or tuple of ints, axes to reduce, optional.
    - out: array, alternative output array, optional.
    - keepdims: bool, keep reduced axes with size one.

    Returns:
        cupy.ndarray: Peak-to-peak values on GPU.
    """

# Functions for computing correlations and covariances between variables.
def corrcoef(x, y=None, rowvar=True, bias=None, ddof=None, dtype=None):
    """
    Compute Pearson product-moment correlation coefficients.

    Parameters:
    - x: array-like, input array of variables and observations.
    - y: array-like, additional set of variables, optional.
    - rowvar: bool, if True each row is a variable (columns are observations).
    - bias: deprecated, has no effect.
    - ddof: deprecated, has no effect.
    - dtype: data type of the result, optional.

    Returns:
        cupy.ndarray: Correlation coefficient matrix on GPU.
    """
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None, dtype=None):
    """
    Estimate the covariance matrix of the given variables.

    Parameters:
    - m: array-like, input array of variables and observations.
    - y: array-like, additional set of variables, optional.
    - rowvar: bool, if True each row is a variable (columns are observations).
    - bias: bool, if True normalize by N instead of N - 1.
    - ddof: int, delta degrees of freedom; overrides `bias` when given, optional.
    - fweights: array, integer frequency weights per observation, optional.
    - aweights: array, analytic (reliability) weights per observation, optional.
    - dtype: data type of the result, optional.

    Returns:
        cupy.ndarray: Covariance matrix on GPU.
    """
def correlate(a, v, mode='valid'):
    """
    Compute the cross-correlation of two 1-dimensional sequences.

    Parameters:
    - a: array-like, first input sequence.
    - v: array-like, second input sequence.
    - mode: str, output size mode ('valid', 'same', or 'full').

    Returns:
        cupy.ndarray: Cross-correlation on GPU.
    """

# Functions for computing histograms and frequency distributions.
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None):
    """
    Compute the histogram of a dataset.

    Parameters:
    - a: array-like, input data (flattened for the histogram).
    - bins: int or array, number of bins or explicit bin edges.
    - range: tuple, lower and upper range of the bins, optional.
    - normed: deprecated, use `density` instead.
    - weights: array, weight for each value in `a`, optional.
    - density: bool, if True normalize counts to a probability density.

    Returns:
        tuple: (hist, bin_edges) arrays on GPU.
    """
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, density=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters:
    - x: array-like, data coordinates along the first dimension.
    - y: array-like, data coordinates along the second dimension.
    - bins: int or array, bin specification for one or both dimensions.
    - range: array, bin ranges per dimension, optional.
    - normed: deprecated, use `density` instead.
    - weights: array, weight for each (x, y) sample, optional.
    - density: bool, if True normalize counts to a probability density.

    Returns:
        tuple: (H, xedges, yedges) arrays on GPU.
    """
def histogramdd(sample, bins=10, range=None, normed=None, weights=None, density=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters:
    - sample: array-like, input samples of shape (N, D), or a sequence
      of D one-dimensional arrays.
    - bins: int or array, bin specification per dimension.
    - range: sequence, bin ranges per dimension, optional.
    - normed: deprecated, use `density` instead.
    - weights: array, weight for each sample, optional.
    - density: bool, if True normalize counts to a probability density.

    Returns:
        tuple: (H, edges) histogram and per-dimension bin edges on GPU.
    """
def bincount(x, weights=None, minlength=0):
    """
    Count the number of occurrences of each value in an array.

    Parameters:
    - x: array-like, array of non-negative integers.
    - weights: array, weight for each value in `x`, optional.
    - minlength: int, minimum number of bins in the output.

    Returns:
        cupy.ndarray: Occurrence counts (or weight sums) on GPU.
    """
def digitize(x, bins, right=False):
    """
    Return the indices of the bins to which each value in `x` belongs.

    Parameters:
    - x: array-like, input array.
    - bins: array-like, monotonic array of bin edges.
    - right: bool, whether intervals are closed on the right edge.

    Returns:
        cupy.ndarray: Bin indices on GPU.
    """

import cupy as cp
# Generate sample data: 10k draws from N(mean=10, std=2) on the GPU
data = cp.random.normal(10, 2, size=10000)
# Descriptive statistics
mean_val = cp.mean(data)
std_val = cp.std(data)
var_val = cp.var(data)
median_val = cp.median(data)
print(f"Mean: {mean_val}")
print(f"Standard deviation: {std_val}")
print(f"Variance: {var_val}")
print(f"Median: {median_val}")
# Percentiles and interquartile range (IQR)
q25 = cp.percentile(data, 25)
q75 = cp.percentile(data, 75)
iqr = q75 - q25
print(f"25th percentile: {q25}")
print(f"75th percentile: {q75}")
print(f"Interquartile range: {iqr}")
# Multi-dimensional data analysis
# 1000 observations of 5 variables, standard normal
matrix_data = cp.random.normal(0, 1, size=(1000, 5))
# Statistics along different axes
column_means = cp.mean(matrix_data, axis=0) # Mean of each column
row_means = cp.mean(matrix_data, axis=1) # Mean of each row
overall_mean = cp.mean(matrix_data) # Overall mean
# Standard deviations (axis=0 reduces over rows, axis=1 over columns)
column_stds = cp.std(matrix_data, axis=0)
row_stds = cp.std(matrix_data, axis=1)
print(f"Column means: {column_means}")
print(f"Column standard deviations: {column_stds}")
# Generate correlated data
n_samples = 5000
x = cp.random.normal(0, 1, n_samples)
y = 2 * x + cp.random.normal(0, 0.5, n_samples) # y = 2x + noise
z = cp.random.normal(0, 1, n_samples) # Independent variable
# Combine into matrix (variables as rows)
data_matrix = cp.stack([x, y, z])
# Correlation matrix (rowvar=True default: each row is one variable)
corr_matrix = cp.corrcoef(data_matrix)
print("Correlation matrix:")
print(corr_matrix)
# Covariance matrix
cov_matrix = cp.cov(data_matrix)
print("Covariance matrix:")
print(cov_matrix)
# Pairwise correlation: off-diagonal [0, 1] entry of each 2x2 matrix
xy_corr = cp.corrcoef(x, y)[0, 1]
xz_corr = cp.corrcoef(x, z)[0, 1]
print(f"X-Y correlation: {xy_corr}")
print(f"X-Z correlation: {xz_corr}")
# Single variable histogram
data = cp.random.exponential(2.0, size=10000)
# Compute histogram (density=True normalizes counts to a probability density)
hist, bin_edges = cp.histogram(data, bins=50, density=True)
# Bin centers are the midpoints of consecutive edges
bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2
print(f"Histogram shape: {hist.shape}")
print(f"Bin edges shape: {bin_edges.shape}")
# 2D histogram for joint distribution
x = cp.random.normal(0, 1, 5000)
y = cp.random.normal(0, 1, 5000)
hist_2d, x_edges, y_edges = cp.histogram2d(x, y, bins=30)
print(f"2D histogram shape: {hist_2d.shape}")
# Multidimensional histogram over 3-D standard-normal samples
samples = cp.random.multivariate_normal([0, 0, 0], cp.eye(3), size=1000)
hist_nd, edges = cp.histogramdd(samples, bins=10)
print(f"ND histogram shape: {hist_nd.shape}")
# Weighted statistics
values = cp.array([1, 2, 3, 4, 5])
weights = cp.array([1, 2, 3, 2, 1])
# Weighted histogram: each value contributes its weight to its bin
hist_weighted, _ = cp.histogram(values, bins=5, weights=weights)
print(f"Weighted histogram: {hist_weighted}")
# Time series analysis: random walk via cumulative sum of Gaussian steps
time_series = cp.cumsum(cp.random.normal(0, 1, 1000))
# Rolling statistics (using convolution)
window_size = 50
# Box filter: convolving with it yields a moving average
kernel = cp.ones(window_size) / window_size
rolling_mean = cp.convolve(time_series, kernel, mode='valid')
# Moving statistics
def rolling_std(data, window):
    """
    Approximate rolling standard deviation of a 1-D series.

    Parameters:
    - data: 1-D array, input series.
    - window: int, window length in samples.

    Returns:
        1-D array of length len(data) - window + 1 with the rolling
        standard deviation.

    NOTE(review): deviations are measured against an edge-padded rolling
    mean rather than each window's own mean, so this approximates — and
    does not exactly equal — the per-window standard deviation.
    """
    # Box filter used for both the mean and the variance passes.
    box = cp.ones(window) / window
    rolling_mean = cp.convolve(data, box, mode='valid')
    # Left-pad with the first mean value so it aligns with the samples
    padded_mean = cp.pad(rolling_mean, (window - 1, 0), mode='edge')
    # Rolling average of the squared deviations, then back to std
    squared_diff = (data - padded_mean) ** 2
    rolling_var = cp.convolve(squared_diff, box, mode='valid')
    return cp.sqrt(rolling_var)
rolling_std_vals = rolling_std(time_series, window_size)
# Outlier detection using IQR method
data = cp.random.normal(0, 1, 1000)
# Add some outliers well outside the bulk of the distribution
data = cp.concatenate([data, cp.array([5, -5, 6, -6])])
q25 = cp.percentile(data, 25)
q75 = cp.percentile(data, 75)
iqr = q75 - q25
# Define outliers as values beyond 1.5 * IQR from quartiles (Tukey's fences)
lower_bound = q25 - 1.5 * iqr
upper_bound = q75 + 1.5 * iqr
outliers = data[(data < lower_bound) | (data > upper_bound)]
normal_data = data[(data >= lower_bound) & (data <= upper_bound)]
print(f"Number of outliers: {len(outliers)}")
print(f"Outlier values: {outliers}")
# Empirical CDF
def empirical_cdf(data, x):
    """
    Evaluate the empirical CDF of `data` at the point `x`.

    Returns the fraction of samples that are <= x, computed as the
    mean of the boolean mask (True counts as 1).
    """
    return cp.mean(data <= x)
# Compute CDF at specific points on a uniform grid over [-3, 3].
test_points = cp.linspace(-3, 3, 100)
# NOTE(review): the Python-level loop launches one reduction per point;
# fine for a demo, but a vectorized comparison would be faster on GPU.
cdf_values = cp.array([empirical_cdf(data, point) for point in test_points])
# Install with Tessl CLI
#   npx tessl i tessl/pypi-cupy-cuda101@9.6.1