Deep learning framework providing tensor computation with GPU acceleration and dynamic neural networks with automatic differentiation
Comprehensive mathematical operations including linear algebra, FFT, special functions, and statistical operations. PyTorch provides extensive mathematical functionality across multiple specialized modules.
Advanced linear algebra operations for matrices and tensors.
def matmul(input: Tensor, other: Tensor) -> Tensor:
"""Matrix multiplication supporting broadcasting."""
def solve(A: Tensor, B: Tensor) -> Tensor:
"""Solve linear system AX = B."""
def inv(A: Tensor) -> Tensor:
"""Matrix inverse."""
def pinv(A: Tensor, rcond=1e-15, hermitian=False) -> Tensor:
"""Moore-Penrose pseudo-inverse."""
def det(A: Tensor) -> Tensor:
"""Matrix determinant."""
def slogdet(A: Tensor) -> Tuple[Tensor, Tensor]:
"""Sign and log-determinant."""
def norm(A: Tensor, ord=None, dim=None, keepdim=False, *, dtype=None) -> Tensor:
"""Matrix or vector norm."""
def vector_norm(x: Tensor, ord=2, dim=None, keepdim=False, *, dtype=None) -> Tensor:
"""Vector norm."""
def matrix_norm(A: Tensor, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None) -> Tensor:
"""Matrix norm."""
def matrix_rank(A: Tensor, atol=None, rtol=None, hermitian=False) -> Tensor:
"""Matrix rank."""
def cond(A: Tensor, p=None) -> Tensor:
"""Matrix condition number."""Matrix factorization methods for numerical analysis.
def svd(A: Tensor, full_matrices=True) -> Tuple[Tensor, Tensor, Tensor]:
"""Singular Value Decomposition."""
def svdvals(A: Tensor) -> Tensor:
"""Singular values only."""
def eig(A: Tensor) -> Tuple[Tensor, Tensor]:
"""Eigenvalue decomposition."""
def eigvals(A: Tensor) -> Tensor:
"""Eigenvalues only."""
def eigh(A: Tensor, UPLO='L') -> Tuple[Tensor, Tensor]:
"""Eigenvalue decomposition for Hermitian matrices."""
def eigvalsh(A: Tensor, UPLO='L') -> Tensor:
"""Eigenvalues for Hermitian matrices."""
def qr(A: Tensor, mode='reduced') -> Tuple[Tensor, Tensor]:
"""QR decomposition."""
def cholesky(A: Tensor, upper=False) -> Tensor:
"""Cholesky decomposition."""
def cholesky_ex(A: Tensor, upper=False, check_errors=False) -> Tuple[Tensor, Tensor]:
"""Cholesky decomposition with error checking."""
def lu_factor(A: Tensor, *, pivot=True) -> Tuple[Tensor, Tensor]:
"""LU factorization."""
def lu_factor_ex(A: Tensor, *, pivot=True, check_errors=False) -> Tuple[Tensor, Tensor, Tensor]:
"""LU factorization with error checking."""FFT operations for frequency domain analysis.
def fft(input: Tensor, n=None, dim=-1, norm=None) -> Tensor:
"""One-dimensional discrete Fourier transform."""
def ifft(input: Tensor, n=None, dim=-1, norm=None) -> Tensor:
"""One-dimensional inverse discrete Fourier transform."""
def rfft(input: Tensor, n=None, dim=-1, norm=None) -> Tensor:
"""One-dimensional real-to-complex FFT."""
def irfft(input: Tensor, n=None, dim=-1, norm=None) -> Tensor:
"""One-dimensional complex-to-real inverse FFT."""
def fft2(input: Tensor, s=None, dim=(-2, -1), norm=None) -> Tensor:
"""Two-dimensional discrete Fourier transform."""
def ifft2(input: Tensor, s=None, dim=(-2, -1), norm=None) -> Tensor:
"""Two-dimensional inverse discrete Fourier transform."""
def rfft2(input: Tensor, s=None, dim=(-2, -1), norm=None) -> Tensor:
"""Two-dimensional real-to-complex FFT."""
def irfft2(input: Tensor, s=None, dim=(-2, -1), norm=None) -> Tensor:
"""Two-dimensional complex-to-real inverse FFT."""
def fftn(input: Tensor, s=None, dim=None, norm=None) -> Tensor:
"""N-dimensional discrete Fourier transform."""
def ifftn(input: Tensor, s=None, dim=None, norm=None) -> Tensor:
"""N-dimensional inverse discrete Fourier transform."""
def rfftn(input: Tensor, s=None, dim=None, norm=None) -> Tensor:
"""N-dimensional real-to-complex FFT."""
def irfftn(input: Tensor, s=None, dim=None, norm=None) -> Tensor:
"""N-dimensional complex-to-real inverse FFT."""
def fftfreq(n: int, d=1.0, *, dtype=None, device=None, requires_grad=False) -> Tensor:
"""Discrete Fourier Transform sample frequencies."""
def rfftfreq(n: int, d=1.0, *, dtype=None, device=None, requires_grad=False) -> Tensor:
"""Real-valued discrete Fourier Transform sample frequencies."""
def fftshift(input: Tensor, dim=None) -> Tensor:
"""Shift zero-frequency component to center."""
def ifftshift(input: Tensor, dim=None) -> Tensor:
"""Inverse of fftshift."""Special mathematical functions for advanced computations.
def erf(input: Tensor) -> Tensor:
"""Error function."""
def erfc(input: Tensor) -> Tensor:
"""Complementary error function."""
def erfcx(input: Tensor) -> Tensor:
"""Scaled complementary error function."""
def erfinv(input: Tensor) -> Tensor:
"""Inverse error function."""
def digamma(input: Tensor) -> Tensor:
"""Digamma function (logarithmic derivative of gamma)."""
def gammaln(input: Tensor) -> Tensor:
"""Log gamma function."""
def polygamma(n: int, input: Tensor) -> Tensor:
"""Polygamma function."""
def multigammaln(input: Tensor, p: int) -> Tensor:
"""Multivariate log gamma function."""
def gammainc(input: Tensor, other: Tensor) -> Tensor:
"""Regularized lower incomplete gamma function."""
def gammaincc(input: Tensor, other: Tensor) -> Tensor:
"""Regularized upper incomplete gamma function."""
def bessel_j0(input: Tensor) -> Tensor:
"""Bessel function of the first kind of order 0."""
def bessel_j1(input: Tensor) -> Tensor:
"""Bessel function of the first kind of order 1."""
def bessel_y0(input: Tensor) -> Tensor:
"""Bessel function of the second kind of order 0."""
def bessel_y1(input: Tensor) -> Tensor:
"""Bessel function of the second kind of order 1."""
def modified_bessel_i0(input: Tensor) -> Tensor:
"""Modified Bessel function of the first kind of order 0."""
def modified_bessel_i1(input: Tensor) -> Tensor:
"""Modified Bessel function of the first kind of order 1."""
def modified_bessel_k0(input: Tensor) -> Tensor:
"""Modified Bessel function of the second kind of order 0."""
def modified_bessel_k1(input: Tensor) -> Tensor:
"""Modified Bessel function of the second kind of order 1."""
def i0(input: Tensor) -> Tensor:
"""Modified Bessel function of the first kind of order 0."""
def i0e(input: Tensor) -> Tensor:
"""Exponentially scaled modified Bessel function of order 0."""
def ndtr(input: Tensor) -> Tensor:
"""Standard normal cumulative distribution function."""
def ndtri(input: Tensor) -> Tensor:
"""Inverse of standard normal cumulative distribution function."""
def log_ndtr(input: Tensor) -> Tensor:
"""Log of standard normal cumulative distribution function."""
def expit(input: Tensor) -> Tensor:
"""Expit function (sigmoid)."""
def logit(input: Tensor, eps=None) -> Tensor:
"""Logit function (inverse sigmoid)."""
def xlogy(input: Tensor, other: Tensor) -> Tensor:
"""Elementwise x * log(y)."""
def xlog1py(input: Tensor, other: Tensor) -> Tensor:
"""Elementwise x * log1p(y)."""
def zeta(input: Tensor, other: Tensor) -> Tensor:
"""Hurwitz zeta function."""
def logsumexp(input: Tensor, dim, keepdim=False) -> Tensor:
"""Log of sum of exponentials."""
def softmax(input: Tensor, dim, dtype=None) -> Tensor:
"""Softmax function."""
def log_softmax(input: Tensor, dim, dtype=None) -> Tensor:
"""Log softmax function."""Statistical operations and probability distributions.
def mean(input: Tensor, dim=None, keepdim=False, *, dtype=None) -> Tensor:
"""Mean along specified dimensions."""
def median(input: Tensor, dim=None, keepdim=False) -> Tensor:
"""Median along specified dimensions."""
def mode(input: Tensor, dim=None, keepdim=False) -> Tensor:
"""Mode along specified dimensions."""
def std(input: Tensor, dim=None, unbiased=True, keepdim=False) -> Tensor:
"""Standard deviation."""
def var(input: Tensor, dim=None, unbiased=True, keepdim=False) -> Tensor:
"""Variance."""
def std_mean(input: Tensor, dim=None, unbiased=True, keepdim=False) -> Tuple[Tensor, Tensor]:
"""Standard deviation and mean."""
def var_mean(input: Tensor, dim=None, unbiased=True, keepdim=False) -> Tuple[Tensor, Tensor]:
"""Variance and mean."""
def cov(input: Tensor, *, correction=1, fweights=None, aweights=None) -> Tensor:
"""Covariance matrix."""
def corrcoef(input: Tensor) -> Tensor:
"""Correlation coefficient matrix."""
def bincount(input: Tensor, weights=None, minlength=0) -> Tensor:
"""Count occurrences of each value."""
def histogram(input: Tensor, bins, *, range=None, weight=None, density=False) -> Tuple[Tensor, Tensor]:
"""Compute histogram of tensor values."""
def histogramdd(input: Tensor, bins, *, range=None, weight=None, density=False) -> Tuple[Tensor, List[Tensor]]:
"""Compute multidimensional histogram."""Operations for sparse tensors and matrices.
def sparse.mm(mat1: Tensor, mat2: Tensor) -> Tensor:
"""Sparse matrix multiplication."""
def sparse.addmm(bias: Tensor, mat1: Tensor, mat2: Tensor, *, beta=1, alpha=1) -> Tensor:
"""Sparse addmm operation."""
def sparse.sum(input: Tensor, dim=None, dtype=None) -> Tensor:
"""Sum of sparse tensor elements."""
def sparse.softmax(input: Tensor, dim: int, *, dtype=None) -> Tensor:
"""Sparse softmax."""
def sparse.log_softmax(input: Tensor, dim: int, *, dtype=None) -> Tensor:
"""Sparse log softmax."""
def sparse.spsolve(A: Tensor, B: Tensor) -> Tensor:
"""Solve sparse linear system."""
def sparse.sampled_addmm(bias: Tensor, input: Tensor, mat1: Tensor, mat2: Tensor, *, beta=1, alpha=1) -> Tensor:
"""Sampled sparse matrix multiplication and addition."""Random number generation and sampling functions.
def manual_seed(seed: int):
"""Set random seed for reproducibility."""
def initial_seed() -> int:
"""Return initial random seed."""
def seed() -> int:
"""Generate random seed."""
def get_rng_state() -> Tensor:
"""Get random number generator state."""
def set_rng_state(new_state: Tensor):
"""Set random number generator state."""
def bernoulli(input: Tensor, *, generator=None) -> Tensor:
"""Sample from Bernoulli distribution."""
def poisson(input: Tensor, generator=None) -> Tensor:
"""Sample from Poisson distribution."""
def normal(mean: float, std: float, size, *, dtype=None, device=None, requires_grad=False) -> Tensor:
"""Sample from normal distribution."""
def uniform(low: float, high: float, size, *, dtype=None, device=None, requires_grad=False) -> Tensor:
"""Sample from uniform distribution."""
def exponential(lambd: float, size, *, dtype=None, device=None, requires_grad=False) -> Tensor:
"""Sample from exponential distribution."""
def geometric(p: float, size, *, dtype=None, device=None, requires_grad=False) -> Tensor:
"""Sample from geometric distribution."""
def multinomial(input: Tensor, num_samples: int, replacement=False, *, generator=None) -> Tensor:
"""Sample from multinomial distribution."""import torch
import torch.linalg as LA

# Demo: core linear-algebra routines applied to small random matrices.
A = torch.randn(3, 3)
B = torch.randn(3, 2)

# Scalar properties, inverse, and a linear solve.
det_A = LA.det(A)
inv_A = LA.inv(A)
X = LA.solve(A, B)  # X satisfies A @ X = B

# Factorizations. NOTE(review): torch.linalg.svd returns (U, S, Vh),
# i.e. the third value is the conjugate-transposed right factor.
U, S, V = LA.svd(A)
eigenvals, eigenvecs = LA.eig(A)
Q, R = LA.qr(A)

# Norms, conditioning, and rank.
frobenius_norm = LA.norm(A, ord='fro')
spectral_norm = LA.norm(A, ord=2)
condition_number = LA.cond(A)
rank = LA.matrix_rank(A)

print(f"Determinant: {det_A}")
print(f"Condition number: {condition_number}")
print(f"Rank: {rank}")import torch
import torch.fft as fft

# Demo: 1-D and 2-D discrete Fourier transforms.
# Two-tone test signal: 5 Hz + 10 Hz components sampled on [0, 1].
t = torch.linspace(0, 1, 1000)
signal = torch.sin(2 * torch.pi * 5 * t) + torch.sin(2 * torch.pi * 10 * t)

# FFT analysis
signal_fft = fft.fft(signal)
signal_rfft = fft.rfft(signal)  # real input -> one-sided spectrum
# Pass the actual sample spacing so the frequency bins are in Hz.
# With the default d=1.0 the axis would be in cycles/sample and would
# not line up with the 5 Hz / 10 Hz tones constructed above.
frequencies = fft.fftfreq(len(signal), d=(t[1] - t[0]).item())

# 2D FFT for images
image = torch.randn(256, 256)
image_fft = fft.fft2(image)
image_shifted = fft.fftshift(image_fft)  # move zero frequency to center

# Round trip: ifft(fft(x)) recovers the signal (imaginary part ~ 0).
reconstructed = fft.ifft(signal_fft).real
print(f"Original signal shape: {signal.shape}")
print(f"FFT shape: {signal_fft.shape}")
print(f"Real FFT shape: {signal_rfft.shape}")
print(f"Reconstruction error: {torch.mean((signal - reconstructed) ** 2)}")import torch
import torch.special as special

# Demo: special functions (error, gamma, Bessel, probability-related).
x = torch.linspace(-3, 3, 100)

# Error function and its complement.
erf_vals = special.erf(x)
erfc_vals = special.erfc(x)

# Gamma-family functions (Gamma recovered from its log to avoid overflow).
gamma_vals = torch.exp(special.gammaln(x + 1))  # Gamma function via log
digamma_vals = special.digamma(x + 1)

# Bessel functions. J0 is defined on all of R, but Y0 is real-valued only
# for positive arguments -- evaluating on the raw grid (which contains
# negatives) would yield NaNs, so use a strictly positive grid instead.
bessel_j0 = special.bessel_j0(x)
bessel_y0 = special.bessel_y0(x.abs() + 0.1)  # shift keeps all args > 0
# Probability functions
sigmoid_vals = special.expit(x)  # Sigmoid
logit_vals = special.logit(torch.sigmoid(x))  # Should recover x
# Normal distribution
ndtr_vals = special.ndtr(x)  # CDF
log_ndtr_vals = special.log_ndtr(x)  # Log CDF
print(f"erf(1.0): {special.erf(torch.tensor(1.0))}")
print(f"Gamma(5): {torch.exp(special.gammaln(torch.tensor(5.0)))}")
print(f"Sigmoid(0): {special.expit(torch.tensor(0.0))}")
print(f"Sigmoid(0): {special.expit(torch.tensor(0.0))}")import torch
# Demo: descriptive statistics over a 1000-sample, 10-feature matrix.
data = torch.randn(1000, 10)

# Per-feature (column-wise) summary statistics.
mean_vals = torch.mean(data, dim=0)
std_vals = torch.std(data, dim=0)
var_vals = torch.var(data, dim=0)

# Reductions over other axes: whole tensor and per-row.
overall_mean = torch.mean(data)
row_means = torch.mean(data, dim=1)

# Robust location and spread via the median and quartiles.
median_vals = torch.median(data, dim=0).values
q25 = torch.quantile(data, 0.25, dim=0)
q75 = torch.quantile(data, 0.75, dim=0)

# Pairwise feature relationships; torch expects variables in rows,
# hence the transpose.
correlation_matrix = torch.corrcoef(data.T)
covariance_matrix = torch.cov(data.T)

# Empirical distribution of a larger one-dimensional sample.
values = torch.randn(10000)
hist, bin_edges = torch.histogram(values, bins=50)

print(f"Data shape: {data.shape}")
print(f"Mean: {mean_vals[:5]}")  # First 5 features
print(f"Std: {std_vals[:5]}")
print(f"Correlation matrix shape: {correlation_matrix.shape}")import torch
# Demo: sparse COO tensors and sparse-dense matrix products.
# Build a 2x3 sparse matrix from (row, col) index pairs and values.
# Use torch.tensor with explicit dtypes instead of the legacy
# torch.LongTensor / torch.FloatTensor constructors.
indices = torch.tensor([[0, 1, 1], [2, 0, 2]], dtype=torch.int64)
values = torch.tensor([3, 4, 5], dtype=torch.float32)
shape = (2, 3)
sparse_a = torch.sparse_coo_tensor(indices, values, shape)
# Dense matrix
dense_b = torch.randn(3, 4)
# Sparse matrix multiplication
result = torch.sparse.mm(sparse_a, dense_b)
# Sparse addmm: bias + sparse_a @ dense_b
bias = torch.randn(2, 4)
result_with_bias = torch.sparse.addmm(bias, sparse_a, dense_b)
# Convert to dense for inspection
dense_a = sparse_a.to_dense()
print(f"Sparse matrix:\n{dense_a}")
print(f"Result shape: {result.shape}")
print(f"Sparse matrix multiplication completed")
print(f"Sparse matrix multiplication completed")import torch
import torch.linalg as LA

# Demo: batched linear algebra plus basic complex-tensor operations.
batch_size = 32
matrix_size = 64
batch_matrices = torch.randn(batch_size, matrix_size, matrix_size)

# linalg routines broadcast over leading batch dimensions.
batch_det = LA.det(batch_matrices)
batch_eigenvals = LA.eigvals(batch_matrices)

# One 10-column right-hand-side block per matrix in the batch.
batch_rhs = torch.randn(batch_size, matrix_size, 10)
batch_solutions = LA.solve(batch_matrices, batch_rhs)

# Batched SVD. NOTE(review): torch.linalg.svd returns (U, S, Vh).
U, S, V = LA.svd(batch_matrices)

# Complex tensors: assemble from real/imag parts, then inspect polar form.
complex_tensor = torch.complex(torch.randn(100), torch.randn(100))
complex_fft = torch.fft.fft(complex_tensor)
phase = torch.angle(complex_tensor)
magnitude = torch.abs(complex_tensor)

print(f"Batch determinants shape: {batch_det.shape}")
print(f"Batch eigenvalues shape: {batch_eigenvals.shape}")
print(f"Complex tensor magnitude range: {magnitude.min():.3f} to {magnitude.max():.3f}")Install with Tessl CLI
npx tessl i tessl/pypi-torch