Multi-backend deep learning framework providing a unified API for building and training neural networks across JAX, TensorFlow, PyTorch, and OpenVINO backends
—
Backend-agnostic mathematical operations providing NumPy-compatible APIs, neural network specific functions, and core tensor operations that work across JAX, TensorFlow, PyTorch, and OpenVINO backends.
Essential tensor operations and control flow functions.
def cast(x, dtype):
    """Cast tensor to specified data type."""
def convert_to_tensor(x, dtype=None, sparse=None):
    """Convert input to backend tensor."""
def convert_to_numpy(x):
    """Convert tensor to NumPy array."""
def shape(x):
    """Get tensor shape as tuple."""
def dtype(x):
    """Get tensor data type."""
def stop_gradient(x):
    """Stop gradient computation through tensor."""
def cond(pred, true_fn, false_fn):
    """Conditional execution based on predicate."""
def scan(fn, init, xs, length=None, reverse=False, unroll=1):
    """Apply function across sequence elements."""
def map(fn, xs):
    """Map function over tensor elements."""

Mathematical functions matching NumPy API for cross-backend compatibility.
# Array creation
def zeros(shape, dtype='float32'): ...
def ones(shape, dtype='float32'): ...
def eye(N, M=None, k=0, dtype='float32'): ...
def arange(start, stop=None, step=1, dtype=None): ...
# Mathematical operations
def add(x1, x2): ...
def subtract(x1, x2): ...
def multiply(x1, x2): ...
def divide(x1, x2): ...
def power(x1, x2): ...
def sqrt(x): ...
def exp(x): ...
def log(x): ...
def sin(x): ...
def cos(x): ...
def tanh(x): ...
# Reduction operations
def sum(x, axis=None, keepdims=False): ...
def mean(x, axis=None, keepdims=False): ...
def max(x, axis=None, keepdims=False): ...
def min(x, axis=None, keepdims=False): ...
# Linear algebra
def matmul(x1, x2): ...
def dot(x1, x2): ...
def transpose(x, axes=None): ...
def reshape(x, shape): ...
# Indexing and manipulation
def concatenate(arrays, axis=0): ...
def stack(arrays, axis=0): ...
def split(x, indices_or_sections, axis=0): ...
def squeeze(x, axis=None): ...
def expand_dims(x, axis): ...

Specialized operations for neural networks and deep learning.
# Activation functions
def relu(x): ...
def sigmoid(x): ...
def softmax(x, axis=-1): ...
def gelu(x, approximate=False): ...
def silu(x): ... # Also available as swish
# Convolution operations
def conv(inputs, kernel, strides=1, padding='valid', data_format=None,
         dilation_rate=1, groups=1): ...
def conv_transpose(inputs, kernel, strides=1, padding='valid'): ...
def depthwise_conv(inputs, kernel, strides=1, padding='valid'): ...
# Pooling operations
def max_pool(inputs, pool_size, strides=None, padding='valid'): ...
def average_pool(inputs, pool_size, strides=None, padding='valid'): ...
# Normalization
def batch_normalization(x, mean, variance, offset, scale, epsilon=1e-3): ...
def layer_normalization(x, axis=-1, epsilon=1e-3): ...
# Loss functions
def binary_crossentropy(target, output, from_logits=False): ...
def categorical_crossentropy(target, output, from_logits=False, axis=-1): ...
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1): ...
# Utility functions
def one_hot(x, num_classes, axis=-1, dtype='float32'): ...
def top_k(x, k, sorted=True): ...
def in_top_k(targets, predictions, k): ...

Advanced linear algebra operations for machine learning algorithms.
def cholesky(x): ...
def det(x): ...
def eig(x): ...
def eigh(x): ...
def inv(x): ...
def lstsq(a, b, rcond=None): ...
def norm(x, ord=None, axis=None, keepdims=False): ...
def qr(x, mode='reduced'): ...
def solve(a, b): ...
def svd(x, full_matrices=True): ...

Fast Fourier Transform operations for signal processing.
def fft(x): ...
def fft2(x): ...
def rfft(x): ...
def irfft(x): ...
def stft(x, frame_length, frame_step, fft_length=None): ...
def istft(stfts, frame_length, frame_step, fft_length=None): ...

import keras
from keras import ops
# Create tensors
x = ops.ones((3, 4))
y = ops.zeros((3, 4))
# Mathematical operations
z = ops.add(x, y)
w = ops.matmul(x, ops.transpose(y))
# Reductions
mean_val = ops.mean(x)
sum_val = ops.sum(x, axis=1)

import keras
from keras import ops
def forward_pass(x, weights, bias):
    # Linear transformation
    x = ops.matmul(x, weights) + bias
    # Activation
    x = ops.relu(x)
    # Normalization
    mean = ops.mean(x, axis=0, keepdims=True)
    variance = ops.var(x, axis=0, keepdims=True)
    x = (x - mean) / ops.sqrt(variance + 1e-7)
    return x

Install with Tessl CLI
npx tessl i tessl/pypi-keras-nightly