CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-keras

Multi-backend deep learning framework that provides a unified, high-level API for building and training neural networks across JAX, TensorFlow, PyTorch, and OpenVINO backends.

Pending
Overview
Eval results
Files

docs/operations.md

Backend Operations

Low-level operations and backend functionality for tensor operations, mathematical functions, and neural network primitives across different backend engines (JAX, TensorFlow, PyTorch, OpenVINO).

Capabilities

Core Tensor Operations

Basic tensor manipulation functions for creating, reshaping, and converting tensors.

def cast(x, dtype):
    """
    Cast tensor to specified dtype.
    
    Parameters:
    - x: Input tensor
    - dtype: Target data type
    
    Returns:
    Tensor cast to target dtype
    """

def convert_to_tensor(x, dtype=None):
    """
    Convert input to backend tensor.
    
    Parameters:
    - x: Input data (array, list, etc.)
    - dtype: Target data type
    
    Returns:
    Backend tensor
    """

def convert_to_numpy(x):
    """
    Convert tensor to numpy array.
    
    Parameters:
    - x: Input tensor
    
    Returns:
    Numpy array
    """

def is_tensor(x):
    """
    Check if input is a tensor.
    
    Parameters:
    - x: Input to check
    
    Returns:
    Boolean indicating if input is tensor
    """

def shape(x):
    """
    Get shape of tensor.
    
    Parameters:
    - x: Input tensor
    
    Returns:
    Shape tuple
    """

def dtype(x):
    """
    Get data type of tensor.
    
    Parameters:
    - x: Input tensor
    
    Returns:
    Data type
    """

def reshape(x, new_shape):
    """
    Reshape tensor to new shape.
    
    Parameters:
    - x: Input tensor
    - new_shape: Target shape
    
    Returns:
    Reshaped tensor
    """

def transpose(x, axes=None):
    """
    Transpose tensor dimensions.
    
    Parameters:
    - x: Input tensor
    - axes: Permutation of dimensions (optional)
    
    Returns:
    Transposed tensor
    """

def expand_dims(x, axis):
    """
    Expand tensor dimensions.
    
    Parameters:
    - x: Input tensor
    - axis: Position to insert new axis
    
    Returns:
    Tensor with expanded dimensions
    """

def squeeze(x, axis=None):
    """
    Remove single-dimensional entries.
    
    Parameters:
    - x: Input tensor
    - axis: Specific axis to squeeze (optional)
    
    Returns:
    Squeezed tensor
    """

Mathematical Operations

Core mathematical functions for tensor arithmetic and mathematical transformations.

def add(x1, x2):
    """
    Element-wise addition.
    
    Parameters:
    - x1: First input tensor
    - x2: Second input tensor
    
    Returns:
    Element-wise sum
    """

def subtract(x1, x2):
    """Element-wise subtraction."""

def multiply(x1, x2):
    """Element-wise multiplication."""

def divide(x1, x2):
    """Element-wise division."""

def power(x1, x2):
    """Element-wise power."""

def sqrt(x):
    """
    Element-wise square root.
    
    Parameters:
    - x: Input tensor
    
    Returns:
    Square root of input
    """

def square(x):
    """Element-wise square."""

def abs(x):
    """Element-wise absolute value."""

def sign(x):
    """Element-wise sign function."""

def exp(x):
    """Element-wise exponential."""

def log(x):
    """Element-wise natural logarithm."""

def sin(x):
    """Element-wise sine."""

def cos(x):
    """Element-wise cosine."""

def tan(x):
    """Element-wise tangent."""

def sinh(x):
    """Element-wise hyperbolic sine."""

def cosh(x):
    """Element-wise hyperbolic cosine."""

def tanh(x):
    """Element-wise hyperbolic tangent."""

def ceil(x):
    """Element-wise ceiling."""

def floor(x):
    """Element-wise floor."""

def round(x):
    """Element-wise rounding."""

def maximum(x1, x2):
    """Element-wise maximum."""

def minimum(x1, x2):
    """Element-wise minimum."""

def clip(x, min_value, max_value):
    """
    Clip tensor values to range.
    
    Parameters:
    - x: Input tensor
    - min_value: Minimum value
    - max_value: Maximum value
    
    Returns:
    Clipped tensor
    """

Linear Algebra Operations

Linear algebra functions for matrix operations and decompositions.

def matmul(x1, x2):
    """
    Matrix multiplication.
    
    Parameters:
    - x1: First input tensor
    - x2: Second input tensor
    
    Returns:
    Matrix product
    """

def dot(x1, x2):
    """Dot product of two tensors."""

def tensordot(x1, x2, axes):
    """
    Tensor dot product along specified axes.
    
    Parameters:
    - x1: First input tensor
    - x2: Second input tensor
    - axes: Axes to contract over
    
    Returns:
    Tensor dot product
    """

def outer(x1, x2):
    """Outer product of two vectors."""

def inner(x1, x2):
    """Inner product of two tensors."""

def cross(x1, x2):
    """Cross product of two vectors."""

def norm(x, ord=None, axis=None, keepdims=False):
    """
    Compute tensor norm.
    
    Parameters:
    - x: Input tensor
    - ord: Order of norm
    - axis: Axis along which to compute norm
    - keepdims: Whether to keep dimensions
    
    Returns:
    Norm of tensor
    """

def det(x):
    """Determinant of square matrix."""

def inv(x):
    """Matrix inverse."""

def solve(a, b):
    """
    Solve linear system ax = b.
    
    Parameters:
    - a: Coefficient matrix
    - b: Right-hand side
    
    Returns:
    Solution x
    """

def eig(x):
    """Eigenvalues and eigenvectors."""

def svd(x, full_matrices=True):
    """
    Singular value decomposition.
    
    Parameters:
    - x: Input matrix
    - full_matrices: Whether to compute full-sized U and V
    
    Returns:
    Tuple (U, s, Vh) of SVD decomposition
    """

def qr(x):
    """QR decomposition."""

def cholesky(x):
    """Cholesky decomposition."""

Array Creation and Manipulation

Functions for creating arrays and manipulating their structure.

def zeros(shape, dtype='float32'):
    """
    Create tensor of zeros.
    
    Parameters:
    - shape: Shape of output tensor
    - dtype: Data type
    
    Returns:
    Tensor filled with zeros
    """

def ones(shape, dtype='float32'):
    """Create tensor of ones."""

def full(shape, fill_value, dtype='float32'):
    """
    Create tensor filled with value.
    
    Parameters:
    - shape: Shape of output tensor
    - fill_value: Value to fill with
    - dtype: Data type
    
    Returns:
    Tensor filled with specified value
    """

def eye(N, M=None, k=0, dtype='float32'):
    """
    Create identity matrix.
    
    Parameters:
    - N: Number of rows
    - M: Number of columns (defaults to N)
    - k: Index of diagonal
    - dtype: Data type
    
    Returns:
    Identity matrix
    """

def arange(start, stop=None, step=1, dtype='int32'):
    """
    Create range of values.
    
    Parameters:
    - start: Start value
    - stop: Stop value  
    - step: Step size
    - dtype: Data type
    
    Returns:
    Array of evenly spaced values
    """

def linspace(start, stop, num=50, endpoint=True, dtype='float32'):
    """
    Create linearly spaced values.
    
    Parameters:
    - start: Start value
    - stop: Stop value
    - num: Number of samples
    - endpoint: Whether to include endpoint
    - dtype: Data type
    
    Returns:
    Array of linearly spaced values
    """

def concatenate(tensors, axis=0):
    """
    Concatenate tensors along axis.
    
    Parameters:
    - tensors: List of tensors to concatenate
    - axis: Axis for concatenation
    
    Returns:
    Concatenated tensor
    """

def stack(tensors, axis=0):
    """
    Stack tensors along new axis.
    
    Parameters:
    - tensors: List of tensors to stack
    - axis: Axis for stacking
    
    Returns:
    Stacked tensor
    """

def split(x, indices_or_sections, axis=0):
    """
    Split tensor into sub-tensors.
    
    Parameters:
    - x: Input tensor
    - indices_or_sections: Split points or number of sections
    - axis: Axis along which to split
    
    Returns:
    List of sub-tensors
    """

def unstack(x, num=None, axis=0):
    """
    Unstack tensor along axis.
    
    Parameters:
    - x: Input tensor
    - num: Number of tensors to unstack
    - axis: Axis to unstack along
    
    Returns:
    List of unstacked tensors
    """

Statistical Operations

Functions for computing statistics and aggregations over tensors.

def mean(x, axis=None, keepdims=False):
    """
    Compute mean along axis.
    
    Parameters:
    - x: Input tensor
    - axis: Axis to compute mean over
    - keepdims: Whether to keep dimensions
    
    Returns:
    Mean of tensor
    """

def sum(x, axis=None, keepdims=False):
    """Compute sum along axis."""

def prod(x, axis=None, keepdims=False):
    """Compute product along axis."""

def max(x, axis=None, keepdims=False):
    """Compute maximum along axis."""

def min(x, axis=None, keepdims=False):
    """Compute minimum along axis."""

def std(x, axis=None, keepdims=False):
    """Compute standard deviation along axis."""

def var(x, axis=None, keepdims=False):
    """Compute variance along axis."""

def argmax(x, axis=None):
    """
    Indices of maximum values.
    
    Parameters:
    - x: Input tensor
    - axis: Axis to find argmax over
    
    Returns:
    Indices of maximum values
    """

def argmin(x, axis=None):
    """Indices of minimum values."""

def argsort(x, axis=-1):
    """
    Indices that would sort tensor.
    
    Parameters:
    - x: Input tensor
    - axis: Axis to sort along
    
    Returns:
    Indices that would sort tensor
    """

def sort(x, axis=-1):
    """Sort tensor along axis."""

def top_k(x, k, sorted=True):
    """
    Find top k values and indices.
    
    Parameters:
    - x: Input tensor
    - k: Number of top values to find
    - sorted: Whether to sort output
    
    Returns:
    Tuple of (values, indices)
    """

Logical and Comparison Operations

Functions for logical operations and comparisons between tensors.

def equal(x1, x2):
    """Element-wise equality."""

def not_equal(x1, x2):
    """Element-wise inequality."""

def less(x1, x2):
    """Element-wise less than."""

def less_equal(x1, x2):
    """Element-wise less than or equal."""

def greater(x1, x2):
    """Element-wise greater than."""

def greater_equal(x1, x2):
    """Element-wise greater than or equal."""

def logical_and(x1, x2):
    """Element-wise logical AND."""

def logical_or(x1, x2):
    """Element-wise logical OR."""

def logical_not(x):
    """Element-wise logical NOT."""

def logical_xor(x1, x2):
    """Element-wise logical XOR."""

def all(x, axis=None, keepdims=False):
    """
    Test whether all elements are True.
    
    Parameters:
    - x: Input tensor
    - axis: Axis to test over
    - keepdims: Whether to keep dimensions
    
    Returns:
    Boolean result
    """

def any(x, axis=None, keepdims=False):
    """Test whether any elements are True."""

def where(condition, x1, x2):
    """
    Select elements from x1 or x2 based on condition.
    
    Parameters:
    - condition: Boolean condition tensor
    - x1: Tensor to select from when condition is True
    - x2: Tensor to select from when condition is False
    
    Returns:
    Selected elements
    """

Neural Network Operations

Specialized operations commonly used in neural networks.

def relu(x):
    """
    Rectified Linear Unit activation.
    
    Parameters:
    - x: Input tensor
    
    Returns:
    ReLU activated tensor
    """

def relu6(x):
    """ReLU capped at 6."""

def elu(x, alpha=1.0):
    """Exponential Linear Unit."""

def selu(x):
    """Scaled Exponential Linear Unit."""

def gelu(x, approximate=False):
    """Gaussian Error Linear Unit."""

def sigmoid(x):
    """Sigmoid activation."""

def softmax(x, axis=-1):
    """
    Softmax activation.
    
    Parameters:
    - x: Input tensor
    - axis: Axis to apply softmax over
    
    Returns:
    Softmax activated tensor
    """

def log_softmax(x, axis=-1):
    """Log-softmax activation."""

def softplus(x):
    """Softplus activation."""

def swish(x):
    """Swish activation."""

def conv(inputs, kernel, strides=1, padding='valid', data_format=None, 
         dilation_rate=1):
    """
    N-dimensional convolution.
    
    Parameters:
    - inputs: Input tensor
    - kernel: Convolution kernel
    - strides: Stride of convolution
    - padding: Padding mode
    - data_format: Data format
    - dilation_rate: Dilation rate
    
    Returns:
    Convolution output
    """

def conv_transpose(inputs, kernel, strides, padding='valid', output_padding=None,
                   data_format=None, dilation_rate=1):
    """Transposed convolution (deconvolution)."""

def depthwise_conv(inputs, kernel, strides=1, padding='valid', data_format=None,
                   dilation_rate=1):
    """Depthwise convolution."""

def max_pool(inputs, pool_size, strides=None, padding='valid', data_format=None):
    """
    Max pooling operation.
    
    Parameters:
    - inputs: Input tensor
    - pool_size: Size of pooling window
    - strides: Stride of pooling
    - padding: Padding mode
    - data_format: Data format
    
    Returns:
    Max pooled output
    """

def average_pool(inputs, pool_size, strides=None, padding='valid', data_format=None):
    """Average pooling operation."""

def batch_normalization(x, mean, variance, offset=None, scale=None, 
                       variance_epsilon=1e-3):
    """
    Batch normalization.
    
    Parameters:
    - x: Input tensor
    - mean: Mean for normalization
    - variance: Variance for normalization
    - offset: Offset parameter (beta)
    - scale: Scale parameter (gamma)
    - variance_epsilon: Small constant for numerical stability
    
    Returns:
    Normalized tensor
    """

def layer_normalization(x, scale=None, offset=None, axis=-1, epsilon=1e-3):
    """Layer normalization."""

def dropout(x, rate, noise_shape=None, seed=None):
    """
    Dropout regularization.
    
    Parameters:
    - x: Input tensor
    - rate: Dropout rate
    - noise_shape: Shape of dropout mask
    - seed: Random seed
    
    Returns:
    Dropout applied tensor
    """

Control Flow Operations

Operations for conditional execution and loops in computational graphs.

def cond(pred, true_fn, false_fn):
    """
    Conditional execution.
    
    Parameters:
    - pred: Boolean predicate
    - true_fn: Function to execute if True
    - false_fn: Function to execute if False
    
    Returns:
    Result of executed function
    """

def switch(branch_index, branch_fns, default=None):
    """
    Switch between multiple functions.
    
    Parameters:
    - branch_index: Index of branch to execute
    - branch_fns: List of functions
    - default: Default function if index out of range
    
    Returns:
    Result of selected function
    """

def while_loop(cond, body, loop_vars, maximum_iterations=None):
    """
    While loop operation.
    
    Parameters:
    - cond: Loop condition function
    - body: Loop body function
    - loop_vars: Initial loop variables
    - maximum_iterations: Maximum number of iterations
    
    Returns:
    Final loop variables
    """

def fori_loop(lower, upper, body_fun, init_val):
    """
    For loop with index range.
    
    Parameters:
    - lower: Lower bound of range
    - upper: Upper bound of range  
    - body_fun: Function to execute in loop
    - init_val: Initial value
    
    Returns:
    Final accumulated value
    """

def scan(f, init, xs, length=None, reverse=False, unroll=1):
    """
    Scan operation (sequential application).
    
    Parameters:
    - f: Function to apply
    - init: Initial carry value
    - xs: Input sequence
    - length: Length of sequence
    - reverse: Whether to scan in reverse
    - unroll: Unroll factor for optimization
    
    Returns:
    Tuple of (final_carry, outputs)
    """

Usage Examples

Basic Tensor Operations

import keras.ops as ops
import numpy as np

# Create tensors
x = ops.array([1, 2, 3, 4, 5])
y = ops.array([2, 3, 4, 5, 6])

# Basic arithmetic
sum_result = ops.add(x, y)
product = ops.multiply(x, y)
power_result = ops.power(x, 2)

# Mathematical functions
exp_result = ops.exp(x)
sin_result = ops.sin(x)
sqrt_result = ops.sqrt(x)

print(f"Sum: {ops.convert_to_numpy(sum_result)}")
print(f"Product: {ops.convert_to_numpy(product)}")

Matrix Operations

import keras.ops as ops

# Create matrices
A = ops.array([[1, 2], [3, 4]], dtype='float32')
B = ops.array([[5, 6], [7, 8]], dtype='float32')

# Matrix multiplication
C = ops.matmul(A, B)

# Linear algebra operations
det_A = ops.det(A)
inv_A = ops.inv(A)
norm_A = ops.norm(A)

print(f"Matrix product:\n{ops.convert_to_numpy(C)}")
print(f"Determinant: {ops.convert_to_numpy(det_A)}")

Neural Network Operations

import keras.ops as ops

# Activation functions
x = ops.array([-2, -1, 0, 1, 2], dtype='float32')

relu_out = ops.relu(x)
sigmoid_out = ops.sigmoid(x)
softmax_out = ops.softmax(ops.array([[1, 2, 3], [4, 5, 6]], dtype='float32'))

print(f"ReLU: {ops.convert_to_numpy(relu_out)}")
print(f"Sigmoid: {ops.convert_to_numpy(sigmoid_out)}")
print(f"Softmax:\n{ops.convert_to_numpy(softmax_out)}")

# Convolution operation
inputs = ops.random.normal((1, 28, 28, 1))  # Batch of 1 image
kernel = ops.random.normal((3, 3, 1, 32))   # 3x3 kernel, 32 filters

conv_out = ops.conv(inputs, kernel, strides=1, padding='same')
print(f"Convolution output shape: {ops.shape(conv_out)}")

Statistical Operations

import keras.ops as ops

# Create sample data
data = ops.random.normal((100, 10))  # 100 samples, 10 features

# Compute statistics
data_mean = ops.mean(data, axis=0)  # Mean along samples
data_std = ops.std(data, axis=0)    # Std along samples
data_max = ops.max(data, axis=1)    # Max along features
data_argmax = ops.argmax(data, axis=1)  # Index of max

print(f"Mean shape: {ops.shape(data_mean)}")
print(f"Std shape: {ops.shape(data_std)}")
print(f"Max shape: {ops.shape(data_max)}")

Custom Operations with Control Flow

import keras.ops as ops

def custom_activation(x, threshold=0.0):
    """Custom activation function using control flow."""
    return ops.where(
        ops.greater(x, threshold),
        x,  # Keep positive values
        ops.multiply(x, 0.1)  # Scale negative values
    )

def iterative_process(init_val, num_steps):
    """Example of iterative computation."""
    def step_fn(i, val):
        return val + ops.sin(ops.cast(i, 'float32'))
    
    return ops.fori_loop(0, num_steps, step_fn, init_val)

# Use custom operations
x = ops.array([-2, -1, 0, 1, 2], dtype='float32')
activated = custom_activation(x, threshold=0.5)

result = iterative_process(ops.array(0.0), 10)
print(f"Final result: {ops.convert_to_numpy(result)}")

Install with Tessl CLI

npx tessl i tessl/pypi-keras

docs

activations.md

applications.md

data-utils.md

index.md

initializers.md

layers.md

models.md

operations.md

random.md

regularizers.md

saving.md

training.md

tile.json