CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-tensorflow

An end-to-end open source platform for machine learning

Pending
Overview
Eval results
Files

docs/nn.md

Neural Network Operations

Core neural network operations including activations, convolutions, pooling, normalization, and loss functions. These operations provide the fundamental building blocks for constructing and training neural networks.

Capabilities

Activation Functions

Non-linear activation functions that introduce non-linearity into neural networks.

def relu(features, name=None):
    """
    Computes rectified linear: max(features, 0).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - features: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, int64, bfloat16, uint16, half, uint32, uint64
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type as features
    """

def relu6(features, name=None):
    """
    Computes Rectified Linear 6: min(max(features, 0), 6).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - features: A Tensor with type float, double, int32, uint8, int16, or int8
    - name: A name for the operation
    
    Returns:
    A Tensor with the same type as features
    """

def elu(features, name=None):
    """
    Computes exponential linear: exp(features) - 1 if features < 0, features otherwise.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - features: A Tensor. Must be one of the following types: half, bfloat16, float32, float64
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type as features
    """

def sigmoid(x, name=None):
    """
    Computes sigmoid of x element-wise: 1 / (1 + exp(-x)).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - x: A Tensor with type float16, float32, float64, complex64, or complex128
    - name: A name for the operation
    
    Returns:
    A Tensor with the same type as x
    """

def tanh(x, name=None):
    """
    Computes hyperbolic tangent of x element-wise.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type as x
    """

def softmax(logits, axis=None, name=None):
    """
    Computes softmax activations.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - logits: A non-empty Tensor. Must be one of the following types: half, bfloat16, float32, float64
    - axis: The dimension softmax would be performed on. Defaults to None, which is treated as -1 (the last dimension)
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type and shape as logits
    """

def log_softmax(logits, axis=None, name=None):
    """
    Computes log softmax activations.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - logits: A non-empty Tensor. Must be one of the following types: half, bfloat16, float32, float64
    - axis: The dimension softmax would be performed on. Defaults to None, which is treated as -1 (the last dimension)
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type and shape as logits
    """

def leaky_relu(features, alpha=0.2, name=None):
    """
    Compute the Leaky ReLU activation function.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - features: A Tensor representing preactivation values. Must be one of the following types: float16, float32, float64, int32, int64
    - alpha: Slope of the activation function at x < 0. Defaults to 0.2
    - name: A name for the operation
    
    Returns:
    The activation value
    """

def gelu(features, approximate=False, name=None):
    """
    Compute the Gaussian Error Linear Unit (GELU) activation function.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - features: A Tensor representing preactivation values
    - approximate: An optional bool. Defaults to False. Whether to enable approximation
    - name: A name for the operation
    
    Returns:
    A Tensor with the same type as features
    """

def swish(features, name=None):
    """
    Computes the Swish activation function: features * sigmoid(features).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - features: A Tensor representing preactivation values
    - name: A name for the operation
    
    Returns:
    The activation value
    """

Convolution Operations

Convolution operations for processing spatial data like images.

def conv2d(input, filters, strides, padding, use_cudnn_on_gpu=True, data_format="NHWC", 
           dilations=[1,1,1,1], name=None):
    """
    Computes a 2-D convolution given 4-D input and filter tensors.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A Tensor. Must be one of the following types: half, bfloat16, float32, float64
    - filters: A Tensor. Must have the same type as input
    - strides: An int or list of ints that has length 1, 2 or 4
    - padding: Either the string "SAME" or "VALID" indicating the type of padding algorithm to use
    - use_cudnn_on_gpu: An optional bool. Defaults to True. Whether to use cuDNN on GPU when available
    - data_format: An optional string from: "NHWC", "NCHW". Defaults to "NHWC"
    - dilations: A list of ints. Defaults to [1, 1, 1, 1]. The dilation factor for each dimension of input
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type as input
    """

def conv2d_transpose(input, filters, output_shape, strides, padding="SAME",
                     data_format="NHWC", dilations=None, name=None):
    """
    The transpose of conv2d (sometimes called deconvolution).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A 4-D Tensor of type float and shape [batch, height, width, in_channels] for NHWC data format
    - filters: A 4-D Tensor with the same type as input and shape [height, width, output_channels, in_channels]
    - output_shape: A 1-D Tensor representing the output shape of the deconvolution op
    - strides: An int or list of ints that has length 1, 2 or 4
    - padding: A string, either 'VALID' or 'SAME'
    - data_format: A string. 'NHWC' and 'NCHW' are supported
    - dilations: An int or list of ints that has length 1, 2 or 4, defaults to 1
    - name: Optional name for the returned tensor
    
    Returns:
    A Tensor with the same type as input
    """

def depthwise_conv2d(input, filter, strides, padding, data_format=None,
                     dilations=None, name=None):
    """
    Depthwise 2-D convolution: applies a separate filter to each input channel.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: 4-D with shape according to data_format
    - filter: 4-D with shape [filter_height, filter_width, in_channels, channel_multiplier]
    - strides: 1-D of size 4. The stride of the sliding window for each dimension of input
    - padding: Controls how to pad the image before applying the convolution
    - data_format: The data format for input. Either "NHWC" (default) or "NCHW"
    - dilations: 1-D of size 2. The dilation rate in which we sample input values
    - name: A name for this operation
    
    Returns:
    A 4-D Tensor with shape according to data_format
    """

def separable_conv2d(input, depthwise_filter, pointwise_filter, strides,
                     padding, data_format=None, dilations=None, name=None):
    """
    2-D convolution with separable filters (a depthwise step followed by a 1x1 pointwise step).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: 4-D Tensor with shape according to data_format
    - depthwise_filter: 4-D Tensor with shape [filter_height, filter_width, in_channels, channel_multiplier]
    - pointwise_filter: 4-D Tensor with shape [1, 1, channel_multiplier * in_channels, out_channels]
    - strides: 1-D of size 4. The stride of the sliding window for each dimension of input
    - padding: Controls how to pad the image before applying the convolution
    - data_format: The data format for input. Either "NHWC" (default) or "NCHW"
    - dilations: 1-D of size 2. The dilation rate in which we sample input values
    - name: A name for this operation
    
    Returns:
    A 4-D Tensor with shape according to data_format
    """

Pooling Operations

Pooling operations for downsampling and feature extraction.

def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
    """
    Performs the max pooling on the input.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A 4-D Tensor of the format specified by data_format
    - ksize: An int or list of ints that has length 1, 2 or 4
    - strides: An int or list of ints that has length 1, 2 or 4
    - padding: Either the string "SAME" or "VALID" indicating the type of padding algorithm to use
    - data_format: A string. 'NHWC' and 'NCHW' are supported
    - name: Optional name for the operation
    
    Returns:
    A Tensor of format specified by data_format
    """

def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
    """
    Performs the average pooling on the input.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A 4-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32
    - ksize: An int or list of ints that has length 1, 2 or 4
    - strides: An int or list of ints that has length 1, 2 or 4
    - padding: A string, either 'VALID' or 'SAME'
    - data_format: A string. 'NHWC' and 'NCHW' are supported
    - name: Optional name for the operation
    
    Returns:
    A Tensor with the same type as input
    """

def global_max_pool2d(input, data_format="NHWC", name=None):
    """
    Performs global max pooling on the input (pools over the full spatial extent).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A 4-D Tensor of the format specified by data_format
    - data_format: A string. 'NHWC' and 'NCHW' are supported
    - name: Optional name for the operation
    
    Returns:
    A Tensor of format specified by data_format
    """

def global_avg_pool2d(input, data_format="NHWC", name=None):
    """
    Performs global average pooling on the input (pools over the full spatial extent).
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A 4-D Tensor of the format specified by data_format
    - data_format: A string. 'NHWC' and 'NCHW' are supported
    - name: Optional name for the operation
    
    Returns:
    A Tensor of format specified by data_format
    """

Normalization

Normalization operations for training stability and performance.

def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None):
    """
    Batch normalization.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - x: Input Tensor
    - mean: A mean Tensor
    - variance: A variance Tensor
    - offset: An offset Tensor, often denoted β in equations, or None
    - scale: A scale Tensor, often denoted γ in equations, or None
    - variance_epsilon: A small float number to avoid dividing by 0
    - name: A name for this operation
    
    Returns:
    The normalized, scaled, offset tensor
    """

def layer_normalization(inputs, begin_norm_axis=1, begin_params_axis=-1, name=None):
    """
    Applies layer normalization.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - inputs: A tensor with 2 or more dimensions, where the first dimension has batch_size
    - begin_norm_axis: The first normalization dimension: normalization will be performed along dimensions begin_norm_axis : rank(inputs)
    - begin_params_axis: Part of the standard interface, unused
    - name: A name for this operation
    
    Returns:
    A normalized Tensor with the same shape as inputs
    """

def local_response_normalization(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None):
    """
    Local Response Normalization.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - input: A Tensor. Must be one of the following types: half, bfloat16, float32
    - depth_radius: An optional int. Defaults to 5. 0-D. Half-width of the 1-D normalization window
    - bias: An optional float. Defaults to 1. An offset (usually positive to avoid dividing by 0)
    - alpha: An optional float. Defaults to 1. A scale factor, usually positive
    - beta: An optional float. Defaults to 0.5. An exponent
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type as input
    """

def l2_normalize(x, axis=None, epsilon=1e-12, name=None):
    """
    Normalizes along dimension axis using an L2 norm.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - x: A Tensor
    - axis: Dimension along which to normalize. A scalar or a vector of integers
    - epsilon: A lower bound value for the norm. Will use sqrt(epsilon) as the divisor if norm < sqrt(epsilon)
    - name: A name for this operation
    
    Returns:
    A Tensor with the same shape as x
    """

Loss Functions

Loss functions for training neural networks.

def softmax_cross_entropy_with_logits(labels, logits, axis=-1, name=None):
    """
    Computes softmax cross entropy between logits and labels.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - labels: Each vector along the class dimension should hold a valid probability distribution
    - logits: Per-label activations, typically a linear output
    - axis: The class dimension. Defaults to -1, which is the last dimension
    - name: A name for the operation
    
    Returns:
    A Tensor that contains the softmax cross entropy loss
    """

def sparse_softmax_cross_entropy_with_logits(labels, logits, name=None):
    """
    Computes sparse softmax cross entropy between logits and integer class labels.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - labels: Tensor of shape [...] and dtype int32 or int64
    - logits: Per-label activations of shape [..., num_classes] and dtype float16, float32, or float64
    - name: A name for the operation
    
    Returns:
    A Tensor of the same shape as labels and of the same type as logits with the softmax cross entropy loss
    """

def sigmoid_cross_entropy_with_logits(labels, logits, name=None):
    """
    Computes sigmoid cross entropy given logits.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - labels: A Tensor of the same type and shape as logits
    - logits: A Tensor of type float32 or float64
    - name: A name for the operation
    
    Returns:
    A Tensor of the same shape as logits with the componentwise logistic losses
    """

def l2_loss(t, name=None):
    """
    Computes half the L2 norm of a tensor without the sqrt: sum(t ** 2) / 2.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - t: A Tensor. Must be one of the following types: half, bfloat16, float32, float64
    - name: A name for the operation
    
    Returns:
    A Tensor. Has the same type as t
    """

def mean_squared_error(y_true, y_pred):
    """
    Computes the mean squared error between labels and predictions.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - y_true: Ground truth values
    - y_pred: The predicted values
    
    Returns:
    Mean squared error values
    """

def mean_absolute_error(y_true, y_pred):
    """
    Computes the mean absolute error between labels and predictions.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - y_true: Ground truth values
    - y_pred: The predicted values
    
    Returns:
    Mean absolute error values
    """

Dropout and Regularization

Operations for regularization and preventing overfitting.

def dropout(x, rate, noise_shape=None, seed=None, training=None, name=None):
    """
    Computes dropout: randomly sets elements to zero to prevent overfitting.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - x: A floating point tensor
    - rate: A scalar Tensor with the same type as x. The probability that each element is discarded
    - noise_shape: A 1-D integer Tensor, representing the shape for randomly generated keep/drop flags
    - seed: A Python integer. Used to create random seeds
    - training: Either a Python boolean, or a TensorFlow boolean scalar tensor
    - name: A name for this operation
    
    Returns:
    A Tensor of the same shape of x
    """

def spatial_dropout(x, rate, data_format="channels_last", name=None):
    """
    Spatial 2D version of Dropout.
    
    Interface stub: documents the public contract only; the body is
    intentionally empty (calling it returns None).
    
    Parameters:
    - x: A 4D tensor
    - rate: Float between 0 and 1. Fraction of the input units to drop
    - data_format: 'channels_first' or 'channels_last'
    - name: A name for this operation
    
    Returns:
    A tensor of the same shape as x
    """

Usage Examples

# Usage examples for the tf.nn operations documented above.
import tensorflow as tf

# Activation functions: element-wise transforms of a 1-D tensor.
x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0])
relu_out = tf.nn.relu(x)           # [0.0, 0.0, 0.0, 1.0, 2.0]
sigmoid_out = tf.nn.sigmoid(x)     # [0.119, 0.269, 0.5, 0.731, 0.881]
tanh_out = tf.nn.tanh(x)          # [-0.964, -0.762, 0.0, 0.762, 0.964]

# softmax normalizes along the last axis; each row sums to 1.
logits = tf.constant([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
softmax_out = tf.nn.softmax(logits)  # [[0.09, 0.244, 0.665], [0.09, 0.244, 0.665]]

# Convolution operations
input_data = tf.random.normal([1, 32, 32, 3])  # Batch, Height, Width, Channels
filters = tf.random.normal([5, 5, 3, 64])      # Height, Width, In_channels, Out_channels
conv_out = tf.nn.conv2d(input_data, filters, strides=[1, 1, 1, 1], padding='SAME')

# Pooling operations (scalar ksize/strides broadcast to both spatial dims)
max_pool_out = tf.nn.max_pool2d(conv_out, ksize=2, strides=2, padding='VALID')
avg_pool_out = tf.nn.avg_pool2d(conv_out, ksize=2, strides=2, padding='VALID')

# Loss functions
y_true = tf.constant([0, 1, 2])
# NOTE(review): y_pred here holds probability-like values, but the op expects
# raw (unnormalized) logits — the call still computes, yet real training code
# should pass pre-softmax scores. Consider revising the example values.
y_pred = tf.constant([[0.1, 0.8, 0.1], [0.2, 0.7, 0.1], [0.1, 0.2, 0.7]])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)

# Dropout (rate=0.5 discards each element with probability 0.5 when training)
training_data = tf.random.normal([32, 128])
dropped_out = tf.nn.dropout(training_data, rate=0.5, training=True)

Install with Tessl CLI

npx tessl i tessl/pypi-tensorflow

docs

core.md

data.md

distribute.md

image.md

index.md

keras.md

math.md

nn.md

saved-model.md

tile.json