CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-timm

PyTorch Image Models library providing state-of-the-art computer vision models, training scripts, and utilities for image classification tasks

Overview
Eval results
Files

docs/layers.md

Layers and Components

Extensive collection of neural network building blocks including activations, attention mechanisms, convolutions, normalization layers, and specialized components for vision architectures.

Capabilities

Layer Factory Functions

Factory functions for creating various neural network components with consistent interfaces.

def create_conv2d(
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    stride: int = 1,
    padding: Union[str, int] = '',
    dilation: int = 1,
    groups: int = 1,
    bias: bool = True,
    **kwargs
) -> torch.nn.Module:
    """
    Create 2D convolution layer with advanced padding options.

    Args:
        in_channels: Number of input channels
        out_channels: Number of output channels
        kernel_size: Convolution kernel size
        stride: Convolution stride
        padding: Padding mode ('', 'same', 'valid', or an explicit integer);
            '' presumably selects a default symmetric padding -- TODO confirm
        dilation: Convolution dilation
        groups: Number of convolution groups
        bias: Include bias parameter
        **kwargs: Additional convolution arguments

    Returns:
        Configured convolution layer
    """

def create_norm_layer(
    layer_name: str,
    num_features: int,
    eps: float = 1e-5,
    **kwargs
) -> torch.nn.Module:
    """
    Create normalization layer by name.

    NOTE(review): presumably raises if ``layer_name`` is unknown -- confirm.

    Args:
        layer_name: Normalization type ('batchnorm', 'layernorm', 'groupnorm', etc.)
        num_features: Number of features to normalize
        eps: Epsilon for numerical stability
        **kwargs: Layer-specific arguments

    Returns:
        Configured normalization layer
    """

def create_act_layer(
    layer_name: str,
    inplace: bool = True,
    **kwargs
) -> torch.nn.Module:
    """
    Create activation layer by name.

    Args:
        layer_name: Activation type ('relu', 'gelu', 'swish', etc.)
        inplace: Use inplace operations when possible (ignored by activations
            that have no inplace variant -- TODO confirm)
        **kwargs: Activation-specific arguments

    Returns:
        Configured activation layer
    """

def create_pool2d(
    pool_type: str,
    kernel_size: int,
    stride: Optional[int] = None,
    **kwargs
) -> torch.nn.Module:
    """
    Create 2D pooling layer.

    Args:
        pool_type: Pooling type ('avg', 'max', 'adaptiveavg', etc.)
        kernel_size: Pooling kernel size
        stride: Pooling stride; None presumably defaults to ``kernel_size``
            -- TODO confirm
        **kwargs: Pool-specific arguments

    Returns:
        Configured pooling layer
    """

Global Configuration Functions

Functions to control global layer behavior for scriptability and exportability.

def set_scriptable(enable: bool = True) -> None:
    """
    Set global scriptable mode for layers.

    NOTE(review): a process-wide flag; presumably only affects layers created
    (or run) after the call -- confirm scope before relying on it.

    Args:
        enable: Enable scriptable mode for TorchScript compatibility
    """

def set_exportable(enable: bool = True) -> None:
    """
    Set global exportable mode for layers.

    NOTE(review): a process-wide flag; presumably only affects layers created
    (or run) after the call -- confirm scope before relying on it.

    Args:
        enable: Enable exportable mode for ONNX/TensorRT compatibility
    """

def set_fused_attn(enable: bool = True) -> None:
    """
    Set fused attention mode globally.

    NOTE(review): presumably toggles use of fused/scaled-dot-product attention
    kernels in attention layers -- confirm which layers honor the flag.

    Args:
        enable: Enable fused attention implementations
    """

def is_scriptable() -> bool:
    """
    Check if layers are in scriptable mode.

    Counterpart query for :func:`set_scriptable`.

    Returns:
        True if scriptable mode is enabled
    """

def is_exportable() -> bool:
    """
    Check if layers are in exportable mode.

    Counterpart query for :func:`set_exportable`.

    Returns:
        True if exportable mode is enabled
    """

Activation Functions

Core Activation Classes

class Swish(torch.nn.Module):
    """
    Swish activation function (x * sigmoid(x)).

    Also known as SiLU.

    Args:
        inplace: Use inplace operations
    """
    
    def __init__(self, inplace: bool = False): ...

class Mish(torch.nn.Module):
    """
    Mish activation function (x * tanh(softplus(x))).

    Smooth, non-monotonic activation; see Misra, "Mish: A Self Regularized
    Non-Monotonic Activation Function".

    Args:
        inplace: Use inplace operations
    """
    
    def __init__(self, inplace: bool = False): ...

class GELU(torch.nn.Module):
    """
    Gaussian Error Linear Unit activation.

    Args:
        approximate: Approximation mode: 'none' for the exact formulation or
            'tanh' for the tanh approximation
    """
    
    def __init__(self, approximate: str = 'none'): ...

class HardSwish(torch.nn.Module):
    """Hard Swish activation function (piecewise-linear Swish approximation).

    Args:
        inplace: Use inplace operations
    """
    
    def __init__(self, inplace: bool = False): ...

class HardSigmoid(torch.nn.Module):
    """Hard Sigmoid activation function (piecewise-linear sigmoid approximation).

    Args:
        inplace: Use inplace operations
    """
    
    def __init__(self, inplace: bool = False): ...

class PReLU(torch.nn.Module):
    """
    Parametric ReLU activation.

    Args:
        num_parameters: Number of learnable slope parameters (typically 1,
            shared, or one per channel)
        init: Initial value for parameters
    """
    
    def __init__(self, num_parameters: int = 1, init: float = 0.25): ...

Functional Activations

def swish(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """Functional Swish (SiLU) activation: ``x * sigmoid(x)``.

    Args:
        x: Input tensor
        inplace: Multiply into ``x`` in place to avoid an extra allocation

    Returns:
        Activated tensor (``x`` itself when ``inplace`` is True)
    """
    # sigmoid(x) is computed out of place first so the in-place multiply
    # does not corrupt the gate value.
    return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())

def mish(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """Functional Mish activation: ``x * tanh(softplus(x))``.

    Args:
        x: Input tensor
        inplace: Multiply into ``x`` in place (the tanh/softplus factor is
            still computed out of place)

    Returns:
        Activated tensor (``x`` itself when ``inplace`` is True)
    """
    gate = torch.nn.functional.softplus(x).tanh()
    return x.mul_(gate) if inplace else x.mul(gate)

def hard_swish(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """Functional Hard Swish activation: ``x * relu6(x + 3) / 6``.

    Piecewise-linear approximation of Swish (as used in MobileNetV3).

    Args:
        x: Input tensor
        inplace: Multiply into ``x`` in place

    Returns:
        Activated tensor (``x`` itself when ``inplace`` is True)
    """
    # relu6(x + 3) allocates a fresh tensor, so div_ here is safe.
    gate = torch.nn.functional.relu6(x + 3.0).div_(6.0)
    return x.mul_(gate) if inplace else x.mul(gate)

def hard_sigmoid(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """Functional Hard Sigmoid activation: ``relu6(x + 3) / 6``.

    Piecewise-linear approximation of sigmoid: 0 below -3, 1 above +3,
    linear in between.

    Args:
        x: Input tensor
        inplace: Compute in place on ``x``

    Returns:
        Activated tensor (``x`` itself when ``inplace`` is True)
    """
    if inplace:
        return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
    return torch.nn.functional.relu6(x + 3.0) / 6.0

Attention Mechanisms

Attention Classes

class Attention(torch.nn.Module):
    """
    Multi-head self-attention layer.

    Args:
        dim: Input dimension (presumably must be divisible by num_heads --
            TODO confirm)
        num_heads: Number of attention heads
        qkv_bias: Include bias in QKV projection
        qk_norm: Apply normalization to Q and K
        attn_drop: Attention dropout rate
        proj_drop: Projection dropout rate
        norm_layer: Normalization layer constructor -- a class/callable, not
            an instance; the default is the ``torch.nn.LayerNorm`` class itself
    """
    
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        qk_norm: bool = False,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        norm_layer: Callable[..., torch.nn.Module] = torch.nn.LayerNorm
    ): ...

class AttentionPool2d(torch.nn.Module):
    """
    Attention-based 2D pooling layer.

    Args:
        in_features: Input feature dimension
        out_features: Output feature dimension; None presumably falls back to
            ``in_features`` -- TODO confirm
        embed_dim: Embedding dimension; None presumably falls back to
            ``in_features`` -- TODO confirm
        num_heads: Number of attention heads
        qkv_bias: Include bias in QKV projection
    """
    
    def __init__(
        self,
        in_features: int,
        out_features: Optional[int] = None,
        embed_dim: Optional[int] = None,
        num_heads: int = 8,
        qkv_bias: bool = True
    ): ...

class SEModule(torch.nn.Module):
    """
    Squeeze-and-Excitation module.

    Args:
        channels: Number of input channels
        rd_ratio: Reduction ratio for squeeze operation
        rd_channels: Explicit reduction channels; presumably overrides
            ``rd_ratio`` when set -- TODO confirm
        rd_divisor: Divisor for reduction channels
        bias: Include bias in FC layers
        act_layer: Activation layer type -- a class/callable, not an instance
        gate_layer: Gate activation layer type -- a class/callable, not an
            instance
    """
    
    def __init__(
        self,
        channels: int,
        rd_ratio: float = 1./16,
        rd_channels: Optional[int] = None,
        rd_divisor: int = 8,
        bias: bool = True,
        act_layer: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        gate_layer: Callable[..., torch.nn.Module] = torch.nn.Sigmoid
    ): ...

class EcaModule(torch.nn.Module):
    """
    Efficient Channel Attention module.

    Args:
        channels: Number of input channels; when given, presumably used with
            ``gamma``/``beta`` to derive an adaptive kernel size -- TODO confirm
        kernel_size: Convolution kernel size for attention
        gamma: Gamma parameter for kernel size calculation
        beta: Beta parameter for kernel size calculation
    """
    
    def __init__(
        self,
        channels: Optional[int] = None,
        kernel_size: int = 3,
        gamma: int = 2,
        beta: int = 1
    ): ...

Convolution Layers

Advanced Convolution Classes

class Conv2dSame(torch.nn.Conv2d):
    """
    2D convolution with TensorFlow-style SAME padding mode.

    Padding is computed dynamically so output spatial size equals
    ceil(input / stride), which may require asymmetric padding.

    Args:
        in_channels: Number of input channels
        out_channels: Number of output channels
        kernel_size: Convolution kernel size
        stride: Convolution stride
        padding: Accepted for interface compatibility only -- ignored, the
            SAME padding is computed from kernel/stride/dilation
        dilation: Convolution dilation
        groups: Number of groups
        bias: Include bias parameter
    """
    
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: str = 'SAME',
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True
    ): ...

class ConvNormAct(torch.nn.Module):
    """
    Convolution + Normalization + Activation block.

    Args:
        in_channels: Number of input channels
        out_channels: Number of output channels
        kernel_size: Convolution kernel size
        stride: Convolution stride
        padding: Padding specification
        dilation: Convolution dilation
        groups: Number of groups
        bias: Include convolution bias (False by default since the norm layer
            typically supplies an affine shift)
        norm_layer: Normalization layer constructor -- a class/callable, not
            an instance
        act_layer: Activation layer constructor -- a class/callable, not an
            instance
        drop_layer: Dropout layer constructor, or None for no dropout
    """
    
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 1,
        stride: int = 1,
        padding: str = '',
        dilation: int = 1,
        groups: int = 1,
        bias: bool = False,
        norm_layer: Callable[..., torch.nn.Module] = torch.nn.BatchNorm2d,
        act_layer: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        drop_layer: Optional[Callable[..., torch.nn.Module]] = None
    ): ...

class MixedConv2d(torch.nn.Module):
    """
    Mixed depthwise convolution with multiple kernel sizes.

    When ``kernel_size`` is a list, the channels are presumably split into
    one group per kernel size (MixNet-style) -- TODO confirm.

    Args:
        in_channels: Number of input channels
        out_channels: Number of output channels
        kernel_size: List of kernel sizes or single size
        stride: Convolution stride
        padding: Padding specification
        dilation: Convolution dilation
        depthwise: Use depthwise convolution
        **kwargs: Additional convolution arguments
    """
    
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, List[int]] = 3,
        stride: int = 1,
        padding: str = '',
        dilation: int = 1,
        depthwise: bool = False,
        **kwargs
    ): ...

Normalization Layers

Normalization Classes

class LayerNorm(torch.nn.Module):
    """
    Layer normalization with optional 2D support.

    Args:
        normalized_shape: Input shape to normalize over (trailing dimensions,
            per ``torch.nn.LayerNorm`` semantics)
        eps: Epsilon for numerical stability
        elementwise_affine: Learn per-element affine parameters
        bias: Include bias parameter (only meaningful when
            ``elementwise_affine`` is True -- TODO confirm)
    """
    
    def __init__(
        self,
        normalized_shape: Union[int, List[int], torch.Size],
        eps: float = 1e-5,
        elementwise_affine: bool = True,
        bias: bool = True
    ): ...

class LayerNorm2d(torch.nn.Module):
    """
    2D Layer normalization (channel-wise).

    Normalizes over the channel dimension of a 2D feature map -- assumes
    NCHW input, TODO confirm. Note the default ``eps`` (1e-6) differs from
    the 1e-5 used by ``LayerNorm`` above.

    Args:
        num_channels: Number of channels
        eps: Epsilon for numerical stability
        affine: Learn affine parameters
    """
    
    def __init__(
        self,
        num_channels: int,
        eps: float = 1e-6,
        affine: bool = True
    ): ...

class RmsNorm(torch.nn.Module):
    """
    Root Mean Square normalization.

    Scales by the root-mean-square of the inputs without mean subtraction
    (unlike LayerNorm).

    Args:
        dim: Normalization dimension
        eps: Epsilon for numerical stability
        bias: Include bias parameter
    """
    
    def __init__(
        self,
        dim: int,
        eps: float = 1e-8,
        bias: bool = False
    ): ...

class BatchNormAct2d(torch.nn.Module):
    """
    BatchNorm + Activation in single layer.

    Args:
        num_features: Number of features
        eps: Epsilon for numerical stability
        momentum: Momentum for running statistics
        affine: Learn affine parameters
        track_running_stats: Track running statistics
        act_layer: Activation layer constructor -- a class/callable, not an
            instance
        inplace: Use inplace activation
        drop_layer: Dropout layer constructor, or None for no dropout
    """
    
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = True,
        track_running_stats: bool = True,
        act_layer: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        inplace: bool = True,
        drop_layer: Optional[Callable[..., torch.nn.Module]] = None
    ): ...

Pooling Layers

Advanced Pooling Classes

class AdaptiveAvgMaxPool2d(torch.nn.Module):
    """
    Adaptive average + max pooling combination.

    Combines adaptive average and adaptive max pooling of the input --
    presumably as 0.5 * (avg + max), TODO confirm.

    Args:
        output_size: Target output size
    """
    
    def __init__(self, output_size: int = 1): ...

class SelectAdaptivePool2d(torch.nn.Module):
    """
    Selectable adaptive pooling (avg, max, avgmax, catavgmax).

    Args:
        output_size: Target output size
        pool_type: Pooling type ('avg', 'max', 'avgmax', 'catavgmax');
            'catavgmax' concatenates avg and max results, presumably doubling
            the channel dimension -- TODO confirm
        flatten: Flatten output
    """
    
    def __init__(
        self,
        output_size: int = 1,
        pool_type: str = 'avg',
        flatten: bool = False
    ): ...

class BlurPool2d(torch.nn.Module):
    """
    Blur pooling for anti-aliasing.

    Applies a low-pass (blur) filter before subsampling to reduce aliasing
    when downsampling (Zhang, "Making Convolutional Networks Shift-Invariant
    Again").

    Args:
        channels: Number of input channels
        filt_size: Blur filter size
        stride: Pooling stride
    """
    
    def __init__(
        self,
        channels: int,
        filt_size: int = 4,
        stride: int = 2
    ): ...

Embedding Layers

Vision Transformer Embeddings

class PatchEmbed(torch.nn.Module):
    """
    2D image to patch embedding.

    Args:
        img_size: Input image size
        patch_size: Patch size; patch count is presumably
            (img_size // patch_size) ** 2 -- TODO confirm
        in_chans: Number of input channels
        embed_dim: Embedding dimension
        norm_layer: Normalization layer constructor, or None for no
            normalization
        flatten: Flatten spatial dimensions
        bias: Include bias in projection
    """
    
    def __init__(
        self,
        img_size: int = 224,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        flatten: bool = True,
        bias: bool = True
    ): ...

class HybridEmbed(torch.nn.Module):
    """
    CNN feature map to patch embedding.

    Args:
        backbone: CNN backbone model producing the feature map to embed
        img_size: Input image size
        patch_size: Patch size for embedding
        feature_size: Feature map size from backbone; None presumably means
            it is inferred by a forward pass -- TODO confirm
        in_chans: Number of input channels
        embed_dim: Embedding dimension
    """
    
    def __init__(
        self,
        backbone: torch.nn.Module,
        img_size: int = 224,
        patch_size: int = 1,
        feature_size: Optional[int] = None,
        in_chans: int = 3,
        embed_dim: int = 768
    ): ...

Regularization Layers

Dropout Variants

class DropPath(torch.nn.Module):
    """
    Stochastic depth (drop path) regularization.

    Randomly zeroes entire residual paths per sample; presumably active only
    in training mode -- TODO confirm.

    Args:
        drop_prob: Drop probability (0.0 disables)
        scale_by_keep: Scale surviving paths by 1 / (1 - drop_prob) to keep
            the expectation unchanged
    """
    
    def __init__(
        self,
        drop_prob: float = 0.0,
        scale_by_keep: bool = True
    ): ...

class DropBlock2d(torch.nn.Module):
    """
    DropBlock regularization for 2D feature maps.

    Drops contiguous spatial regions rather than independent units
    (Ghiasi et al., "DropBlock").

    Args:
        drop_rate: Drop rate
        block_size: Size of dropped blocks
    """
    
    def __init__(
        self,
        drop_rate: float = 0.1,
        block_size: int = 7
    ): ...

class PatchDropout(torch.nn.Module):
    """
    Patch dropout for vision transformers.

    Randomly drops token/patch positions from the sequence while keeping the
    leading prefix tokens (e.g. class token).

    Args:
        prob: Dropout probability
        num_prefix_tokens: Number of prefix tokens to preserve
        ordered: Use ordered dropout (keep remaining patches in original
            order -- TODO confirm)
        return_indices: Also return the kept/dropped indices -- confirm which
            against the implementation
    """
    
    def __init__(
        self,
        prob: float = 0.5,
        num_prefix_tokens: int = 1,
        ordered: bool = False,
        return_indices: bool = False
    ): ...

MLP and Feed-Forward Layers

MLP Variants

class Mlp(torch.nn.Module):
    """
    Multi-layer perceptron.

    Args:
        in_features: Input feature dimension
        hidden_features: Hidden layer dimension; None presumably falls back
            to ``in_features`` -- TODO confirm
        out_features: Output feature dimension; None presumably falls back
            to ``in_features`` -- TODO confirm
        act_layer: Activation layer constructor -- a class/callable, not an
            instance
        norm_layer: Normalization layer constructor, or None for no
            normalization
        bias: Include bias parameters
        drop: Dropout rate
        use_conv: Use 1x1 convolution instead of linear
    """
    
    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., torch.nn.Module] = torch.nn.GELU,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        bias: bool = True,
        drop: float = 0.0,
        use_conv: bool = False
    ): ...

class GluMlp(torch.nn.Module):
    """
    MLP with Gated Linear Unit (GLU) activation.

    The hidden projection is presumably split into value and gate halves,
    with the gate passed through ``act_layer`` -- TODO confirm.

    Args:
        in_features: Input feature dimension
        hidden_features: Hidden layer dimension; None presumably falls back
            to ``in_features`` -- TODO confirm
        out_features: Output feature dimension; None presumably falls back
            to ``in_features`` -- TODO confirm
        act_layer: Activation layer constructor for the gate -- a
            class/callable, not an instance
        norm_layer: Normalization layer constructor, or None for no
            normalization
        bias: Include bias parameters
        drop: Dropout rate
        gate_last: Apply gate after activation
    """
    
    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        bias: bool = True,
        drop: float = 0.0,
        gate_last: bool = True
    ): ...

Utility Functions

Helper Functions

def to_ntuple(n: int) -> Callable:
    """
    Create function to convert input to n-tuple.

    The returned function passes a non-string iterable through as a tuple
    and repeats any scalar (including strings) ``n`` times.

    Args:
        n: Tuple length

    Returns:
        Function that converts input to n-tuple
    """
    def parse(x):
        # Strings are iterable but are treated as scalar values here.
        if isinstance(x, abc.Iterable) and not isinstance(x, str):
            return tuple(x)
        return tuple(repeat(x, n))
    return parse

def make_divisible(
    v: int,
    divisor: int = 8,
    min_value: Optional[int] = None,
    round_limit: float = 0.9
) -> int:
    """
    Make value divisible by divisor.

    Rounds ``v`` to the nearest multiple of ``divisor`` (never below
    ``min_value``), then bumps up by one ``divisor`` if rounding would
    shrink the value below ``round_limit * v`` -- this keeps channel counts
    from being reduced by more than ~(1 - round_limit).

    Args:
        v: Input value
        divisor: Divisor value
        min_value: Minimum allowed value (defaults to ``divisor``)
        round_limit: Rounding threshold

    Returns:
        Value divisible by divisor
    """
    min_value = min_value or divisor
    # Round to nearest multiple of divisor, clamped to min_value.
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < round_limit * v:
        new_v += divisor
    return new_v

def get_padding(
    kernel_size: int,
    stride: int = 1,
    dilation: int = 1
) -> int:
    """
    Calculate symmetric padding for a convolution.

    For odd kernel sizes this yields 'same'-style output sizes when the
    input size is divisible by the stride.

    Args:
        kernel_size: Convolution kernel size
        stride: Convolution stride
        dilation: Convolution dilation

    Returns:
        Required padding
    """
    return ((stride - 1) + dilation * (kernel_size - 1)) // 2

Weight Initialization

def trunc_normal_(
    tensor: torch.Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    a: float = -2.0,
    b: float = 2.0
) -> torch.Tensor:
    """
    Initialize tensor with truncated normal distribution (in place, per the
    PyTorch trailing-underscore convention).

    Values are drawn from N(mean, std) truncated to [a, b] --
    NOTE(review): confirm whether the bounds are absolute values or
    multiples of ``std``.

    Args:
        tensor: Tensor to initialize
        mean: Mean of distribution
        std: Standard deviation
        a: Lower truncation bound
        b: Upper truncation bound

    Returns:
        Initialized tensor (presumably the same object as ``tensor``)
    """

def variance_scaling_(
    tensor: torch.Tensor,
    scale: float = 1.0,
    mode: str = 'fan_in',
    distribution: str = 'normal'
) -> torch.Tensor:
    """
    Initialize tensor with variance scaling (in place, per the PyTorch
    trailing-underscore convention).

    Sample variance is ``scale / fan``, where ``fan`` is selected by
    ``mode`` from the tensor's fan-in/fan-out.

    Args:
        tensor: Tensor to initialize
        scale: Scaling factor
        mode: Computation mode ('fan_in', 'fan_out', 'fan_avg')
        distribution: Distribution type ('normal', 'uniform')

    Returns:
        Initialized tensor (presumably the same object as ``tensor``)
    """

def lecun_normal_(tensor: torch.Tensor) -> torch.Tensor:
    """
    Initialize tensor with LeCun normal initialization (in place).

    Presumably equivalent to variance scaling with ``scale=1.0``,
    ``mode='fan_in'`` and a (truncated) normal distribution -- TODO confirm.

    Args:
        tensor: Tensor to initialize

    Returns:
        Initialized tensor (presumably the same object as ``tensor``)
    """

Types

from typing import Optional, Union, List, Dict, Callable, Any, Tuple
import torch

# Layer types
# NOTE(review): these alias the torch.nn.Module class itself, so annotations
# using them describe module *instances*. Parameters documented above as
# constructors (norm_layer, act_layer, ...) may instead want a
# Callable[..., torch.nn.Module] alias -- confirm intended usage.
LayerType = torch.nn.Module
ActivationType = torch.nn.Module
NormType = torch.nn.Module

# Padding types: named mode string, single int, or per-edge int tuple
PadType = Union[str, int, Tuple[int, ...]]

# Dimension types: single size or per-axis tuple of sizes
DimType = Union[int, Tuple[int, ...]]

# Format enumeration (plain string constants, not an enum.Enum)
class Format:
    """Tensor memory-layout identifiers (N=batch, C=channels, H=height,
    W=width, L=sequence length)."""
    NCHW = 'NCHW'  # channels-first 2D feature maps
    NHWC = 'NHWC'  # channels-last 2D feature maps
    NCL = 'NCL'   # channels-first 1D sequences
    NLC = 'NLC'   # channels-last 1D sequences

Install with Tessl CLI

npx tessl i tessl/pypi-timm

docs

data.md

features.md

index.md

layers.md

models.md

training.md

utils.md

tile.json