The Deep Learning framework to train, deploy, and ship AI products Lightning fast.
---
Support for various hardware accelerators including CPU, CUDA GPUs, Apple Metal Performance Shaders, and Google TPUs with automatic device detection and optimization.
CPU-based training for development, debugging, and CPU-only environments.
class CPUAccelerator:
def setup_device(self, device: torch.device) -> None:
"""Set up CPU device for training."""
def get_device_stats(self, device: torch.device) -> Dict[str, Any]:
"""Get CPU device statistics."""
@staticmethod
def parse_devices(devices: Union[int, str, List[int]]) -> int:
"""Parse CPU device specification."""
@staticmethod
def get_parallel_devices(devices: int) -> List[torch.device]:
"""Get list of CPU devices for parallel training."""
@staticmethod
def auto_device_count() -> int:
"""Get number of available CPU cores."""
@staticmethod
def is_available() -> bool:
"""Check if CPU is available."""

NVIDIA GPU acceleration with CUDA support for high-performance training.
class CUDAAccelerator:
def setup_device(self, device: torch.device) -> None:
"""Set up CUDA device for training."""
def get_device_stats(self, device: torch.device) -> Dict[str, Any]:
"""Get CUDA device statistics including memory usage."""
@staticmethod
def parse_devices(devices: Union[int, str, List[int]]) -> List[int]:
"""Parse CUDA device specification."""
@staticmethod
def get_parallel_devices(devices: List[int]) -> List[torch.device]:
"""Get list of CUDA devices for parallel training."""
@staticmethod
def auto_device_count() -> int:
"""Get number of available CUDA devices."""
@staticmethod
def is_available() -> bool:
"""Check if CUDA is available."""
def find_usable_cuda_devices(num_gpus: int = -1) -> List[int]:
"""
Find usable CUDA devices.
Args:
num_gpus: Number of GPUs to find (-1 for all)
Returns:
List of usable CUDA device IDs
"""

Apple Silicon GPU acceleration for M1/M2 Macs.
class MPSAccelerator:
def setup_device(self, device: torch.device) -> None:
"""Set up MPS device for training."""
def get_device_stats(self, device: torch.device) -> Dict[str, Any]:
"""Get MPS device statistics."""
@staticmethod
def parse_devices(devices: Union[int, str, List[int]]) -> int:
"""Parse MPS device specification."""
@staticmethod
def get_parallel_devices(devices: int) -> List[torch.device]:
"""Get MPS device for training."""
@staticmethod
def auto_device_count() -> int:
"""Get number of available MPS devices."""
@staticmethod
def is_available() -> bool:
"""Check if MPS is available."""

Google TPU acceleration using XLA compilation.
class XLAAccelerator:
def setup_device(self, device: torch.device) -> None:
"""Set up XLA device for training."""
def get_device_stats(self, device: torch.device) -> Dict[str, Any]:
"""Get XLA device statistics."""
@staticmethod
def parse_devices(devices: Union[int, str, List[int]]) -> List[int]:
"""Parse XLA device specification."""
@staticmethod
def get_parallel_devices(devices: List[int]) -> List[torch.device]:
"""Get list of XLA devices for parallel training."""
@staticmethod
def auto_device_count() -> int:
"""Get number of available XLA devices."""
@staticmethod
def is_available() -> bool:
"""Check if XLA is available."""

Install with Tessl CLI:
npx tessl i tessl/pypi-lightning