Python library for Runpod API and serverless worker SDK.
---
Comprehensive pod lifecycle management for creating, monitoring, controlling, and cleaning up GPU and CPU cloud instances on the RunPod platform. Includes template management, container registry authentication, and hardware discovery capabilities.
Complete pod lifecycle operations from creation through termination, with support for various cloud types, GPU configurations, and persistent storage options.
def create_pod(
    name: str,
    image_name: str,
    gpu_type_id: str = None,
    cloud_type: str = "ALL",
    support_public_ip: bool = True,
    start_ssh: bool = True,
    data_center_id: str = None,
    country_code: str = None,
    gpu_count: int = 1,
    volume_in_gb: int = 0,
    container_disk_in_gb: int = None,
    min_vcpu_count: int = 1,
    min_memory_in_gb: int = 1,
    docker_args: str = "",
    ports: str = None,
    volume_mount_path: str = "/runpod-volume",
    env: dict = None,
    template_id: str = None,
    network_volume_id: str = None,
    allowed_cuda_versions: list = None,
    min_download: int = None,
    min_upload: int = None,
    instance_id: str = None
) -> dict:
    """Launch a new GPU or CPU pod instance on the RunPod platform.

    Args:
        name: Display name for the pod.
        image_name: Docker image to run, e.g.
            "runpod/pytorch:1.13.1-py3.10-cuda11.8.0-devel-ubuntu22.04".
        gpu_type_id: GPU type identifier such as "NVIDIA GeForce RTX 3070";
            leave as None for CPU pods.
        cloud_type: Cloud type — one of "ALL", "SECURE", or "COMMUNITY".
        support_public_ip: Whether to assign a public IP to the pod.
        start_ssh: Whether to enable SSH access.
        data_center_id: Specific data center to deploy in.
        country_code: Country code preference for deployment.
        gpu_count: Number of GPUs to allocate.
        volume_in_gb: Persistent volume size in GB (0 for no volume).
        container_disk_in_gb: Container disk size in GB (None for default).
        min_vcpu_count: Minimum number of CPU cores required.
        min_memory_in_gb: Minimum RAM in GB required.
        docker_args: Additional Docker arguments.
        ports: Port mapping configuration, e.g. "8888/http,22/tcp".
        volume_mount_path: Where to mount the persistent volume.
        env: Environment variables dictionary.
        template_id: Pod template ID to use.
        network_volume_id: Network volume ID to attach.
        allowed_cuda_versions: List of allowed CUDA versions.
        min_download: Minimum download speed requirement (Mbps).
        min_upload: Minimum upload speed requirement (Mbps).
        instance_id: Specific instance ID to use.

    Returns:
        dict: Pod creation response with the pod ID and configuration.
    """
def get_pod(pod_id: str) -> dict:
    """Fetch the full record for a single pod.

    Args:
        pod_id: Unique pod identifier.

    Returns:
        dict: Pod details including status, configuration, and runtime info.
    """
def get_pods() -> list:
    """List every pod owned by the current user.

    Returns:
        list: Pod dictionaries with basic information.
    """
def stop_pod(pod_id: str) -> dict:
    """Stop a running pod without terminating it.

    A stopped pod can be brought back online with ``resume_pod``.

    Args:
        pod_id: Identifier of the pod to stop.

    Returns:
        dict: Operation result with the updated pod status.
    """
def resume_pod(pod_id: str) -> dict:
    """Resume a previously stopped pod.

    Args:
        pod_id: Identifier of the pod to resume.

    Returns:
        dict: Operation result with the updated pod status.
    """
def terminate_pod(pod_id: str) -> dict:
    """
    Permanently terminate a pod and release all of its resources.

    This operation is irreversible; unlike stop_pod, the pod cannot be
    resumed afterwards.

    Parameters:
    - pod_id: Pod identifier to terminate

    Returns:
    dict: Termination confirmation
    """

Retrieve information about available GPU types, pricing, and real-time availability across different cloud regions.
def get_gpu(gpu_id: str) -> dict:
    """Look up a single GPU type by its identifier.

    Args:
        gpu_id: GPU type identifier.

    Returns:
        dict: GPU specifications including memory, compute capability,
            and pricing.
    """
def get_gpus() -> list:
    """
    Get the list of all GPU types available on the platform.

    Returns:
    list: GPU type information including availability and pricing
    """

Create and manage pod templates for consistent deployments with predefined configurations.
def create_template(
    name: str,
    image_name: str,
    is_public: bool = False,
    readme: str = None,
    docker_args: str = None,
    container_disk_in_gb: int = 10,
    volume_in_gb: int = 0,
    volume_mount_path: str = "/workspace",
    ports: str = None,
    env: dict = None,
    start_jupyter: bool = True,
    start_ssh: bool = True
) -> dict:
    """
    Create a new pod template for reusable deployment configurations.

    Parameters:
    - name: Template name
    - image_name: Docker image for the template
    - is_public: Whether the template is publicly available
    - readme: Template description and usage instructions
    - docker_args: Docker run arguments
    - container_disk_in_gb: Default container disk size in GB
    - volume_in_gb: Default persistent volume size in GB
    - volume_mount_path: Default volume mount path
    - ports: Default port configuration (e.g. "8888/http,22/tcp")
    - env: Default environment variables
    - start_jupyter: Default Jupyter server setting
    - start_ssh: Default SSH access setting

    Returns:
    dict: Created template information
    """

Manage authentication credentials for private container registries to use custom Docker images.
def create_container_registry_auth(
    name: str,
    username: str,
    password: str,
    registry: str = "docker.io"
) -> dict:
    """Store credentials for a private container registry.

    Args:
        name: Friendly name for the auth configuration.
        username: Registry username.
        password: Registry password or access token.
        registry: Registry URL; defaults to Docker Hub.

    Returns:
        dict: The created authentication configuration.
    """
def update_container_registry_auth(
    auth_id: str,
    name: str = None,
    username: str = None,
    password: str = None,
    registry: str = None
) -> dict:
    """Update an existing container registry authentication entry.

    Args:
        auth_id: Authentication configuration ID.
        name: New friendly name (optional).
        username: New username (optional).
        password: New password or access token (optional).
        registry: New registry URL (optional).

    Returns:
        dict: The updated authentication configuration.
    """
def delete_container_registry_auth(auth_id: str) -> dict:
    """
    Delete stored container registry authentication credentials.

    Parameters:
    - auth_id: Authentication configuration ID to delete

    Returns:
    dict: Deletion confirmation
    """

import runpod
# Authenticate with the RunPod API; subsequent calls use this key.
runpod.set_credentials("your-api-key")
# Launch a PyTorch GPU pod on the secure cloud with a 20 GB container disk,
# a 50 GB persistent volume, and an env var injected into the container.
pod = runpod.create_pod(
name="pytorch-training",
image_name="runpod/pytorch:1.13.1-py3.10-cuda11.8.0-devel-ubuntu22.04",
gpu_type_id="NVIDIA GeForce RTX 3070",
cloud_type="SECURE",
container_disk_in_gb=20,
volume_in_gb=50,
env={"WANDB_API_KEY": "your-wandb-key"}
)
print(f"Pod created: {pod['id']}")

import runpod
import time
# Inspect a single pod's current state by ID
pod_info = runpod.get_pod("your-pod-id")
print(f"Pod status: {pod_info['desiredStatus']}")
# Stop the pod temporarily (it can be resumed later)
runpod.stop_pod("your-pod-id")
time.sleep(30)  # give the stop operation time to complete
# Bring the stopped pod back online
runpod.resume_pod("your-pod-id")
# Enumerate every pod on the account and report its status
pods = runpod.get_pods()
for pod in pods:
print(f"{pod['name']}: {pod['desiredStatus']}")
# Terminate when done (irreversible — releases all resources)
runpod.terminate_pod("your-pod-id")

import runpod
# Create a reusable template capturing disk, volume, env, and Jupyter defaults
template = runpod.create_template(
name="ml-training-template",
image_name="runpod/pytorch:latest",
container_disk_in_gb=30,
volume_in_gb=100,
env={"CUDA_VISIBLE_DEVICES": "0"},
start_jupyter=True,
readme="Template for ML training with PyTorch"
)
# Create a pod from the template; the template's settings take precedence
pod = runpod.create_pod(
name="training-session-1",
image_name="placeholder",  # will be overridden by the template
gpu_type_id="NVIDIA GeForce RTX 3070",
template_id=template['id']
)

import runpod
# Register credentials for a private container registry
auth = runpod.create_container_registry_auth(
name="my-private-registry",
username="myuser",
password="mytoken",
registry="registry.company.com"
)
# Create a pod that pulls a private image from that registry
pod = runpod.create_pod(
name="private-image-pod",
image_name="registry.company.com/myuser/custom-ml-image:latest",
gpu_type_id="NVIDIA GeForce RTX 3070"
)

Install with the Tessl CLI:

npx tessl i tessl/pypi-runpod