Hugging Face Space operations including duplication for private use, hardware configuration, and deployment as Discord bots.
Create private copies of public Hugging Face Spaces for unlimited usage and custom configurations.
@classmethod
def duplicate(
    cls,
    from_id: str,
    to_id: str | None = None,
    hf_token: str | None = None,
    private: bool = True,
    hardware: Literal[
        "cpu-basic",
        "cpu-upgrade",
        "t4-small",
        "t4-medium",
        "a10g-small",
        "a10g-large",
        "a100-large",
    ] | SpaceHardware | None = None,
    secrets: dict[str, str] | None = None,
    sleep_timeout: int = 5,
    max_workers: int = 40,
    verbose: bool = True,
) -> Client:
"""
Duplicate a Hugging Face Space for private use.
Parameters:
- from_id: Source Space identifier (e.g., "abidlabs/whisper")
- to_id: Target Space name (auto-generated if None)
- hf_token: Hugging Face token for authentication
- private: Whether the duplicated Space should be private (default: True)
- hardware: Hardware configuration for the duplicated Space
- secrets: Environment secrets to set in the duplicated Space
- sleep_timeout: Time to wait between status checks during duplication
- max_workers: Maximum thread workers for the new client
- verbose: Whether to print status messages
Returns:
Client instance connected to the duplicated Space
Raises:
- AuthenticationError: If HF token is invalid or missing for private operations
- ValueError: If duplication fails or Space not found
"""Deploy Gradio applications as Discord bots with configurable API endpoints and interaction patterns.
def deploy_discord(
    self,
    discord_bot_token: str | None = None,
    api_names: list[str | tuple[str, str]] | None = None,
    to_id: str | None = None,
    hf_token: str | None = None,
    private: bool = False,
) -> None:
"""
Deploy the Gradio app as a Discord bot.
Parameters:
- discord_bot_token: Discord bot token for authentication (optional if explained in space)
- api_names: List of API endpoint names or (name, display_name) tuples to expose via Discord
- to_id: Target Space name for the Discord bot deployment (auto-generated if None)
- hf_token: Hugging Face token for Space operations
- private: Whether the space hosting the discord bot should be private
Raises:
- ValueError: If Discord token is invalid or deployment fails
- AuthenticationError: If HF token is required but invalid
"""# Hardware options for Space duplication
SpaceHardware = Literal[
    "cpu-basic",
    "cpu-upgrade",
    "t4-small",
    "t4-medium",
    "a10g-small",
    "a10g-large",
    "a100-large",
]
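
As the duplicate signature shows, the hardware argument accepts either one of the string literals above or a SpaceHardware value from huggingface_hub. A minimal sketch of the enum form, assuming huggingface_hub is installed alongside gradio_client and using placeholder Space and token values:
from gradio_client import Client
from huggingface_hub import SpaceHardware

# Pass the enum member instead of the raw string "t4-small"
client = Client.duplicate(
    "abidlabs/whisper-large-v2",
    hardware=SpaceHardware.T4_SMALL,
    hf_token="hf_your_token",
)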
from gradio_client import Client

# Duplicate a public Space for unlimited usage
client = Client.duplicate("abidlabs/whisper-large-v2")
# The client is now connected to your private copy
result = client.predict("audio.wav")
print(result)

from gradio_client import Client
# Duplicate with custom configuration
client = Client.duplicate(
    from_id="abidlabs/whisper-large-v2",
    to_id="my-whisper-copy",
    hf_token="hf_your_token_here",
    hardware="t4-small",
    private=True,
    sleep_timeout=10,  # Space sleeps after 10 minutes of inactivity
)
# Use the duplicated Space
result = client.predict("my_audio.wav")from gradio_client import Client
from gradio_client import Client

# Duplicate with GPU hardware for faster processing
client = Client.duplicate(
    "abidlabs/stable-diffusion",
    hardware="a10g-small",  # Use A10G GPU
    hf_token="hf_your_token",
)
# Generate images with GPU acceleration
image = client.predict(
    "a beautiful landscape",
    api_name="/generate",
)

from gradio_client import Client
# Connect to a Gradio app
client = Client("abidlabs/whisper-large-v2")
# Deploy as Discord bot
client.deploy_discord(
    discord_bot_token="your_discord_bot_token",
    api_names=["/predict"],  # Expose the prediction endpoint
    to_id="whisper-discord-bot",
)
# The bot is now deployed and can be invited to Discord servers
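
As the deploy_discord docstring notes, api_names also accepts (name, display_name) tuples, so the Discord-facing command name can differ from the endpoint name. A sketch under that assumption, reusing placeholder tokens:
from gradio_client import Client

client = Client("abidlabs/whisper-large-v2")
# Expose /predict under a friendlier Discord-facing name
client.deploy_discord(
    discord_bot_token="your_discord_bot_token",
    api_names=[("/predict", "transcribe")],  # (name, display_name) pair
    to_id="whisper-transcribe-bot",
)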
from gradio_client import Client

# Connect to previously duplicated Space
# If Space already exists, it will connect instead of duplicating again
client = Client.duplicate(
    "abidlabs/whisper",
    to_id="my-existing-whisper",  # Reuses this Space if it was already duplicated
)
# Check if this is a new duplication or existing connection
print(f"Connected to: {client.src}")from gradio_client import Client
# Duplicate with basic CPU for cost efficiency
client = Client.duplicate(
    "username/text-processing-space",
    hardware="cpu-basic",  # Most cost-effective option
    hf_token="hf_your_token",
)
# Process text data efficiently
result = client.predict("input text", api_name="/process")
# Close the client when done; the Space goes to sleep (and stops billing) once its sleep_timeout elapses
client.close()
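
Sleeping only stops charges after the sleep_timeout elapses; to stop billing immediately, the Space itself can be paused through huggingface_hub. A sketch, assuming huggingface_hub is installed and the duplicated Space lives under your username:
from huggingface_hub import HfApi

api = HfApi(token="hf_your_token")
# Pausing stops the Space's hardware (and billing) until it is restarted
api.pause_space("your-username/text-processing-space")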
from gradio_client import Client
import time
# Duplicate for batch processing
client = Client.duplicate(
    "abidlabs/image-classifier",
    hardware="t4-medium",
    hf_token="hf_your_token",
)
# Process multiple images
image_files = ["img1.jpg", "img2.jpg", "img3.jpg"]
results = []
for image_file in image_files:
    result = client.predict(image_file, api_name="/classify")
    results.append(result)
    time.sleep(0.1)  # Rate limiting
print(f"Processed {len(results)} images")
# Clean up
client.close()
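
The max_workers parameter sizes the client's thread pool, which matters when requests are submitted concurrently instead of called one at a time. A sketch of a concurrent batch using submit(), with placeholder file names:
from gradio_client import Client

client = Client.duplicate(
    "abidlabs/image-classifier",
    hf_token="hf_your_token",
    max_workers=8,  # up to 8 predictions in flight at once
)
# submit() returns a Job immediately; result() blocks until that job finishes
jobs = [client.submit(f, api_name="/classify") for f in ["img1.jpg", "img2.jpg", "img3.jpg"]]
results = [job.result() for job in jobs]
print(f"Processed {len(results)} images")
client.close()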