Python library for easily interacting with trained machine learning models
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Functions for loading models from Hugging Face, integrating with existing web frameworks, working with external APIs, and connecting Gradio applications with other systems and services.
Functions for loading and integrating with Hugging Face models, datasets, and Spaces directly into Gradio interfaces.
def load(
name,
src=None,
api_key=None,
alias=None,
**kwargs
):
"""
Load external Hugging Face models and Spaces into Gradio interfaces.
Parameters:
- name: Model or Space name (e.g., "microsoft/DialoGPT-medium", "huggingface/CodeBERTa-small-v1")
- src: Source type ("huggingface", "github", or None for auto-detection)
- api_key: Hugging Face API key for private models
- alias: Alternative name for the loaded interface
- kwargs: Additional parameters for model loading
Returns:
- Gradio interface object for the loaded model/Space
"""
def load_chat(
name,
src=None,
api_key=None,
**kwargs
):
"""
Load chat interfaces from external sources with conversational UI.
Parameters:
- name: Chat model or Space name
- src: Source type ("huggingface" or None)
- api_key: API key for private models
- kwargs: Additional chat configuration parameters
Returns:
- ChatInterface object configured for the loaded model
"""
def load_openapi(
url,
api_key=None,
headers=None,
**kwargs
):
"""
Load and integrate OpenAPI specifications as Gradio interfaces.
Parameters:
- url: URL to OpenAPI specification (JSON or YAML)
- api_key: API key for authenticated endpoints
- headers: Additional HTTP headers for API requests
- kwargs: Additional configuration options
Returns:
- Gradio interface for interacting with the API
"""Usage examples:
import gradio as gr
# Load a Hugging Face model
text_generator = gr.load("microsoft/DialoGPT-medium")
# Load a Hugging Face Space
image_classifier = gr.load("huggingface/image-classification-demo")
# Load with custom configuration
private_model = gr.load(
"organization/private-model",
api_key="hf_your_token_here",
src="huggingface"
)
# Load chat interface
chatbot = gr.load_chat("microsoft/DialoGPT-medium")
# Load OpenAPI service
weather_api = gr.load_openapi(
"https://api.openweathermap.org/data/2.5/openapi.json",
api_key="your_weather_api_key"
)
# Combine loaded interfaces
with gr.Blocks() as demo:
gr.Markdown("# Multi-Model Demo")
with gr.Tab("Text Generation"):
text_generator.render()
with gr.Tab("Image Classification"):
image_classifier.render()
with gr.Tab("Chat"):
chatbot.render()
demo.launch()

Functions and classes for mounting Gradio applications within existing web frameworks and handling HTTP requests.
def mount_gradio_app(
app,
gradio_app,
path="/gradio",
gradio_api_url="http://localhost:7860",
**kwargs
):
"""
Mount Gradio applications in existing web frameworks (FastAPI, Flask, etc.).
Parameters:
- app: Existing web application instance (FastAPI, Flask app, etc.)
- gradio_app: Gradio interface or Blocks instance to mount
- path: URL path where Gradio app will be mounted
- gradio_api_url: URL for Gradio API endpoints
- kwargs: Additional mounting configuration
Returns:
- Modified web application with Gradio integration
"""
class Request:
def __init__(self, request):
"""
HTTP request wrapper for accessing request data in Gradio functions.
Parameters:
- request: Underlying HTTP request object
Attributes:
- headers: Request headers dictionary
- query_params: Query parameters dictionary
- path_params: Path parameters dictionary
- cookies: Request cookies
- client: Client information (IP, host, etc.)
- method: HTTP method (GET, POST, etc.)
- url: Full request URL
"""
@property
def headers(self):
"""Access request headers."""
@property
def query_params(self):
"""Access query parameters."""
@property
def cookies(self):
"""Access request cookies."""
@property
def client(self):
"""Access client information."""
def get_header(self, name, default=None):
"""Get specific header value."""
class Header:
def __init__(self, name, value):
"""
HTTP header utility for request/response handling.
Parameters:
- name: Header name
- value: Header value
"""Usage examples:
import gradio as gr
from fastapi import FastAPI
import uvicorn
# Create FastAPI app
app = FastAPI()
# Create Gradio interface
def greet(name, request: gr.Request):
client_ip = request.client.host
user_agent = request.headers.get("user-agent", "Unknown")
return f"Hello {name}! IP: {client_ip}, Browser: {user_agent}"
gradio_app = gr.Interface(
fn=greet,
inputs="text",
outputs="text"
)
# Mount Gradio in FastAPI
app = gr.mount_gradio_app(app, gradio_app, path="/demo")
# Add regular FastAPI routes
@app.get("/")
def root():
return {"message": "FastAPI with Gradio demo at /demo"}
@app.get("/api/status")
def status():
return {"status": "running", "demo_url": "/demo"}
# Run combined application
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)

Flask integration example:
import gradio as gr
from flask import Flask
# Create Flask app
flask_app = Flask(__name__)
# Create Gradio interface
def process_data(data, request: gr.Request):
session_id = request.cookies.get("session_id", "anonymous")
return f"Processed {data} for session {session_id}"
gradio_app = gr.Interface(
fn=process_data,
inputs="text",
outputs="text"
)
# Mount Gradio in Flask
flask_app = gr.mount_gradio_app(flask_app, gradio_app, path="/ml")
# Add Flask routes
@flask_app.route("/")
def home():
return '<h1>Flask + Gradio</h1><a href="/ml">ML Demo</a>'
@flask_app.route("/api/info")
def info():
return {"app": "Flask + Gradio", "ml_endpoint": "/ml"}
flask_app.run(debug=True, port=5000)

Classes and utilities for handling OAuth authentication and user management in Gradio applications.
class OAuthProfile:
def __init__(self, profile_data):
"""
OAuth user profile data representation.
Parameters:
- profile_data: Raw profile data from OAuth provider
Attributes:
- username: User's username or handle
- name: User's display name
- email: User's email address
- avatar_url: URL to user's profile picture
- profile_url: URL to user's profile page
- provider: OAuth provider name (e.g., "github", "google")
"""
@property
def username(self):
"""Get username from profile."""
@property
def name(self):
"""Get display name from profile."""
@property
def email(self):
"""Get email from profile."""
@property
def avatar_url(self):
"""Get avatar image URL."""
class OAuthToken:
def __init__(self, token_data):
"""
OAuth token management and validation.
Parameters:
- token_data: Raw token data from OAuth flow
Attributes:
- access_token: OAuth access token
- refresh_token: OAuth refresh token (if available)
- expires_at: Token expiration timestamp
- scope: Token permissions scope
"""
@property
def access_token(self):
"""Get access token."""
@property
def is_expired(self):
"""Check if token is expired."""
def refresh(self):
"""Refresh the access token if refresh token available."""Usage examples:
import gradio as gr
def authenticated_function(input_data, request: gr.Request):
# Check if user is authenticated
auth_header = request.headers.get("authorization")
if not auth_header:
return "Please log in to use this feature"
# Extract user info (simplified example)
user_info = validate_token(auth_header)
if user_info:
return f"Hello {user_info['name']}, processing: {input_data}"
else:
return "Invalid authentication"
def oauth_callback(code, state):
# Handle OAuth callback
token = exchange_code_for_token(code)
profile = get_user_profile(token.access_token)
return f"Welcome {profile.name}!"
# Interface with authentication
auth_demo = gr.Interface(
fn=authenticated_function,
inputs="text",
outputs="text",
title="Authenticated Demo"
)
# OAuth login interface
oauth_demo = gr.Interface(
fn=oauth_callback,
inputs=["text", "text"], # code, state
outputs="text",
title="OAuth Login"
)

Integration with Model Context Protocol for AI agent workflows and advanced model interactions.
# MCP module for Model Context Protocol integration
import gradio.mcp as mcp
# MCP integration functions and classes would be defined here
# This is a placeholder for the MCP functionality

Usage example:
import gradio as gr
import gradio.mcp as mcp
def mcp_enabled_function(input_data):
# Use MCP for enhanced AI interactions
context = mcp.get_context()
response = mcp.process_with_context(input_data, context)
return response
mcp_demo = gr.Interface(
fn=mcp_enabled_function,
inputs="text",
outputs="text",
title="MCP Enhanced Interface"
)

Integrating Gradio with API gateways and microservice architectures:
import gradio as gr
import requests
def proxy_to_service(input_data, request: gr.Request):
# Forward request to microservice
api_gateway_url = "https://api.example.com/ml-service"
headers = {
"Authorization": request.headers.get("authorization"),
"Content-Type": "application/json"
}
response = requests.post(
api_gateway_url,
json={"input": input_data},
headers=headers
)
return response.json()["result"]
# Create interface that proxies to external services
proxy_demo = gr.Interface(
fn=proxy_to_service,
inputs="text",
outputs="text"
)

Connecting Gradio interfaces with databases for data persistence:
import gradio as gr
import sqlite3
def save_and_process(user_input, request: gr.Request):
# Save input to database
conn = sqlite3.connect("app_data.db")
cursor = conn.cursor()
cursor.execute(
"INSERT INTO user_inputs (input, ip_address, timestamp) VALUES (?, ?, datetime('now'))",
(user_input, request.client.host)
)
conn.commit()
conn.close()
# Process and return result
result = process_data(user_input)
return result
def get_history(request: gr.Request):
# Retrieve user's history
conn = sqlite3.connect("app_data.db")
cursor = conn.cursor()
cursor.execute(
"SELECT input, timestamp FROM user_inputs WHERE ip_address = ? ORDER BY timestamp DESC LIMIT 10",
(request.client.host,)
)
history = cursor.fetchall()
conn.close()
return str(history)
with gr.Blocks() as demo:
input_text = gr.Textbox(label="Input")
result_output = gr.Textbox(label="Result")
history_output = gr.Textbox(label="Your History")
process_btn = gr.Button("Process")
history_btn = gr.Button("View History")
process_btn.click(save_and_process, input_text, result_output)
history_btn.click(get_history, outputs=history_output)

Setting up webhooks for external system notifications:
import gradio as gr
from fastapi import FastAPI, BackgroundTasks
import json
app = FastAPI()
webhook_data = {"latest": None}
def process_webhook_data():
if webhook_data["latest"]:
return f"Latest webhook: {json.dumps(webhook_data['latest'], indent=2)}"
return "No webhook data received"
def clear_webhook_data():
webhook_data["latest"] = None
return "Webhook data cleared"
# Webhook endpoint
@app.post("/webhook")
async def receive_webhook(data: dict, background_tasks: BackgroundTasks):
webhook_data["latest"] = data
return {"status": "received"}
# Gradio interface for viewing webhook data
webhook_viewer = gr.Interface(
fn=process_webhook_data,
inputs=None,
outputs="text",
title="Webhook Data Viewer"
)
# Mount Gradio interface
app = gr.mount_gradio_app(app, webhook_viewer, path="/viewer")

Integrating with cloud services (AWS, GCP, Azure):
import gradio as gr
import boto3
from io import BytesIO
def upload_to_s3(file, bucket_name="my-gradio-uploads"):
# Upload file to AWS S3
s3_client = boto3.client('s3')
try:
s3_client.upload_file(file.name, bucket_name, file.orig_name)
return f"File uploaded successfully: s3://{bucket_name}/{file.orig_name}"
except Exception as e:
return f"Upload failed: {str(e)}"
def analyze_with_rekognition(image):
# Use AWS Rekognition for image analysis
rekognition = boto3.client('rekognition')
with open(image.name, 'rb') as img_file:
response = rekognition.detect_labels(
Image={'Bytes': img_file.read()},
MaxLabels=10
)
labels = [label['Name'] for label in response['Labels']]
return f"Detected: {', '.join(labels)}"
# Cloud-integrated interfaces.
# Note: gr.Interface takes a single callable for fn; to expose both
# functions in one app, build one Interface each and combine them
# with gr.TabbedInterface.
upload_demo = gr.Interface(
    fn=upload_to_s3,
    inputs="file",
    outputs="text"
)
analyze_demo = gr.Interface(
    fn=analyze_with_rekognition,
    inputs="image",
    outputs="text"
)
cloud_demo = gr.TabbedInterface(
    [upload_demo, analyze_demo],
    tab_names=["S3 Upload", "Image Analysis"],
    title="Cloud Integration Demo"
)

Install with Tessl CLI
npx tessl i tessl/pypi-gradio