A production-quality pure-Python WSGI server with robust HTTP protocol support and comprehensive configuration options
—
Primary functions for creating and managing WSGI servers, providing both high-level convenience functions and lower-level server instances for advanced control.
The primary entry point for serving WSGI applications with automatic server configuration and startup.
def serve(app, **kw):
"""
Serve a WSGI application using waitress server.
Parameters:
- app: WSGI application callable
- **kw: Configuration parameters (see Adjustments class)
Common Parameters:
- host (str): Hostname to bind to (default: '0.0.0.0')
- port (int): Port to bind to (default: 8080)
- threads (int): Number of worker threads (default: 4)
- unix_socket (str): Unix socket path (Unix only)
- connection_limit (int): Max concurrent connections (default: 100)
- channel_timeout (int): Idle connection timeout (default: 120)
Returns:
None (blocks until server shutdown)
"""

Usage example:
from waitress import serve
def my_app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/html')]
start_response(status, headers)
return [b'<h1>Hello World!</h1>']
# Basic usage
serve(my_app)
# With configuration
serve(my_app, host='127.0.0.1', port=5000, threads=6)
# Unix socket (Linux/macOS)
serve(my_app, unix_socket='/tmp/waitress.sock')

Creates server instances for advanced control over server lifecycle and configuration.
def create_server(application, map=None, _start=True, _sock=None, _dispatcher=None, **kw):
"""
Create a WSGI server instance.
Parameters:
- application: WSGI application callable
- map (dict): asyncore socket map (default: None, creates new map)
- _start (bool): Whether to start listening immediately (default: True)
- _sock: Pre-existing socket to use (default: None)
- _dispatcher: Custom task dispatcher (default: None, creates ThreadedTaskDispatcher)
- **kw: Configuration parameters (see Adjustments class)
Returns:
Server instance (TcpWSGIServer, UnixWSGIServer, or MultiSocketServer)
"""

Usage example:
from waitress import create_server
def my_app(environ, start_response):
# Your WSGI app here
pass
# Create server without starting
server = create_server(my_app, _start=False, host='localhost', port=8080)
# Custom startup logic
print(f"Starting server on {server.effective_host}:{server.effective_port}")
server.run() # Start the server loop
# Multiple servers
servers = []
for port in [8080, 8081, 8082]:
server = create_server(my_app, port=port)
servers.append(server)

Entry point for Paste deployment configurations.
def serve_paste(app, global_conf, **kw):
"""
Paste deployment entry point.
Parameters:
- app: WSGI application
- global_conf: Paste global configuration
- **kw: Waitress configuration parameters
Returns:
int: Exit code (always 0)
"""

Used in Paste deployment files:
[server:main]
use = egg:waitress#main
host = 0.0.0.0
port = 6543

The server classes returned by create_server() provide direct control over server lifecycle.
class BaseWSGIServer:
"""Base class for all WSGI server implementations."""
def run(self):
"""Start the asyncore event loop (blocking)."""
def close(self):
"""Shutdown the server cleanly."""
def print_listen(self, format_str):
"""Print server listening information."""
class TcpWSGIServer(BaseWSGIServer):
"""TCP/IP socket-based WSGI server."""
effective_host: str # Actual bound hostname
effective_port: int # Actual bound port
class UnixWSGIServer(BaseWSGIServer):
"""Unix domain socket WSGI server (Unix platforms only)."""
effective_host: str # Socket path
class MultiSocketServer:
"""Manages multiple socket servers simultaneously."""
def run(self):
"""Start all managed servers."""
def close(self):
"""Shutdown all managed servers."""

Usage example:
from waitress import create_server
server = create_server(my_app, host='0.0.0.0', port=8080)
try:
print(f"Server running on {server.effective_host}:{server.effective_port}")
server.run()
except KeyboardInterrupt:
print("Shutting down...")
server.close()

Utility function for profiling server performance and application code.
def profile(cmd, globals, locals, sort_order=None, callers=False):
"""
Run a command under the Python profiler and print results.
Parameters:
- cmd (str): Command string to execute and profile
- globals (dict): Global namespace for command execution
- locals (dict): Local namespace for command execution
- sort_order (tuple): Sort order for profile stats (default: ('cumulative', 'calls', 'time'))
- callers (bool): Print caller information instead of stats (default: False)
Returns:
None (prints profiling output to stdout)
Usage:
Used internally by serve() when _profile=True parameter is passed.
"""

Usage example:
import waitress
def my_app(environ, start_response):
# Your WSGI application
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Hello World']
# Profile the server startup and initial request handling
waitress.profile(
"waitress.serve(my_app, port=8080)",
globals(),
locals(),
sort_order=('cumulative', 'time'),
callers=False
)

Install with Tessl CLI
npx tessl i tessl/pypi-waitress