Python module to run and analyze benchmarks with high precision and statistical rigor
—
Core benchmarking functionality providing the primary interface for executing precise performance measurements. The Runner class manages worker processes, automatic calibration, and statistical validation to ensure reliable benchmark results.
The central interface for benchmark execution with comprehensive configuration options and automatic optimization for different Python implementations.
class Runner:
def __init__(self, values=None, processes=None, loops=0, min_time=0.1,
metadata=None, show_name=True, program_args=None,
add_cmdline_args=None, _argparser=None, warmups=1):
"""
Create a benchmark runner.
Args:
values: Number of values per process (default: 3 for CPython, 10 for PyPy)
processes: Number of worker processes (default: 20 for CPython, 6 for PyPy)
loops: Number of loops per value (0 for auto-calibration)
min_time: Minimum duration per measurement in seconds
metadata: Custom metadata dictionary
show_name: Whether to show benchmark names in output
program_args: Command line arguments for worker processes
add_cmdline_args: Callback for preparing worker arguments
warmups: Number of warmup iterations
"""Direct benchmarking of Python functions with argument passing and automatic loop calibration.
def bench_func(self, name: str, func: callable, *args, **kwargs) -> Benchmark:
    """
    Benchmark a Python function.

    NOTE(review): the call overhead of invoking *func* is presumably included
    in each measurement — confirm against the pyperf documentation.

    Args:
        name: Benchmark name for identification
        func: Function to benchmark
        *args: Positional arguments to pass to function
        **kwargs: Keyword arguments (inner_loops, metadata supported)

    Returns:
        Benchmark object with timing results
    """
def bench_time_func(self, name: str, time_func: callable, *args, **kwargs) -> Benchmark:
"""
Benchmark a function that returns elapsed time.
Args:
name: Benchmark name for identification
time_func: Function that returns timing measurements
*args: Positional arguments to pass to function
**kwargs: Keyword arguments (inner_loops, metadata supported)
Returns:
Benchmark object with timing results
"""Specialized benchmarking for asynchronous functions with event loop management.
def bench_async_func(self, name: str, func: callable, *args, **kwargs) -> Benchmark:
"""
Benchmark an async function.
Args:
name: Benchmark name for identification
func: Async function to benchmark
*args: Positional arguments to pass to function
**kwargs: Keyword arguments (inner_loops, metadata, loop_factory supported)
Returns:
Benchmark object with timing results
"""timeit-style code benchmarking with setup and teardown phases.
def timeit(self, name: str, stmt=None, setup="pass", teardown="pass",
inner_loops=None, duplicate=None, metadata=None, globals=None) -> Benchmark:
"""
Benchmark Python code statements (like timeit module).
Args:
name: Benchmark name for identification
stmt: Code statement to benchmark
setup: Setup code executed once before timing
teardown: Teardown code executed after timing
inner_loops: Number of inner loop iterations
duplicate: Number of statement duplicates per loop
metadata: Custom metadata for this benchmark
globals: Global namespace for code execution
Returns:
Benchmark object with timing results
"""Benchmarking of external processes and command-line tools.
def bench_command(self, name: str, command: list) -> Benchmark:
"""
Benchmark an external command.
Args:
name: Benchmark name for identification
command: Command and arguments as list (e.g., ['python', '-c', 'print("hello")'])
Returns:
Benchmark object with timing results
"""Command-line argument parsing and configuration management.
def parse_args(self, args=None) -> argparse.Namespace:
"""
Parse command line arguments for benchmark configuration.
Args:
args: Argument list (defaults to sys.argv)
Returns:
Parsed arguments namespace
"""import pyperf
def fibonacci(n):
    """Return the n-th Fibonacci number by naive double recursion.

    Deliberately unmemoized: the exponential call tree makes this a useful
    CPU-bound workload for benchmarking.
    """
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)
runner = pyperf.Runner()
benchmark = runner.bench_func('fibonacci_20', fibonacci, 20)
print(f"Mean: {benchmark.mean():.6f} ± {benchmark.stdev():.6f} seconds")

import pyperf
runner = pyperf.Runner()
# Simple statement timing
benchmark = runner.timeit('list_comprehension',
stmt='[i*2 for i in range(1000)]')
# With setup code
benchmark = runner.timeit('dict_lookup',
stmt='d[key]',
setup='d = {i: i*2 for i in range(1000)}; key = 500')

import pyperf
import asyncio
async def async_task(n):
    """Toy coroutine for benchmarking: await a short sleep, then sum 0..n-1."""
    await asyncio.sleep(0.001)  # stand-in for real asynchronous work
    total = sum(range(n))
    return total
runner = pyperf.Runner()
benchmark = runner.bench_async_func('async_task', async_task, 100)

import pyperf
runner = pyperf.Runner()
benchmark = runner.bench_command('python_version',
['python', '-c', 'import sys; print(sys.version)'])

import pyperf
# Rigorous benchmarking with more samples
runner = pyperf.Runner(values=10, processes=10, min_time=0.2)
# Quick rough measurements
runner = pyperf.Runner(values=1, processes=3, min_time=0.05)
# Custom metadata
runner = pyperf.Runner(metadata={'test_environment': 'production'})
benchmark = runner.bench_func('test', some_function,
metadata={'algorithm': 'quicksort'})

Install with Tessl CLI
npx tessl i tessl/pypi-pyperf