PyPerf: a Python module to run and analyze benchmarks with high precision and statistical rigor.

System utilities for metadata collection, platform detection, statistical functions, and performance optimization. Includes CPU affinity management, memory tracking, and environment analysis capabilities for comprehensive benchmarking support.

Utility functions for detecting Python implementation characteristics and platform-specific features.
def python_implementation() -> str:
    """
    Get the Python implementation name.

    Returns:
        Implementation name (e.g., 'CPython', 'PyPy', 'Jython')
    """
    # The stub previously returned None despite declaring '-> str'.
    # platform.python_implementation() reports the interpreter name with
    # the capitalization shown in the examples above ('CPython', 'PyPy', ...).
    import platform
    return platform.python_implementation()
def python_has_jit() -> bool:
    """
    Check if the Python implementation has JIT compilation.

    Returns:
        True if the implementation includes a JIT (e.g., PyPy), False otherwise
    """
    import platform
    # NOTE(review): this covers the well-known JIT implementations; CPython's
    # experimental JIT (3.13+) is intentionally not reported here — confirm
    # against the upstream pyperf detection logic if broader coverage is needed.
    return platform.python_implementation() in ('PyPy', 'GraalVM', 'Pyston')

# Re-exported high-precision timer for consistent timing across platforms.
# High-precision timer attribute (re-exported from time.perf_counter).
perf_counter: callable
"""
High-precision timer function (re-exported from time.perf_counter).
Returns monotonic time in seconds as float with highest available resolution.
Used internally by PyPerf for all timing measurements.
"""

# Functions for formatting and displaying benchmark metadata.
def format_metadata(name: str, value) -> str:
    """
    Format a metadata value for human-readable display.

    Args:
        name: Metadata field name
        value: Metadata value to format

    Returns:
        Formatted string representation of the metadata value
    """

# Utility functions for managing benchmark data files and merging results.
def add_runs(filename: str, result):
    """
    Add benchmark results to an existing JSON file.

    Args:
        filename: Path to existing benchmark JSON file
        result: Benchmark or BenchmarkSuite object to merge into file

    Note:
        Creates a new file if it doesn't exist. Merges runs if benchmark
        names match, otherwise adds as a new benchmark to the suite.
    """
def perf_counter() -> float:
    """
    High-precision timer function (re-exported from time.perf_counter).

    Returns:
        Monotonic time in seconds as float with highest available resolution.

    Note:
        Maintained for backward compatibility with pyperf 1.7.
        Use time.perf_counter() directly in new code.
    """
    # The stub previously returned None despite declaring '-> float'.
    # Delegate to the stdlib timer exactly as the docstring specifies.
    import time
    return time.perf_counter()

# Constants providing version information for the PyPerf package.
# Version tuple (major, minor, patch) -- currently (2, 9, 0).
# The annotations previously had no assigned values, so neither name would
# actually exist at runtime; bare-string "attribute docstrings" are replaced
# with comments per Python convention.
VERSION: tuple = (2, 9, 0)
# Version string -- currently '2.9.0'; kept in sync with VERSION.
__version__: str = '2.9.0'

# Exception classes for different types of errors in PyPerf operations.
# Import from respective modules:
# from pyperf._runner import CLIError
# from pyperf._compare import CompareError
# from pyperf._hooks import HookError
class CLIError(Exception):
    """Raised when command-line interface arguments or usage are invalid."""
class CompareError(Exception):
    """Raised when a benchmark comparison operation fails."""
class HookError(Exception):
    """Raised when PyPerf's hook system encounters an error."""

# Advanced integration system for extending PyPerf with custom functionality.
def get_hook_names() -> list:
    """
    Get the list of available hook names.

    Returns:
        List of available hook names discovered via entry points
    """
def get_selected_hooks(hook_names: list) -> list:
    """
    Get specific hooks by name.

    Args:
        hook_names: List of hook names to retrieve

    Returns:
        List of hook classes
    """
def instantiate_selected_hooks(hook_names: list) -> list:
    """
    Create instances of the selected hooks.

    Args:
        hook_names: List of hook names to instantiate

    Returns:
        List of instantiated hook objects
    """

# Built-in hooks available through entry points:
#   perf_record - Linux perf integration for detailed performance analysis
#   pystats     - Python internal statistics collection
#   _test_hook  - Testing and development hook
#
# Hook usage in CLI:
#   python -m pyperf timeit --hook perf_record 'sum(range(100))'
#   python -m pyperf timeit --hook pystats 'list(range(100))'

# Internal constants used throughout PyPerf for consistency and configuration.
# Default measurement unit for benchmark values.
DEFAULT_UNIT: str = 'second'
# Valid numeric types for measurement values.
NUMBER_TYPES: tuple = (int, float)
# Valid metadata value types.
METADATA_VALUE_TYPES: tuple = (int, str, float)
# JSON format version for file compatibility.
_JSON_VERSION: str = '1.0'
# Metadata fields checked for consistency across runs.
_CHECKED_METADATA: tuple = (
    'aslr', 'cpu_count', 'cpu_model_name', 'hostname', 'inner_loops',
    'name', 'platform', 'python_executable', 'python_implementation',
    'python_unicode', 'python_version', 'unit'
)

import pyperf
# Example: adapt Runner settings to the current interpreter.
# Check Python implementation
impl = pyperf.python_implementation()
print(f"Running on: {impl}")  # e.g., "CPython" or "PyPy"

# Optimize based on JIT availability
if pyperf.python_has_jit():
    # PyPy or other JIT implementations: fewer processes, more values each
    runner = pyperf.Runner(values=10, processes=6)
else:
    # CPython - use more processes, fewer values per process
    runner = pyperf.Runner(values=3, processes=20)

import pyperf
# Use PyPerf's timer directly
start = pyperf.perf_counter()
# ... code to measure ...
end = pyperf.perf_counter()
elapsed = end - start
print(f"Elapsed: {elapsed:.9f} seconds")
# This is the same timer used internally by PyPerf
# for all benchmark measurementsimport pyperf
# Example: inspect and pretty-print benchmark metadata.
# Load benchmark and examine metadata
benchmark = pyperf.Benchmark.load('results.json')
metadata = benchmark.get_metadata()

# Format metadata for display
for name, value in metadata.items():
    formatted = pyperf.format_metadata(name, value)
    print(f"{name}: {formatted}")

# Common metadata fields include:
#   - python_version: Python version string
#   - python_implementation: Implementation name
#   - platform: Operating system and architecture
#   - cpu_model_name: CPU model information
#   - hostname: System hostname
#   - date: Benchmark execution timestamp

import pyperf
# Create initial benchmark
runner = pyperf.Runner()
bench1 = runner.timeit('test1', 'sum(range(100))')
bench1.dump('results.json')
# Add more results to the same file
bench2 = runner.timeit('test2', 'list(range(100))')
pyperf.add_runs('results.json', bench2)
# Load the combined results
suite = pyperf.BenchmarkSuite.load('results.json')
print(f"Benchmarks: {suite.get_benchmark_names()}") # ['test1', 'test2']import pyperf
# Example: query the PyPerf version programmatically.
# Check PyPerf version programmatically
print(f"PyPerf version: {pyperf.__version__}")  # "2.9.0"
print(f"Version tuple: {pyperf.VERSION}")  # (2, 9, 0)

# Version compatibility checking
major, minor, patch = pyperf.VERSION
# Compare the version as a tuple: the original 'major >= 2 and minor >= 9'
# test would wrongly reject releases such as 3.0, where the minor component
# resets to a small number.
if (major, minor) >= (2, 9):
    print("Using modern PyPerf with latest features")

import pyperf
# Example: catching PyPerf's exception types.
try:
    runner = pyperf.Runner()
    # This might raise CLIError if CLI arguments are invalid
    runner.parse_args(['--invalid-option'])
except pyperf.CLIError as e:
    print(f"CLI error: {e}")

try:
    # This might raise CompareError if benchmarks are incompatible
    bench1 = pyperf.Benchmark.load('results1.json')
    bench2 = pyperf.Benchmark.load('results2.json')
    bench1.add_runs(bench2)  # Different units or metadata
except pyperf.CompareError as e:
    print(f"Comparison error: {e}")

import pyperf
# Check available hooks
hooks = pyperf.get_hook_names()
print(f"Available hooks: {hooks}")
# Use hooks in Runner (advanced usage)
runner = pyperf.Runner()
# Hooks are typically configured via CLI: --hook perf_record
# Or through environment/configuration files
# Example of what hooks enable:
# - perf_record: Integrates with Linux perf for detailed CPU analysis
# - pystats: Collects Python internal statistics during benchmarking
# - Custom hooks: User-defined extensions for specialized measurementsimport pyperf
import os

# Environment-aware benchmarking
def create_optimized_runner():
    """Create a pyperf.Runner tuned for the current system.

    Returns:
        pyperf.Runner configured with JIT-aware warmup/process settings.
    """
    # Detect system characteristics
    has_jit = pyperf.python_has_jit()
    # os.cpu_count() can return None when the count is undeterminable;
    # fall back to 1 so the arithmetic below never raises TypeError.
    cpu_count = os.cpu_count() or 1
    # Optimize parameters
    if has_jit:
        # JIT implementations need more warmup; clamp to at least 1 process
        # so single-CPU hosts don't end up with processes=0.
        return pyperf.Runner(
            values=10,
            processes=min(6, max(1, cpu_count // 2)),
            warmups=3,
            min_time=0.2
        )
    else:
        # CPython benefits from more processes
        return pyperf.Runner(
            values=3,
            processes=min(20, cpu_count),
            warmups=1,
            min_time=0.1
        )

# Use in production
runner = create_optimized_runner()
benchmark = runner.timeit('optimized_test', 'sum(range(1000))')

# Install with Tessl CLI
npx tessl i tessl/pypi-pyperf