A pytest fixture for benchmarking code that automatically calibrates test runs for accurate performance measurements.
## Aspect-oriented benchmarking

Aspect-oriented benchmarking allows you to benchmark existing functions without modifying the test code structure. This is useful for benchmarking third-party libraries, existing functions, or when you want to benchmark multiple function calls within a single test.
def weave(self, target, **kwargs) -> None:
"""
Apply benchmarking to a target function using aspect-oriented programming.
Args:
target: The function, method, or object to benchmark
**kwargs: Additional arguments passed to aspectlib.weave()
Raises:
ImportError: If aspectlib is not installed
FixtureAlreadyUsed: If the fixture has already been used in this test
"""def patch(self, target, **kwargs) -> None:
"""
Alias for weave method - applies benchmarking to target function.
Args:
target: The function, method, or object to benchmark
**kwargs: Additional arguments passed to aspectlib.weave()
Raises:
ImportError: If aspectlib is not installed
FixtureAlreadyUsed: If the fixture has already been used in this test
"""@pytest.fixture
def benchmark_weave(benchmark) -> callable:
"""
Shortcut fixture that provides direct access to benchmark.weave.
Returns:
callable: The benchmark.weave method
"""Aspect-oriented benchmarking requires the aspectlib package:
pip install pytest-benchmark[aspect]
# or
pip install aspectlib

import math
def test_math_operations(benchmark):
# Benchmark all calls to math.sqrt in this test
benchmark.weave(math.sqrt)
# Now all calls to math.sqrt will be benchmarked
result1 = math.sqrt(16)
result2 = math.sqrt(25)
result3 = math.sqrt(36)
assert result1 == 4.0
assert result2 == 5.0
assert result3 == 6.0class DataProcessor:
def process(self, data):
return sum(x**2 for x in data)
def transform(self, data):
return [x * 2 for x in data]
def test_class_method_weaving(benchmark):
processor = DataProcessor()
# Benchmark the process method
benchmark.weave(processor, 'process')
data = list(range(1000))
result = processor.process(data)
assert result == sum(x**2 for x in range(1000))def test_with_weave_fixture(benchmark_weave):
import json
# Direct access to weave functionality
benchmark_weave(json.dumps)
data = {"key": "value", "numbers": [1, 2, 3, 4, 5]}
result = json.dumps(data)
assert '"key": "value"' in resultdef custom_function(x):
return x * 2 + 1
def another_function(x, y):
    """Example weaving target: return the sum of *x* and *y*."""
    total = x + y
    return total
def test_multiple_weaving(benchmark):
    """Show that a benchmark fixture instance supports only one weave.

    Only the first woven function is benchmarked; later calls to other
    functions run unmeasured.
    """
    benchmark.weave(custom_function)
    first = custom_function(5)       # this call gets benchmarked
    second = another_function(3, 4)  # this one doesn't
    assert first == 11
    assert second == 7


def test_advanced_weaving(benchmark):
    """Forward extra keyword options through to aspectlib.weave."""

    def target_function(a, b, c=10):
        return a + b + c

    benchmark.weave(
        target_function,
        # aspectlib options
        lazy=True,  # lazy weaving
        # methods=['__call__']  # specific methods to weave
    )
    result = target_function(1, 2, c=3)
    assert result == 6


import hashlib
def test_hashlib_performance(benchmark):
benchmark.weave(hashlib.md5)
data = b"Hello, World!" * 1000
# All md5() calls in this test will be benchmarked
hash1 = hashlib.md5(data)
hash2 = hashlib.md5(data)
assert hash1.hexdigest() == hash2.hexdigest()class DatabaseConnection:
def __init__(self):
self.queries = []
def execute(self, query):
self.queries.append(query)
return f"Result for: {query}"
def test_database_method_weaving(benchmark):
db = DatabaseConnection()
# Benchmark the execute method
benchmark.weave(db.execute)
# Multiple calls - all benchmarked as one aggregate
results = []
results.append(db.execute("SELECT * FROM users"))
results.append(db.execute("SELECT * FROM products"))
results.append(db.execute("SELECT * FROM orders"))
assert len(results) == 3
assert len(db.queries) == 3# mymodule.py content (example)
def expensive_computation(n):
return sum(i**2 for i in range(n))
# Test file
# Test file
def test_module_function_weaving(benchmark):
    """Weave a function imported from another module."""
    import mymodule

    # Weave the module-level function.
    benchmark.weave(mymodule.expensive_computation)
    outcome = mymodule.expensive_computation(1000)
    assert outcome == sum(i**2 for i in range(1000))


def test_weave_single_use(benchmark):
    """A benchmark fixture permits exactly one weave per test."""

    def func1():
        return 1

    def func2():
        return 2

    # The first weave works.
    benchmark.weave(func1)
    result1 = func1()
    # A second weave would raise FixtureAlreadyUsed:
    # benchmark.weave(func2)  # Error!

The weaving is automatically cleaned up after the test completes. No manual cleanup is required.
def test_automatic_cleanup(benchmark):
    """Weaving is undone automatically once the test finishes."""
    import math

    original_sqrt = math.sqrt
    benchmark.weave(math.sqrt)
    # math.sqrt is woven for the remainder of the test body.
    result = math.sqrt(16)
    assert result == 4.0
    # After the test completes, the weaving is automatically removed.


def test_weave_error_handling(benchmark):
    """Exceptions raised by woven functions propagate to the caller."""

    def failing_function():
        raise ValueError("Test error")

    benchmark.weave(failing_function)
    # Exceptions are properly propagated.
    with pytest.raises(ValueError):
        failing_function()
    # NOTE(review): pytest-benchmark does not document a ``has_error``
    # attribute on the fixture -- confirm before relying on this assert.
    assert benchmark.has_error


from unittest.mock import patch, MagicMock
def test_weave_with_mocks(benchmark):
    """Woven targets can themselves be mock objects."""
    with patch('requests.get') as mock_get:
        mock_get.return_value = MagicMock(status_code=200)
        # Mocked callables can be woven like any other target.
        benchmark.weave(mock_get)
        import requests

        response = requests.get('http://example.com')
        assert response.status_code == 200
        mock_get.assert_called_once()


@pytest.fixture
def sample_data():
    """Provide a list of the first 1000 non-negative integers."""
    return list(range(1000))
def test_weave_with_fixtures(benchmark, sample_data):
    """Combine weaving with an ordinary pytest fixture argument."""

    def process_data(data):
        return sum(x**2 for x in data)

    benchmark.weave(process_data)
    result = process_data(sample_data)
    expected = sum(x**2 for x in range(1000))
    assert result == expected

Install with Tessl CLI:

npx tessl i tessl/pypi-pytest-benchmark