Extensions to the Python standard library unit testing framework.

Control how individual tests run, including custom test execution, skip
decorators, concurrent test execution, and specialized test suite
implementations.

RunTest class for controlling the individual test execution lifecycle.
class RunTest:
    """Control how an individual test case is run.

    Provides hooks for custom setup, execution, and teardown,
    supporting different test execution strategies.
    """

    def __init__(self, case, handlers=None):
        """Create a runner for a single test case.

        Args:
            case: The TestCase instance to run.
            handlers: Optional exception handlers.
        """

    def run(self, result):
        """Run the test case, recording outcomes into *result*.

        Args:
            result: TestResult used to record results.

        Returns:
            The test result.
        """

    def setUp(self):
        """Perform setup before the test method runs.

        Override for custom setup logic.
        """

    def tearDown(self):
        """Perform teardown after the test method runs.

        Override for custom cleanup logic.
        """

    def _run_user(self, fn):
        """Run a user-supplied test method with exception handling.

        Args:
            fn: The test method to execute.
        """

    def _got_user_exception(self, exc_info):
        """Record an exception raised by user test code.

        Args:
            exc_info: sys.exc_info()-style exception tuple.
        """


# Classes for managing test exceptions and error conditions.
class MultipleExceptions(Exception):
    """Exception aggregating several test failures.

    Gathers every exception raised during a test run so that all
    problems can be reported together.
    """

    def __init__(self, *args):
        """Store the collected exceptions.

        Args:
            *args: Exception instances or exception information.
        """

    def __str__(self):
        """Render every collected exception.

        Returns:
            str: A formatted list of all exceptions.
        """


# Specialized test suite classes for different execution strategies.
class ConcurrentTestSuite(unittest.TestSuite):
    """Test suite that runs its members in parallel.

    Executes tests across multiple processes/threads for improved
    performance, aggregating results and handling worker errors.
    """

    def __init__(self, suite, fork_for_tests):
        """Build the concurrent suite.

        Args:
            suite: The suite whose tests will run concurrently.
            fork_for_tests: Callable that creates the test workers.
        """

    def run(self, result):
        """Run all of the tests concurrently.

        Args:
            result: TestResult that collects the outcomes.

        Returns:
            The aggregated test result.
        """
class ConcurrentStreamTestSuite(unittest.TestSuite):
    """Concurrent test suite that reports through stream results.

    Combines parallel execution with streaming result reporting so
    feedback arrives in real time.
    """

    def __init__(self, suite, fork_runner):
        """Build the concurrent streaming suite.

        Args:
            suite: The suite of tests to run.
            fork_runner: Callable that creates the worker processes.
        """

    def run(self, result):
        """Run the tests concurrently with streamed reporting.

        Args:
            result: StreamResult receiving real-time events.

        Returns:
            The stream result.
        """
class FixtureSuite(unittest.TestSuite):
    """Suite that wraps its tests in a shared fixture.

    Manages a fixture whose setup/teardown is shared across multiple
    tests, so expensive resources are created only once.
    """

    def __init__(self, fixture, tests):
        """Build the fixture-backed suite.

        Args:
            fixture: Fixture instance providing setup/teardown.
            tests: Test cases or suites to run.
        """

    def run(self, result):
        """Run the tests inside the managed fixture.

        Args:
            result: TestResult that collects the outcomes.

        Returns:
            The test result.
        """


# Functions for working with test suites and test organization.
def iterate_tests(test_suite_or_case):
    """Walk a suite and yield each individual test.

    Recursively flattens nested suites so callers can inspect or
    process every contained TestCase.

    Args:
        test_suite_or_case: TestSuite or TestCase to walk.

    Yields:
        TestCase: Each individual test case.
    """
def filter_by_ids(test_suite, test_ids):
    """Keep only the tests whose ids appear in *test_ids*.

    Args:
        test_suite: Suite to filter.
        test_ids: Set of test ids to retain.

    Returns:
        TestSuite: The filtered suite.
    """
def sorted_tests(test_suite, cmp_func=None):
    """Return the suite's tests in sorted order.

    Args:
        test_suite: Suite whose tests are sorted.
        cmp_func: Optional comparison function.

    Returns:
        TestSuite: The sorted suite.
    """


# Functions for test method decoration and execution control.
def run_test_with(test_runner):
    """Decorate a test method to use a custom test runner.

    Args:
        test_runner: RunTest subclass or factory callable.

    Returns:
        Function: Decorator to apply to the test method.

    Example:
        @run_test_with(CustomRunTest)
        def test_custom_execution(self):
            pass
    """
def attr(**kwargs):
    """Attach arbitrary attributes to a test method.

    Args:
        **kwargs: Attribute name/value pairs to set.

    Returns:
        Function: Decorator to apply to the test method.

    Example:
        @attr(speed='slow', category='integration')
        def test_database_integration(self):
            pass
    """
def gather_details(source_dict, target_dict):
    """Merge detail entries from one dictionary into another.

    Args:
        source_dict (dict): Details to copy from.
        target_dict (dict): Details dictionary updated in place.
    """
import testtools


class TimingRunTest(testtools.RunTest):
    """Custom runner that times test execution.

    Records start/end timestamps around the test and attaches an
    'execution_time' detail to the case after the test finishes.
    """

    def __init__(self, case, handlers=None):
        super().__init__(case, handlers)
        self.start_time = None
        self.end_time = None

    def setUp(self):
        import time
        # perf_counter is monotonic, so the measured duration cannot be
        # skewed by wall-clock adjustments (time.time() can jump backwards).
        self.start_time = time.perf_counter()
        super().setUp()

    def tearDown(self):
        import time
        super().tearDown()
        self.end_time = time.perf_counter()
        # Guard against tearDown running without a matching setUp.
        if self.start_time is not None:
            duration = self.end_time - self.start_time
            self.case.addDetail(
                'execution_time',
                testtools.content.text_content(f"{duration:.3f}s"))
class MyTest(testtools.TestCase):
    """Example test case that opts in to the timing runner."""

    @testtools.run_test_with(TimingRunTest)
    def test_with_timing(self):
        import time
        time.sleep(0.1)  # Simulate work
        self.assertTrue(True)
import testtools
from testtools.testsuite import ConcurrentTestSuite


def fork_for_tests(suite):
    """Split *suite* into independently runnable tests.

    ConcurrentTestSuite calls this with the wrapped suite and expects
    back an iterable of tests (or sub-suites), one per worker.  The
    original snippet returned a multiprocessing.Pool, which is not a
    collection of tests and would break ConcurrentTestSuite.run().

    Args:
        suite: The suite to partition.

    Returns:
        list: One entry per test for maximum parallelism.
    """
    return list(testtools.iterate_tests(suite))


# Create test suite
suite = testtools.TestSuite()
suite.addTest(SlowTest('test_method_1'))
suite.addTest(SlowTest('test_method_2'))
suite.addTest(SlowTest('test_method_3'))
suite.addTest(SlowTest('test_method_4'))

# Run concurrently
concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests)
result = testtools.TestResult()
concurrent_suite.run(result)
print(f"Executed {result.testsRun} tests concurrently")
import testtools
from testtools.testsuite import FixtureSuite
from fixtures import TempDir


class DatabaseTest(testtools.TestCase):
    """Tests that rely on a database shared through a FixtureSuite."""

    def test_user_creation(self):
        # Runs against the database set up once by the enclosing suite.
        user = create_user("test_user")
        self.assertIsNotNone(user.id)

    def test_user_deletion(self):
        # Uses the very same shared database instance.
        user = create_user("delete_me")
        delete_user(user.id)
        self.assertIsNone(get_user(user.id))


# Create fixture suite with shared database.
# NOTE(review): DatabaseFixture is not defined in this snippet -- it is
# presumably provided by the surrounding project; confirm before running.
database_fixture = DatabaseFixture()
suite = FixtureSuite(database_fixture, [
    DatabaseTest('test_user_creation'),
    DatabaseTest('test_user_deletion'),
])
result = testtools.TestResult()
suite.run(result)
import testtools
# filter_by_ids lives in testtools.testsuite; the original snippet called
# it without importing it, which would raise NameError.
from testtools.testsuite import filter_by_ids


def discover_slow_tests(suite):
    """Return every test in *suite* whose ``speed`` attribute is 'slow'.

    Args:
        suite: TestSuite (or TestCase) to walk.

    Returns:
        list: The matching TestCase instances.
    """
    slow_tests = []
    for test in testtools.iterate_tests(suite):
        if hasattr(test, 'speed') and test.speed == 'slow':
            slow_tests.append(test)
    return slow_tests


# Create comprehensive test suite
full_suite = testtools.TestSuite()
# ... add many tests ...

# Filter for specific test IDs
test_ids = {'MyTest.test_specific', 'OtherTest.test_important'}
filtered_suite = filter_by_ids(full_suite, test_ids)

# Run only filtered tests
result = testtools.TestResult()
filtered_suite.run(result)
import testtools


class RetryRunTest(testtools.RunTest):
    """Runner that retries failed tests a limited number of times.

    testtools constructs runner factories as ``runner(case, handlers)``
    (see RunTest.__init__), so ``handlers`` must be accepted and
    forwarded -- the original signature dropped it and would raise a
    TypeError when used via run_test_with.
    """

    def __init__(self, case, handlers=None, max_retries=3):
        super().__init__(case, handlers)
        self.max_retries = max_retries

    def run(self, result):
        """Run the case up to max_retries + 1 times.

        Args:
            result: TestResult receiving the final outcome only.
        """
        for attempt in range(self.max_retries + 1):
            temp_result = testtools.TestResult()
            super().run(temp_result)
            if temp_result.wasSuccessful():
                # Success: forward to the real result and stop retrying.
                result.addSuccess(self.case)
                break
            elif attempt == self.max_retries:
                # Final attempt failed: forward the recorded problems.
                for error in temp_result.errors:
                    result.addError(*error)
                for failure in temp_result.failures:
                    result.addFailure(*failure)
            else:
                # Note the retry on the case, then loop again.
                self.case.addDetail(
                    f'retry_{attempt}',
                    testtools.content.text_content("Retrying after failure"))
class FlakyTest(testtools.TestCase):
    """Demonstrates retrying an intermittently failing test."""

    # The runner factory is invoked as runner(case, handlers), so the
    # lambda must accept the handlers argument -- a one-argument lambda
    # would raise TypeError when testtools builds the runner.
    @testtools.run_test_with(
        lambda case, handlers=None: RetryRunTest(case, handlers,
                                                 max_retries=2))
    def test_flaky_operation(self):
        import random
        if random.random() < 0.7:  # 70% chance of failure
            self.fail("Random failure")
        self.assertTrue(True)
import testtools


class MetadataTest(testtools.TestCase):
    """Test case whose methods carry searchable metadata attributes."""

    @testtools.attr(category='unit', priority='high', owner='alice')
    def test_critical_function(self):
        self.assertEqual(critical_function(), 'expected')

    @testtools.attr(category='integration', priority='low', owner='bob')
    def test_external_service(self):
        result = call_external_service()
        self.assertIsNotNone(result)
# Discover tests by attributes
_MISSING = object()  # sentinel: missing attributes never match any value


def find_tests_by_owner(suite, owner):
    """Find tests owned by specific person."""
    owned = []
    for candidate in testtools.iterate_tests(suite):
        if getattr(candidate, 'owner', _MISSING) == owner:
            owned.append(candidate)
    return owned


def find_high_priority_tests(suite):
    """Find high priority tests."""
    urgent = []
    for candidate in testtools.iterate_tests(suite):
        if getattr(candidate, 'priority', _MISSING) == 'high':
            urgent.append(candidate)
    return urgent


# Use metadata for test organization
suite = testtools.TestSuite()
# ... add tests ...
alice_tests = find_tests_by_owner(suite, 'alice')
high_priority = find_high_priority_tests(suite)

Install with the Tessl CLI:
npx tessl i tessl/pypi-testtools