Code audit tool for Python.

Seamless integration with pytest for automated code quality checking during
testing. Pylama provides a pytest plugin that automatically discovers and
checks Python files as part of your test suite.

Add the pylama checking option to the pytest command line interface.
def pytest_addoption(parser):
    """
    Add --pylama option to pytest command line.

    Args:
        parser: pytest argument parser

    Adds:
        --pylama: Enable pylama code quality checks on .py files

    When enabled, pytest will collect all .py files and run pylama
    checks on them as part of the test suite.
    """


# Collect Python files for pylama checking during pytest discovery phase.
def pytest_collect_file(path, parent):
    """
    Collect Python files for pylama checking.

    Args:
        path: File path being considered for collection
        parent: Parent pytest collector

    Returns:
        Optional[PylamaFile]: PylamaFile collector if --pylama enabled
        and path is a .py file

    Only collects files when:
        - --pylama option is enabled
        - File has .py extension
        - File is not excluded by pytest collection rules
    """


# Manage caching across pytest sessions for performance optimization.
def pytest_sessionstart(session):
    """
    Initialize pylama session with file modification time caching.

    Args:
        session: pytest session object

    Sets up caching to skip unchanged files that previously passed
    pylama checks, improving performance on large codebases.
    """
def pytest_sessionfinish(session):
    """
    Save modification time cache at end of pytest session.

    Args:
        session: pytest session object

    Persists file modification times to cache for next test run,
    enabling incremental checking.
    """
def pytest_load_initial_conftests(early_config, *_):
    """
    Register pylama markers before pytest configuration loading.

    Args:
        early_config: Early pytest configuration

    Registers 'pycodestyle' marker to prevent warnings when using
    the --strict command line argument.
    """


# Core test classes that perform pylama checking within pytest framework.
class PylamaError(Exception):
    """
    Exception raised when pylama checks fail.

    Contains formatted error messages from pylama checking that
    will be displayed in pytest output when tests fail.
    """
class PylamaFile(pytest.File):
    """
    Pytest file collector for pylama checks.

    Represents a Python file that will be checked by pylama.
    Creates a single PylamaItem for the entire file.
    """

    def collect(self):
        """
        Create test items for this file.

        Returns:
            List[PylamaItem]: Single item representing the pylama
            check for this file
        """
class PylamaItem(pytest.Item):
    """
    Pytest test item for individual file pylama checks.

    Represents the actual test that runs pylama on a single file
    and reports any code quality issues as test failures.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize pylama test item.

        Automatically adds 'pycodestyle' marker and sets up caching
        infrastructure for performance optimization.
        """

    def setup(self):
        """
        Set up test item with caching logic.

        Returns:
            bool: True if setup successful, False if the check
            should be skipped

        Checks file modification time against cache and skips the test
        if the file hasn't changed since the last successful check.
        """

    def runtest(self):
        """
        Execute pylama check on the file.

        Raises:
            PylamaError: If any code quality issues are found

        Runs pylama analysis and raises PylamaError with formatted
        error messages if any issues are discovered.
        """

    def repr_failure(self, excinfo, style=None):
        """
        Format test failure output.

        Args:
            excinfo: Exception information from test failure
            style: Output style (unused)

        Returns:
            str: Formatted error messages for pytest output
        """


# Standalone function for checking individual files within pytest context.
def check_file(path):
    """
    Check a single file using pylama with default configuration.

    Args:
        path: File path to check

    Returns:
        List[Error]: Errors found in the file

    Loads default pylama configuration and checks the specified file,
    returning any code quality issues found.
    """


# Run tests with pylama checking
import subprocess

# Enable pylama for all .py files
result = subprocess.run(['pytest', '--pylama'])

# Combine with specific test selection
result = subprocess.run(['pytest', '--pylama', 'tests/', '-v'])

# Use with pytest configuration
result = subprocess.run(['pytest', '--pylama', '--tb=short'])

[tool:pytest]
addopts = --pylama
testpaths = src tests
python_files = *.py
markers =
    pycodestyle: Code style checks

[tool.pytest.ini_options]
addopts = "--pylama"
testpaths = ["src", "tests"]
markers = [
    "pycodestyle: Code style checks"
]

import pytest
from pylama.pytest import check_file

# Run pytest with pylama programmatically
exit_code = pytest.main(['--pylama', 'src/'])

# Check individual file
errors = check_file('mymodule.py')
if errors:
    for error in errors:
        print(f"{error.filename}:{error.lnum} - {error.message}")

import pytest
from pylama.pytest import check_file

def test_code_quality():
    """Custom test function that uses pylama checking."""
    # Check specific files
    files_to_check = ['src/main.py', 'src/utils.py']
    all_errors = []
    for filepath in files_to_check:
        errors = check_file(filepath)
        all_errors.extend(errors)
    # Assert no code quality issues
    if all_errors:
        error_messages = [
            f"{err.filename}:{err.lnum} - {err.text}"
            for err in all_errors
        ]
        pytest.fail("Code quality issues found:\n" + "\n".join(error_messages))

import pytest
import sys

# Skip pylama tests in certain conditions
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Requires Python 3.8+")
def test_pylama_advanced_features():
    """Test that only runs on newer Python versions."""
    result = pytest.main(['--pylama', 'src/advanced_features.py'])
    assert result == 0

# pytest command combining pylama with coverage
import subprocess

result = subprocess.run([
    'pytest',
    '--pylama',            # Code quality checks
    '--cov=src',           # Coverage reporting
    '--cov-report=html',   # HTML coverage report
    '--cov-fail-under=80'  # Minimum coverage threshold
])

import pytest
@pytest.mark.pycodestyle
def test_specific_style_check():
    """Test specifically marked for style checking."""
    # This test will be collected when running with --pylama
    pass

# Run only style-marked tests:
#   pytest -m pycodestyle --pylama

# pytest-xdist for parallel execution with pylama
import subprocess

# Run pylama checks in parallel
result = subprocess.run([
    'pytest',
    '--pylama',
    '-n', 'auto',      # Use all available CPU cores
    '--dist=loadfile'  # Distribute by file
])

# GitHub Actions example
name: Code Quality
on: [push, pull_request]
jobs:
  pylama:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: "3.9"
      - name: Install dependencies
        run: |
          pip install pylama pytest
      - name: Run code quality checks
        run: pytest --pylama src/

# conftest.py
import pytest
from pylama.config import parse_options

@pytest.fixture(scope="session")
def pylama_options():
    """Custom pylama configuration for tests."""
    return parse_options([
        '--linters=pycodestyle,pyflakes',
        '--ignore=E501,W503',
        '--max-line-length=100'
    ])

def test_with_custom_config(pylama_options):
    """Test using custom pylama configuration."""
    from pylama.main import check_paths
    errors = check_paths(['src/'], pylama_options)
    assert len(errors) == 0, f"Code quality issues: {errors}"

The pytest integration includes an intelligent caching system:
HISTKEY: str = "pylama/mtimes"  # Cache key for storing file modification times.

Cache behavior:
- Cache location: pytest's cache directory (.pytest_cache/).
- Clear the cache with: pytest --cache-clear

Install with Tessl CLI:
    npx tessl i tessl/pypi-pylama