Allure pytest integration that generates comprehensive test reports with rich metadata and visual test execution tracking
```bash
npx @tessl/cli install tessl/pypi-allure-pytest@2.15.0
```

A pytest plugin that integrates with the Allure reporting framework to generate comprehensive test reports. The plugin enables pytest tests to produce detailed test execution reports with rich attachments, steps, metadata, and visual test results that can be viewed through Allure's web-based reporting interface.
```bash
pip install allure-pytest
```

The plugin integrates with pytest automatically once installed. Use the main allure module for decorators and the dynamic API:

```python
import allure
```

For pytest markers:

```python
import pytest
```

A basic test combining decorators, steps, and dynamic parameters:

```python
import allure
import pytest
# Basic test with Allure decorators
@allure.title("User Authentication Test")
@allure.description("Verify user can log in with valid credentials")
@allure.severity(allure.severity_level.CRITICAL)
@allure.feature("Authentication")
@allure.story("User Login")
def test_user_login():
with allure.step("Navigate to login page"):
# Test implementation
pass
with allure.step("Enter credentials"):
allure.dynamic.parameter("username", "test_user")
# Test implementation
pass
with allure.step("Verify successful login"):
# Test implementation
pass
# Run tests with Allure reporting
# pytest --alluredir=./allure-results tests/
# allure serve ./allure-results
```

The allure-pytest plugin follows a hook-based architecture that integrates seamlessly with pytest's execution flow:
- pytest11 entry point for automatic discovery

Control Allure reporting behavior through pytest command-line options, including output directory configuration, test filtering by labels, and attachment settings:

```
# Core options
--alluredir DIR # Generate Allure report in specified directory
--clean-alluredir # Clean alluredir folder if it exists
--allure-no-capture # Do not attach pytest captured output
```
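As a rough sketch, the same options can also be passed programmatically through pytest.main; the `./allure-results` and `tests/` paths are placeholders.

```python
import pytest

# Equivalent to: pytest --alluredir=./allure-results --clean-alluredir tests/
# Placeholder paths; adjust to your project layout.
exit_code = pytest.main([
    "--alluredir=./allure-results",  # write Allure result files here
    "--clean-alluredir",             # wipe the results directory before the run
    "tests/",
])
```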
Filter and select tests based on Allure labels, test plans, and metadata for targeted test execution and reporting:

```
# Label-based filtering options
--allure-severities SEVERITIES_SET # Filter by severity levels
--allure-features FEATURES_SET # Filter by feature labels
--allure-stories STORIES_SET # Filter by story labels
--allure-epics EPICS_SET # Filter by epic labels
--allure-ids IDS_SET # Filter by test ID labels
--allure-label LABEL_NAME=values # Filter by custom labels
```
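A hedged sketch of label-based selection via pytest.main; the feature name and severity values are illustrative and must match labels actually applied to your tests.

```python
import pytest

# Run only tests labelled with the "Authentication" feature at critical or
# blocker severity. Equivalent CLI:
#   pytest --alluredir=./allure-results --allure-features Authentication \
#          --allure-severities critical,blocker tests/
pytest.main([
    "--alluredir=./allure-results",
    "--allure-features", "Authentication",
    "--allure-severities", "critical,blocker",
    "tests/",  # placeholder test path
])
```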
Add rich metadata to tests using pytest markers for categorization, organization, and enhanced reporting in the Allure interface:

```python
@pytest.mark.allure_label("User Management", label_type="feature")
@pytest.mark.allure_label("User Registration", label_type="story")
@pytest.mark.allure_link("http://example.com/issue/123", link_type="issue", name="Bug-123")
```
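For illustration, the same markers attached to a hypothetical test function; the URL and names are placeholders:

```python
import pytest

@pytest.mark.allure_label("User Management", label_type="feature")
@pytest.mark.allure_label("User Registration", label_type="story")
@pytest.mark.allure_link("http://example.com/issue/123", link_type="issue", name="Bug-123")
def test_register_user():
    # Placeholder body; the markers above only add Allure report metadata.
    assert True
```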
Decorate test functions with metadata using static decorators from the allure module for comprehensive test documentation:

```python
@allure.title(name: str) # Set test title
@allure.description(description: str) # Set test description
@allure.description_html(html_description: str) # Set HTML description
@allure.severity(severity_level: str) # Set test severity
@allure.epic(*epics: str) # Add epic labels
@allure.feature(*features: str) # Add feature labels
@allure.story(*stories: str) # Add story labels
@allure.suite(suite_name: str) # Set suite label
@allure.parent_suite(parent_suite_name: str) # Set parent suite
@allure.sub_suite(sub_suite_name: str) # Set sub suite
@allure.tag(*tags: str) # Add tags
@allure.id(test_id: str) # Set test ID
@allure.link(url: str, link_type: str, name: str) # Add link
@allure.issue(url: str, name: str) # Add issue link
@allure.testcase(url: str, name: str) # Add test case link
@allure.manual # Mark as manual test
```
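A brief sketch combining the suite-, tag-, and link-related decorators that the earlier example does not cover; all URLs and IDs are placeholders:

```python
import allure

@allure.suite("Account Management")
@allure.tag("smoke", "regression")
@allure.id("TC-42")
@allure.link("http://example.com/docs/login", link_type="link", name="Spec")
@allure.issue("http://example.com/issue/123", name="Bug-123")
@allure.testcase("http://example.com/tms/TC-42", name="TC-42")
def test_password_reset():
    # Placeholder body; the decorators above only enrich the report metadata.
    assert True
```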
### Steps and Attachments

Enhance test reporting with step-by-step execution tracking and rich attachments, including files, screenshots, and data:

```python
@allure.step(title: str) # Step decorator
allure.step(title: str) # Step context manager
allure.attach(body: Any, name: str, attachment_type: str) # Attach data
allure.attach.file(source: str, name: str, attachment_type: str) # Attach file
```
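A minimal sketch of step and attachment usage, assuming the standard allure.attachment_type alias; the file path in the commented line is a placeholder:

```python
import json
import allure

@allure.step("Prepare payload for {username}")
def prepare_payload(username):
    return {"username": username}

def test_attachments():
    payload = prepare_payload("test_user")
    # Attach in-memory data with an explicit attachment type.
    allure.attach(
        json.dumps(payload, indent=2),
        name="request-payload",
        attachment_type=allure.attachment_type.JSON,
    )
    # Attach an existing file (placeholder path):
    # allure.attach.file("screenshots/login.png", name="login-screenshot",
    #                    attachment_type=allure.attachment_type.PNG)
    assert payload["username"] == "test_user"
```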
### Dynamic Test Enhancement

Dynamically modify test metadata, add parameters, and enhance test reporting during test execution using the allure module's dynamic API:

```python
# Key dynamic functions (from allure module)
allure.dynamic.title(name: str) # Set test title
allure.dynamic.description(description: str) # Set test description
allure.dynamic.description_html(html_description: str) # Set HTML description
allure.dynamic.parameter(name: str, value: Any, excluded: bool, mode: str) # Add test parameter
allure.dynamic.label(label_type: str, label_value: str) # Add test label
allure.dynamic.severity(severity_level: str) # Set test severity
allure.dynamic.epic(*epics: str) # Add epic labels
allure.dynamic.feature(*features: str) # Add feature labels
allure.dynamic.story(*stories: str) # Add story labels
allure.dynamic.suite(suite_name: str) # Set suite label
allure.dynamic.parent_suite(parent_suite_name: str) # Set parent suite
allure.dynamic.sub_suite(sub_suite_name: str) # Set sub suite
allure.dynamic.tag(*tags: str) # Add tags
allure.dynamic.id(test_id: str) # Set test ID
allure.dynamic.link(url: str, link_type: str, name: str) # Add test link
allure.dynamic.issue(url: str, name: str) # Add issue link
allure.dynamic.testcase(url: str, name: str) # Add test case link
allure.dynamic.manual() # Mark as manual test
```

This capability relies on the main allure module rather than the plugin directly.
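A sketch of the dynamic API inside a parametrized test; the parameter values and URL are illustrative:

```python
import allure
import pytest

@pytest.mark.parametrize("role", ["admin", "guest"])
def test_dashboard_access(role):
    # Metadata set at runtime, per parametrized case.
    allure.dynamic.title(f"Dashboard access as {role}")
    allure.dynamic.severity(allure.severity_level.NORMAL)
    allure.dynamic.parameter("role", role)
    allure.dynamic.link("http://example.com/docs/dashboard", name="Spec")
    # Placeholder assertion standing in for real checks.
    assert role in ("admin", "guest")
```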
```python
class LabelType:
    """Standard label types for test categorization."""
    SEVERITY = "severity"
    FEATURE = "feature"
    STORY = "story"
    EPIC = "epic"
    ID = "as_id"
    SUITE = "suite"
    PARENT_SUITE = "parentSuite"
    SUB_SUITE = "subSuite"
    HOST = "host"
    THREAD = "thread"
    FRAMEWORK = "framework"
    LANGUAGE = "language"
    TAG = "tag"


class Severity:
    """Test severity levels for prioritization."""
    BLOCKER = "blocker"
    CRITICAL = "critical"
    NORMAL = "normal"
    MINOR = "minor"
    TRIVIAL = "trivial"


class AttachmentType:
    """Standard attachment types for test artifacts."""
    TEXT = "text/plain"
    CSV = "text/csv"
    TSV = "text/tab-separated-values"
    URI_LIST = "text/uri-list"
    HTML = "text/html"
    XML = "application/xml"
    JSON = "application/json"
    YAML = "application/yaml"
    PCAP = "application/vnd.tcpdump.pcap"
    PNG = "image/png"
    JPG = "image/jpg"
    SVG = "image/svg+xml"
    GIF = "image/gif"
    BMP = "image/bmp"
    TIFF = "image/tiff"
    MP4 = "video/mp4"
    OGG = "video/ogg"
    WEBM = "video/webm"
    PDF = "application/pdf"


class ParameterMode:
    """Parameter display modes for test parameters."""
    HIDDEN = "hidden"   # Parameter hidden from report
    MASKED = "masked"   # Parameter value masked in report
    DEFAULT = None      # Parameter shown normally
```
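A short sketch exercising these constants through the allure.severity_level, allure.attachment_type, and allure.parameter_mode aliases (the parameter_mode alias and the mode argument assume a recent allure-python release); the credentials are placeholders:

```python
import allure

@allure.severity(allure.severity_level.CRITICAL)
def test_login_masked_password():
    # Mask the sensitive value in the report (placeholder credentials).
    allure.dynamic.parameter("username", "test_user")
    allure.dynamic.parameter("password", "s3cr3t", mode=allure.parameter_mode.MASKED)
    # Attach tabular data with an explicit attachment type.
    allure.attach(
        "username,status\ntest_user,ok\n",
        name="login-audit",
        attachment_type=allure.attachment_type.CSV,
    )
    assert True
```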
The plugin handles various error conditions.

Test status mapping:
- AssertionError or pytest.fail.Exception → FAILED
- pytest.skip.Exception → SKIPPED
- Any other exception → BROKEN
- No exception → PASSED
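For illustration, three toy tests that would be reported as FAILED, BROKEN, and SKIPPED respectively:

```python
import pytest

def test_reported_as_failed():
    assert 1 + 1 == 3          # AssertionError -> FAILED

def test_reported_as_broken():
    raise ValueError("boom")   # unexpected exception -> BROKEN

def test_reported_as_skipped():
    pytest.skip("not relevant here")  # -> SKIPPED
```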