Behavior-driven development testing framework for Python using Gherkin syntax
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Command-line interface and configuration system for customizing test execution, output formatting, test selection, and runtime behavior. Supports extensive options for different testing environments.
Main configuration class that manages all behave configuration options and command-line argument parsing.
class Configuration:
"""
Main configuration class managing all behave settings.
Attributes:
- format: list, output formatter names
- outdir: str, output directory path
- outfiles: list, output file paths for formatters
- dry_run: bool, show steps without executing them
- verbosity: int, output verbosity level (0-2)
- stop: bool, stop on first failure
- tags: list, tag expressions for test selection
- include_re: str, regex for including features by filename
- exclude_re: str, regex for excluding features by filename
- name: list, scenario name patterns to include
- junit: bool, enable JUnit XML output
- junit_directory: str, directory for JUnit XML files
- logging_level: str, logging level ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG")
- logging_format: str, log message format
- logging_datefmt: str, log timestamp format
- log_capture: bool, capture logging output
- no_capture: bool, disable output capture
- no_capture_stderr: bool, disable stderr capture
- color: bool, enable colored output
- lang: str, language for step keywords
- show_source: bool, show source location of step definitions
- show_timings: bool, show step execution times
- expand: bool, expand scenario outlines in formatters
- show_multiline: bool, show multiline text in output
Methods:
- load_configuration(filenames): Load configuration from files
- setup_outputs(): Initialize output formatters
- setup_stage(stage): Configure for specific execution stage
"""Main CLI entry point that parses arguments and executes tests.
def main(argv: list = None) -> int:
"""
Main CLI entry point for behave command.
Parameters:
- argv: list, command line arguments (default: sys.argv)
Returns:
int: Exit code (0 for success, non-zero for failure)
"""Exception class for configuration-related errors and validation failures.
class ConfigError(Exception):
"""
Configuration error exception.
Raised when:
- Invalid configuration file format
- Conflicting configuration options
- Missing required configuration values
- Invalid formatter specifications
"""Behave loads configuration from multiple sources in priority order:
# Basic execution
behave
# Specify features directory
behave features/
# Select specific feature file
behave features/login.feature
# Run with specific formatter
behave --format json --outfile results.json
# Run with tag selection
behave --tags @smoke --tags ~@slow
# Dry run (show steps without executing)
behave --dry-run
# Stop on first failure
behave --stop
# Increase verbosity
behave -v

Behave reads configuration from .behaverc, setup.cfg, or pyproject.toml:
[behave]
format = pretty
outdir = reports
tags = @current
stop = true
verbosity = 2
color = true

[behave]
format = pretty,json
outdir = test-reports
tags = @smoke,~@slow
junit = true
junit_directory = test-reports/junit

[tool.behave]
format = ["pretty", "json"]
outdir = "reports"
tags = ["@smoke", "~@slow"]
color = true
verbosity = 2

export BEHAVE_VERBOSITY=2
export BEHAVE_FORMAT=json
export BEHAVE_OUTDIR=reports

Powerful tag-based test selection system:
# Run scenarios tagged with @smoke
behave --tags @smoke
# Run scenarios NOT tagged with @slow
behave --tags ~@slow
# Run scenarios tagged with @smoke AND not @slow
behave --tags @smoke --tags ~@slow

# OR logic (comma separation)
behave --tags @smoke,@regression
# AND logic with NOT
behave --tags @smoke --tags ~@manual
# Complex expressions
behave --tags "(@smoke or @regression) and not @slow"# Available formatter options:
formatters = [
"plain", # Simple text output
"pretty", # Enhanced text with colors
"json", # JSON format for tooling
"json.pretty", # Pretty-printed JSON
"junit", # JUnit XML format
"progress", # Progress bar
"progress2", # Compact progress display
"progress3", # Detailed progress display
"rerun", # Failed scenario rerun info
"tags", # List all available tags
"steps", # List all step definitions
"steps.doc", # Step definitions with docstrings
"steps.code", # Step definitions with source
"steps.usage", # Step usage statistics
]

# Output to multiple formats
behave --format pretty --format json --outfile results.json
# Separate output files
behave --format pretty --format json:results.json --format junit:junit.xml

# Configuration options for output capture
config.log_capture = True # Capture logging output
config.no_capture = False # Enable stdout/stderr capture
config.no_capture_stderr = False # Enable stderr capture
config.show_source = True # Show step definition locations
config.show_timings = True  # Show execution times

# Execution behavior options
config.dry_run = False # Actually execute steps
config.stop = False # Continue after failures
config.wip = False # Work-in-progress mode
config.expand = False # Expand scenario outlines
config.lang = "en" # Language for keywords# File and scenario selection
config.include_re = r".*" # Include pattern
config.exclude_re = None # Exclude pattern
config.name = [] # Scenario name patterns
config.exclude_names = []  # Excluded scenario names

# environment.py - Central configuration point
def before_all(context):
# Global test setup
context.config.setup_stage("before_all")
setup_test_environment(context.config)
def before_feature(context, feature):
# Feature-level setup based on tags
if "database" in feature.tags:
setup_database(context.config.database_url)
def after_all(context):
# Global cleanup
cleanup_test_environment()

@given('I configure the system')
def step_impl(context):
# Access configuration in steps
base_url = context.config.base_url
timeout = context.config.timeout
# Use configuration values

# Custom configuration class
class CustomConfiguration(Configuration):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.custom_option = kwargs.get('custom_option', 'default')
self.api_base_url = kwargs.get('api_base_url', 'http://localhost')

# Configuration for custom plugins
config.plugin_config = {
'screenshot_on_failure': True,
'screenshot_dir': 'screenshots',
'browser_type': 'chrome',
'headless': False
}

# Development workflow
behave --tags @current --stop -v
# CI/CD pipeline
behave --format json --outfile results.json --format junit --junit-directory reports/
# Smoke testing
behave --tags @smoke --format pretty --no-capture
# Regression testing
behave --tags @regression --format progress --outdir reports/
# Debug mode
behave --tags @debug --dry-run --show-source -v
# Parallel execution preparation
behave --format rerun --outfile failed_scenarios.txt

# Production-like environment
behave --define environment=prod --tags ~@local_only
# Local development
behave --define environment=local --no-capture --stop
# Staging environment
behave --define environment=staging --format json --outfile staging_results.json

Install with Tessl CLI
npx tessl i tessl/pypi-behave