Pragmatic Testing Framework for Python with BDD-style syntax and pluggable architecture
49
Pending
Does it follow best practices?
Impact
49%
1.08x — Average score across 10 eval scenarios
Pending
The risk profile of this skill
Command-line interface for running tests and managing plugins with extensive configuration options.
Main function to run vedro tests programmatically or via command line.
def run(*, plugins=None) -> None:
    """
    Primary entry point to run vedro tests.

    Args:
        plugins: Deprecated parameter for plugin specification.
            Use vedro.cfg.py configuration instead.

    Raises:
        DeprecationWarning: If plugins parameter is used
    """

import vedro

# Run tests programmatically
vedro.run()
# The above is equivalent to running from command line:
# python -m vedro
# or just: vedro

Vedro provides a comprehensive CLI with multiple commands and options.
Execute test scenarios with various options and filters.
# Basic usage - runs all tests
vedro
# Explicit run command
vedro run
# Run with specific project directory
vedro run --project-dir /path/to/tests
# Run with plugin-specific arguments (plugin-dependent)
vedro run --seed 12345 # Set random seed
vedro run --order random # Random test order
vedro run --slice 1/4 # Run first quarter of tests
vedro run --last-failed # Rerun only failed tests
vedro run --dry-run # Show what would run without executing
vedro run --tags smoke,integration # Run tests with specific tags
vedro run --reporter rich # Use rich console reporter
vedro run --artifacts-dir artifacts/  # Specify artifacts directory

Display the current vedro version.
vedro version
# Output: Vedro 1.14.3

Manage plugins through the command line interface.
# List all available plugins
vedro plugin list
# Show popular plugins from registry
vedro plugin top
# Install and enable plugins
vedro plugin install vedro-reporter-html
vedro plugin install vedro-database-cleaner
# Enable/disable plugins
vedro plugin enable vedro-reporter-html
vedro plugin disable vedro-database-cleaner
# Multiple plugin operations
vedro plugin install plugin1 plugin2 plugin3

The CLI follows a modular command architecture with extensible argument parsing.
# Base command interface (for custom commands)
class Command:
    """Abstract base class for CLI commands."""
    def __init__(self, config: Type[Config], arg_parser: CommandArgumentParser): ...
    async def run(self) -> None: ...

class CommandArgumentParser:
    """Enhanced argument parser for vedro commands."""
    def add_argument(self, *args, **kwargs): ...
    def parse_args(self): ...
    def parse_known_args(self): ...

Core commands provided by the vedro CLI system.
class RunCommand(Command):
    """
    Command for executing test scenarios.
    Handles test discovery, plugin initialization, scenario execution,
    and result reporting with extensive configuration options.
    """
    async def run(self) -> None: ...

class VersionCommand(Command):
    """
    Command for displaying version information.
    Shows the current vedro version in a formatted output.
    """
    async def run(self) -> None: ...

class PluginCommand(Command):
    """
    Command for plugin management operations.
    Supports listing, installing, enabling, and disabling plugins
    through various subcommands.
    """
    async def run(self) -> None: ...

# Run all tests in current directory
vedro
# Run tests with verbose output
vedro run --reporter rich
# Run tests in specific directory
vedro run --project-dir tests/
# Run with specific ordering
vedro run --order random --seed 42
# Run subset of tests
vedro run --slice 2/4  # Run second quarter of tests

# Run only tests with specific tags
vedro run --tags "smoke,critical"
# Exclude tests with certain tags
vedro run --exclude-tags "slow,flaky"
# Run only previously failed tests
vedro run --last-failed
# Run specific test files or patterns
vedro run scenarios/user_auth/
vedro run scenarios/test_login.py

# Install popular reporting plugin
vedro plugin install vedro-reporter-html
# Install multiple plugins at once
vedro plugin install vedro-screenshot-capture vedro-allure-reporter
# List installed plugins with status
vedro plugin list
# Enable a disabled plugin
vedro plugin enable vedro-reporter-html
# Disable a plugin temporarily
vedro plugin disable vedro-slow-tests

# Run with specific configuration file
vedro run --config custom_vedro.cfg.py
# Set environment variables
TEST_ENV=staging vedro run
# Run with different output directory
vedro run --artifacts-dir results/$(date +%Y%m%d_%H%M%S)
# Run with custom timeout
vedro run --timeout 300  # 5 minutes per scenario

# Dry run - show what would execute without running
vedro run --dry-run
# Run with detailed debugging information
vedro run --verbose --debug
# Run with performance monitoring
vedro run --profile --performance-report
# Stop on first failure
vedro run --fail-fast
# Run with specific reporter for CI
CI=true vedro run --reporter silent --output-format junit

Plugins can extend the CLI with custom arguments through the event system.
from vedro.core import Plugin, PluginConfig
from vedro.events import ArgParseEvent

class CustomPlugin(Plugin):
    """Plugin that adds custom CLI arguments."""
    def subscribe(self, dispatcher):
        dispatcher.listen(ArgParseEvent, self.add_arguments)

    def add_arguments(self, event: ArgParseEvent):
        """Add custom arguments to the CLI parser."""
        parser = event.arg_parser
        parser.add_argument(
            "--custom-option",
            help="Custom option provided by plugin",
            default="default_value"
        )
        parser.add_argument(
            "--enable-feature",
            action="store_true",
            help="Enable special feature"
        )
        parser.add_argument(
            "--output-format",
            choices=["json", "xml", "csv"],
            default="json",
            help="Output format for results"
        )

class CustomPluginConfig(PluginConfig):
    plugin = CustomPlugin
    enabled = True

Plugins can access parsed arguments through events.
from vedro.events import ArgParsedEvent

class ArgumentConsumerPlugin(Plugin):
    """Plugin that uses CLI arguments."""
    def __init__(self, config):
        super().__init__(config)
        self.args = None

    def subscribe(self, dispatcher):
        dispatcher.listen(ArgParsedEvent, self.handle_args)

    def handle_args(self, event: ArgParsedEvent):
        """Process parsed CLI arguments."""
        self.args = event.args
        # Access custom arguments
        if hasattr(self.args, 'custom_option'):
            print(f"Custom option value: {self.args.custom_option}")
        if hasattr(self.args, 'enable_feature') and self.args.enable_feature:
            print("Special feature enabled!")
        # Configure plugin behavior based on arguments
        if hasattr(self.args, 'output_format'):
            self.configure_output_format(self.args.output_format)

Create wrapper scripts for different environments:
#!/bin/bash
# run_staging_tests.sh
export TEST_ENV=staging
export DATABASE_URL=postgresql://staging-db:5432/testdb
vedro run \
--tags "smoke,integration" \
--reporter html \
--artifacts-dir results/staging \
  --timeout 600

#!/bin/bash
# run_ci_tests.sh
export CI=true
export TEST_ENV=ci
vedro run \
--reporter silent \
--output-format junit \
--artifacts-dir $CI_ARTIFACTS_DIR \
--fail-fast \
  --last-failed

Use advanced filtering for complex test suites:
# Run performance tests only on weekends
if [ $(date +%u) -gt 5 ]; then
vedro run --tags performance --timeout 1800
else
vedro run --exclude-tags "performance,slow"
fi
# Run different test suites based on git branch
BRANCH=$(git branch --show-current)
case $BRANCH in
"main"|"master")
vedro run --tags "smoke,critical" --fail-fast
;;
"develop")
vedro run --exclude-tags "flaky"
;;
"feature/"*)
vedro run --tags "unit,integration" --slice 1/2
;;
*)
vedro run --dry-run
;;
esac

Run multiple test configurations in sequence:
#!/bin/bash
# comprehensive_test_run.sh
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
RESULTS_DIR="results/$TIMESTAMP"
# Create results directory
mkdir -p "$RESULTS_DIR"
# Run smoke tests first
echo "Running smoke tests..."
vedro run \
--tags smoke \
--reporter html \
--artifacts-dir "$RESULTS_DIR/smoke" \
--fail-fast
if [ $? -eq 0 ]; then
echo "Smoke tests passed, running full suite..."
# Run full test suite
vedro run \
--exclude-tags "performance,flaky" \
--reporter html \
--artifacts-dir "$RESULTS_DIR/full" \
--parallel 4
# Run performance tests if full suite passes
if [ $? -eq 0 ]; then
echo "Running performance tests..."
vedro run \
--tags performance \
--reporter json \
--artifacts-dir "$RESULTS_DIR/performance" \
--timeout 3600
fi
else
echo "Smoke tests failed, skipping full suite"
exit 1
fi
# Generate combined report
echo "Generating combined report..."
python generate_combined_report.py "$RESULTS_DIR"

Example GitHub Actions workflow:
name: Vedro Tests
on: [push, pull_request]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.8, 3.9, "3.10", "3.11"]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          pip install vedro
          pip install -r requirements-test.txt
      - name: Run smoke tests
        run: |
          vedro run \
            --tags smoke \
            --reporter silent \
            --fail-fast
      - name: Run full test suite
        if: success()
        run: |
          vedro run \
            --exclude-tags "slow,flaky" \
            --reporter junit \
            --artifacts-dir test-results
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-results-${{ matrix.python-version }}
          path: test-results/

The CLI automatically loads configuration from vedro.cfg.py:
# vedro.cfg.py
import vedro
from pathlib import Path

class Config(vedro.Config):
    """Project-specific vedro configuration."""
    # Default CLI behavior
    default_tags = ["smoke", "integration"]
    exclude_tags = ["flaky", "manual"]

    # Output configuration
    artifacts_dir = Path("test_artifacts")

    class Plugins(vedro.Config.Plugins):
        class RichReporter(vedro.Config.Plugins.RichReporter):
            enabled = True
            show_timings = True

        class HtmlReporter(HtmlReporterConfig):
            enabled = True
            output_file = "test_report.html"

Global configuration in ~/.vedro/config.py:
# ~/.vedro/config.py
import vedro
from pathlib import Path
import os

class GlobalConfig(vedro.Config):
    """User-specific vedro configuration."""
    # User preferences
    preferred_reporter = "rich"

    class Plugins(vedro.Config.Plugins):
        class LastFailed(vedro.Config.Plugins.LastFailed):
            enabled = True
            cache_file = Path.home() / ".vedro" / "last_failed"

        class PerformanceMonitor(PerformanceMonitorConfig):
            enabled = os.environ.get("VEDRO_PERF_MONITOR") == "true"

docs
evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9
scenario-10