Pragmatic Testing Framework for Python with BDD-style syntax and pluggable architecture
49
Pending
Does it follow best practices?
Impact
49%
1.08x — average score across 10 eval scenarios
Pending
The risk profile of this skill
Skip conditions, selective execution, and test flow control mechanisms for managing which tests run under different conditions.
Unconditionally skip tests with optional reason messages.
def skip(reason: str = "") -> Callable:
    """
    Decorator to unconditionally skip a test scenario or step.

    Args:
        reason: Optional reason for skipping the test

    Returns:
        Decorator that marks the test as skipped
    """


import vedro
from vedro import skip
@skip("Feature not implemented yet")
class Scenario(vedro.Scenario):
    """Example: class-level @skip — the whole scenario is reported as skipped."""

    subject = "new payment feature"

    def when_payment_is_processed(self):
        # This test will be skipped
        pass

    def then_payment_succeeds(self):
        pass
# Skip specific methods
class Scenario(vedro.Scenario):
    """Example: @skip on a single step.

    NOTE(review): presumably the undecorated steps still run — confirm
    against the vedro docs.
    """

    subject = "user management"

    def given_user_exists(self):
        self.user = create_user("test@example.com")

    @skip("Delete functionality under review")
    def when_user_is_deleted(self):
        delete_user(self.user.id)

    def then_user_no_longer_exists(self):
        assert not user_exists(self.user.id)

Skip tests based on runtime conditions with optional reason messages.
def skip_if(cond: Callable[[], bool], reason: str = "") -> Callable:
    """
    Decorator to conditionally skip a test based on a condition.

    Args:
        cond: Callable returning a boolean condition to evaluate
        reason: Optional reason for skipping the test

    Returns:
        Decorator that conditionally skips the test
    """
    # NOTE(review): some examples below pass a plain bool instead of a
    # callable — confirm skip_if accepts both forms.


import sys
import os
import vedro
from vedro import skip_if
@skip_if(lambda: sys.platform != "linux", "Linux-only functionality")
class Scenario(vedro.Scenario):
    """Example: platform-conditional skip via a lazily evaluated lambda."""

    subject = "Linux system calls"

    def when_system_call_is_made(self):
        # os.system returns the command's exit status
        result = os.system("ps aux")
        self.exit_code = result

    def then_call_succeeds(self):
        assert self.exit_code == 0
@skip_if(lambda: not os.environ.get("DATABASE_URL"), "Database not configured")
class Scenario(vedro.Scenario):
    """Example: skip when required configuration (DATABASE_URL) is absent."""

    subject = "database operations"

    def given_database_connection(self):
        self.db = connect_to_database()

    def when_data_is_queried(self):
        self.result = self.db.query("SELECT 1")

    def then_query_succeeds(self):
        assert self.result is not None
# Function-based with complex conditions
# NOTE(review): the condition here is a plain bool evaluated at import time,
# not a lambda — confirm skip_if accepts both forms.
EXTERNAL_API_AVAILABLE = check_external_service()

@skip_if(not EXTERNAL_API_AVAILABLE, "External API not available")
@scenario("External API integration")
def test_external_api():
    @when("calling external API")
    def action():
        return call_external_api("/endpoint")

    @then("API responds successfully")
    def verification(response):
        ensure(response.status_code).equals(200)

Run only marked tests, ignoring all others.
def only() -> Callable:
    """
    Decorator to mark a test for selective execution.

    When any test is marked with @only, only those marked tests will run.

    Returns:
        Decorator that marks the test for exclusive execution
    """
    # NOTE(review): the examples below apply @only without parentheses —
    # confirm whether `only` is used bare or called as a factory.


import vedro
from vedro import only
# Only this scenario will run when @only is present anywhere
@only
class Scenario(vedro.Scenario):
    subject = "critical functionality"

    def when_critical_operation_is_performed(self):
        self.result = perform_critical_operation()

    def then_operation_succeeds(self):
        assert self.result.success
# This scenario will be ignored due to @only above
class Scenario(vedro.Scenario):
    subject = "normal functionality"

    def when_normal_operation_is_performed(self):
        self.result = perform_normal_operation()

    def then_operation_works(self):
        assert self.result.success
# Function-based selective execution
@only
@scenario("Priority test case")
def test_priority_feature():
    @when("priority feature is used")
    def action():
        return use_priority_feature()

    @then("feature works correctly")
    def verification(result):
        ensure(result.status).equals("success")

Combine different execution control decorators for complex test management.
from vedro import Scenario, skip_if, only, params
# Conditional execution with parameters
@params("development", should_run=True)
@params("production", should_run=False)
@skip_if(lambda env, should_run: not should_run, "Not for this environment")
class Scenario(vedro.Scenario):
    """Example: @params expands the scenario; @skip_if filters the variants.

    NOTE(review): the skip_if lambda takes the same arguments as __init__ —
    confirm vedro forwards the params to the condition.
    """

    subject = "environment-specific testing"

    def __init__(self, environment, should_run):
        self.environment = environment
        self.should_run = should_run

    def when_environment_feature_is_used(self):
        self.result = use_environment_feature(self.environment)

    def then_feature_works_correctly(self):
        assert self.result.success
# Method-level control combined with class-level
@skip_if(not INTEGRATION_TESTS_ENABLED, "Integration tests disabled")
class Scenario(vedro.Scenario):
subject = "integration testing"
def given_services_are_running(self):
self.services = start_test_services()
@only # Focus on this specific method during development
def when_service_integration_is_tested(self):
self.result = test_service_integration(self.services)
def then_integration_works(self):
assert self.result.all_services_responding
@skip("Flaky test - needs investigation")
def then_performance_is_acceptable(self):
assert self.result.average_response_time < 1.0Create reusable skip conditions for common scenarios:
import platform
import shutil
# Common skip conditions
def requires_docker():
    """Return True when a `docker` executable is found on PATH."""
    docker_path = shutil.which("docker")
    return docker_path is not None
def requires_internet():
    """Return True when an outbound HTTP request succeeds (best effort)."""
    import urllib.request  # stdlib replacement for the third-party `requests`
    try:
        urllib.request.urlopen("http://google.com", timeout=5)
        return True
    except OSError:
        # DNS failure, timeout, connection refused -> treat as offline.
        # (The original bare `except:` also swallowed KeyboardInterrupt.)
        return False
def requires_python_version(major, minor):
    """Return True when the running interpreter is at least major.minor."""
    minimum = (major, minor)
    return sys.version_info >= minimum
# Usage in tests
@skip_if(not requires_docker(), "Docker not available")
class Scenario(vedro.Scenario):
    subject = "Docker container testing"

    def when_container_is_started(self):
        self.container = start_docker_container("test-image")

    def then_container_is_running(self):
        assert self.container.status == "running"
# Stacked skip_if decorators: the scenario runs only when every condition
# allows it (internet reachable AND Python >= 3.9).
@skip_if(not requires_internet(), "Internet connection required")
@skip_if(not requires_python_version(3, 9), "Python 3.9+ required")
class Scenario(vedro.Scenario):
    subject = "online API with modern Python features"

    def when_api_is_called(self):
        # Uses Python 3.9+ features
        self.result = api_call() | other_operation()

    def then_response_is_valid(self):
        assert self.result.is_valid()

Control test execution based on environment variables and configuration:
import os
# Environment detection helpers
def is_ci_environment():
    """Return True when the CI environment variable is exactly "true"."""
    ci_value = os.environ.get("CI")
    return ci_value == "true"
def is_development_environment():
    """Return True when ENVIRONMENT is exactly "development"."""
    env_name = os.environ.get("ENVIRONMENT")
    return env_name == "development"
def has_feature_flag(flag_name):
    """Return True when FEATURE_<FLAG_NAME> is set to "enabled"."""
    env_key = "FEATURE_" + flag_name.upper()
    return os.environ.get(env_key) == "enabled"
# Usage patterns
@skip_if(is_ci_environment(), "Skip in CI - requires manual setup")
class Scenario(vedro.Scenario):
    subject = "manual testing workflow"

    def when_manual_process_is_triggered(self):
        # Requires human interaction
        self.result = trigger_manual_process()

    def then_process_completes(self):
        assert self.result.completed
@only  # Focus during development
@skip_if(not is_development_environment(), "Development only")
class Scenario(vedro.Scenario):
    subject = "debugging helper functions"

    def when_debug_info_is_requested(self):
        self.debug_info = get_debug_information()

    def then_debug_info_is_comprehensive(self):
        # Arbitrary threshold: expects a rich debug payload
        assert len(self.debug_info.keys()) > 10
@skip_if(not has_feature_flag("new_payment_flow"), "Feature flag disabled")
class Scenario(vedro.Scenario):
    subject = "new payment processing flow"

    def when_payment_is_processed_with_new_flow(self):
        self.result = process_payment_new_flow(amount=100)

    def then_payment_succeeds(self):
        assert self.result.status == "completed"

Use execution control for organizing large test suites:
# Mark smoke tests for quick feedback
@only  # Enable for quick smoke test runs
class SmokeTestScenario(vedro.Scenario):
    subject = "basic application functionality"

    def when_app_starts(self):
        self.app = start_application()

    def then_app_is_responsive(self):
        assert self.app.health_check()
# Skip slow tests by default
@skip_if(not os.environ.get("RUN_SLOW_TESTS"), "Slow tests disabled")
class SlowIntegrationScenario(vedro.Scenario):
    subject = "full system integration"

    def when_full_workflow_is_executed(self):
        self.result = run_full_integration_workflow()

    def then_workflow_completes_successfully(self):
        assert self.result.success
# Skip flaky tests until fixed
@skip("Intermittent failure - ticket #1234")
class FlakyTestScenario(vedro.Scenario):
    """Quarantined scenario: skipped until ticket #1234 is resolved."""

    subject = "network-dependent functionality"

    def when_network_operation_is_performed(self):
        self.result = perform_network_operation()

    def then_operation_succeeds(self):
        # Extraction had fused "success" with the following navigation text
        # ("successdocs"), repaired here.
        assert self.result.success

docs
evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9
scenario-10