Pragmatic Testing Framework for Python with BDD-style syntax and pluggable architecture
49
Pending
Does it follow best practices?
Impact
49%
1.08x — Average score across 10 eval scenarios
Pending
The risk profile of this skill
Comprehensive artifact attachment system and temporary file management with automatic cleanup.
Base artifact system for attaching various types of data to tests.
class Artifact:
    """Abstract base class for test artifacts.

    An artifact is a piece of data (a file, an in-memory object, etc.)
    attached to a test for debugging, reporting, or evidence collection.
    """

    pass
class FileArtifact(Artifact):
    """Artifact backed by a file on the filesystem.

    Suitable for attaching log files, screenshots, generated reports,
    and other disk-based evidence.
    """

    pass
class MemoryArtifact(Artifact):
    """Artifact whose payload lives directly in memory.

    Suitable for attaching strings, JSON documents, small binary blobs,
    and similar in-process data.
    """

    pass


# Functions to attach artifacts at different scopes within the test execution.
def attach_artifact(artifact: Artifact) -> None:
    """Attach an artifact to the currently active context (step or scenario).

    Args:
        artifact: The artifact to attach.
    """
def attach_scenario_artifact(artifact: Artifact) -> None:
    """Attach an artifact at scenario scope.

    Args:
        artifact: The artifact to attach to the current scenario.
    """
def attach_step_artifact(artifact: Artifact) -> None:
    """Attach an artifact at step scope.

    Args:
        artifact: The artifact to attach to the current step.
    """
def attach_global_artifact(artifact: Artifact) -> None:
    """Attach an artifact globally (shared across the whole test run).

    Args:
        artifact: The artifact to attach globally.
    """


from vedro import scenario, given, when, then, ensure
from vedro import FileArtifact, attach_scenario_artifact, attach_step_artifact
import json
import tempfile
import os
@scenario("Report generation with artifacts")
def test_report_generation():
    @given("test data")
    def setup():
        # Build the input fixture and persist it as a JSON file.
        test_data = {"users": [{"id": 1, "name": "John"}, {"id": 2, "name": "Jane"}]}
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
            json.dump(test_data, f, indent=2)
            data_file = f.name
        # Keep the raw input with the scenario for later inspection.
        attach_scenario_artifact(FileArtifact(data_file))
        return {"data_file": data_file, "data": test_data}

    @when("report is generated")
    def action(context):
        # Render the report and persist it as an HTML file.
        report_content = generate_user_report(context["data"])
        with tempfile.NamedTemporaryFile(mode='w', suffix='.html', delete=False) as f:
            f.write(report_content)
            report_file = f.name
        # Attach the rendered report to this step.
        attach_step_artifact(FileArtifact(report_file))
        return {"report_file": report_file, "content": report_content}

    @then("report contains expected data")
    def verification(result):
        # Both seeded users must appear in the rendered output.
        for expected_name in ("John", "Jane"):
            ensure(result["content"]).contains(expected_name)
        ensure(os.path.exists(result["report_file"])).is_true()


from vedro import MemoryArtifact, attach_artifact
import json
@scenario("API testing with request/response artifacts")
def test_api_interaction():
    @when("API request is made")
    def action():
        request_data = {"username": "test_user", "action": "login"}
        # Record the outgoing payload before the call is made.
        attach_artifact(MemoryArtifact(
            name="request_payload",
            content=json.dumps(request_data, indent=2),
            content_type="application/json",
        ))
        response = api_call("/login", request_data)
        # Record the raw response body as well.
        attach_artifact(MemoryArtifact(
            name="response_data",
            content=response.text,
            content_type="application/json",
        ))
        return {"request": request_data, "response": response}

    @then("API responds correctly")
    def verification(result):
        ensure(result["response"].status_code).equals(200)
        # Summarize timing and outcome details for the report.
        analysis = {
            "response_time": result["response"].elapsed.total_seconds(),
            "status": "success",
            "token_received": "token" in result["response"].json(),
        }
        attach_artifact(MemoryArtifact(
            name="test_analysis",
            content=json.dumps(analysis, indent=2),
            content_type="application/json",
        ))


# Automatic temporary file and directory creation with cleanup.
def create_tmp_dir() -> Path:
    """Create a temporary directory that is cleaned up automatically.

    Returns:
        Path to the newly created temporary directory.
    """
def create_tmp_file(suffix: str = "", content: str = "") -> Path:
    """Create a temporary file that is cleaned up automatically.

    Args:
        suffix: File extension/suffix for the temporary file.
        content: Initial content to write to the file.

    Returns:
        Path to the newly created temporary file.
    """


from vedro import scenario, given, when, then, ensure, create_tmp_dir
import os
import shutil
@scenario("File processing workflow")
def test_file_processing():
    @given("temporary workspace")
    def setup():
        # Lay out an isolated workspace with input/ and output/ folders.
        workspace = create_tmp_dir()
        input_dir = workspace / "input"
        output_dir = workspace / "output"
        input_dir.mkdir()
        output_dir.mkdir()
        # Seed the input folder with three small text files.
        test_files = []
        for idx in range(3):
            seeded = input_dir / f"test_{idx}.txt"
            seeded.write_text(f"Test content {idx}")
            test_files.append(seeded)
        return {
            "workspace": workspace,
            "input_dir": input_dir,
            "output_dir": output_dir,
            "test_files": test_files,
        }

    @when("files are processed")
    def action(context):
        # Transform each input file (example: uppercase its content).
        results = []
        for input_file in context["test_files"]:
            output_file = context["output_dir"] / f"processed_{input_file.name}"
            content = input_file.read_text().upper()
            output_file.write_text(content)
            results.append({
                "input": str(input_file),
                "output": str(output_file),
                "size": len(content),
            })
        return results

    @then("processing completes successfully")
    def verification(results):
        ensure(len(results)).equals(3)
        for result in results:
            ensure(os.path.exists(result["output"])).is_true()
            ensure(result["size"]).is_greater_than(0)
            # Verify the content was actually transformed.
            with open(result["output"]) as f:
                content = f.read()
            ensure(content).contains("TEST CONTENT")


from vedro import create_tmp_file
import json
@scenario("Configuration file testing")
def test_config_processing():
    @given("configuration file")
    def setup():
        # Write a known-good configuration to a temporary JSON file.
        config_data = {
            "database": {
                "host": "localhost",
                "port": 5432,
                "name": "test_db"
            },
            "logging": {
                "level": "INFO",
                "file": "/var/log/app.log"
            }
        }
        config_file = create_tmp_file(
            suffix=".json",
            content=json.dumps(config_data, indent=2)
        )
        return {"config_file": config_file, "expected_data": config_data}

    @when("configuration is loaded")
    def action(context):
        # Parse the configuration back from the temporary file.
        return load_config_file(str(context["config_file"]))

    @then("configuration is parsed correctly")
    def verification(loaded_config, context):
        ensure(loaded_config["database"]["host"]).equals("localhost")
        ensure(loaded_config["database"]["port"]).equals(5432)
        ensure(loaded_config["logging"]["level"]).equals("INFO")
        # The temporary file must still exist while the test is running.
        ensure(context["config_file"].exists()).is_true()


# Organize artifacts by test phases and attach them appropriately:
def _append_to_log(log_path, line):
    """Append *line* to the log file at *log_path*.

    Fix: the original called log_path.write_text(line, "a"), but
    Path.write_text() always truncates the file and its second
    positional parameter is *encoding* -- so "a" raised
    LookupError("unknown encoding: a"). Appending requires an
    explicit open(..., "a").
    """
    with log_path.open("a") as log:
        log.write(line)


@scenario("Complex workflow with multiple artifacts")
def test_complex_workflow():
    @given("initial setup")
    def setup():
        # Global artifacts for the entire test run.
        attach_global_artifact(MemoryArtifact(
            name="test_environment",
            content=json.dumps({"python_version": sys.version, "platform": platform.system()}),
            content_type="application/json"
        ))
        # Scenario-level artifact: a log file that later steps append to.
        setup_log = create_tmp_file(suffix=".log")
        setup_log.write_text("Setup phase initiated\n")
        attach_scenario_artifact(FileArtifact(setup_log))
        return {"setup_log": setup_log}

    @when("processing occurs")
    def action(context):
        # Step-level artifacts for this specific action.
        _append_to_log(context["setup_log"], "Processing started\n")
        processing_data = {"start_time": time.time(), "items_processed": 0}
        for i in range(5):
            # Simulate processing.
            time.sleep(0.1)
            processing_data["items_processed"] += 1
            # Attach a progress snapshot for each processed item.
            attach_step_artifact(MemoryArtifact(
                name=f"progress_step_{i}",
                content=json.dumps(processing_data),
                content_type="application/json"
            ))
        processing_data["end_time"] = time.time()
        _append_to_log(context["setup_log"], "Processing completed\n")
        return processing_data

    @then("workflow completes with proper documentation")
    def verification(result, context):
        # Final verification artifacts.
        summary = {
            "total_items": result["items_processed"],
            "duration": result["end_time"] - result["start_time"],
            "success": True
        }
        attach_step_artifact(MemoryArtifact(
            name="test_summary",
            content=json.dumps(summary, indent=2),
            content_type="application/json"
        ))
        # Attach the final log state.
        attach_scenario_artifact(FileArtifact(context["setup_log"]))
        ensure(result["items_processed"]).equals(5)
        ensure(summary["duration"]).is_greater_than(0.5)


# For testing applications with visual components:
def capture_screenshot(name: str) -> Path:
    """Helper that captures a screenshot into a temporary .png file."""
    destination = create_tmp_file(suffix=".png")
    # Simulate screenshot capture
    # driver.save_screenshot(str(destination))
    return destination
@scenario("UI testing with visual artifacts")
def test_ui_workflow():
    @given("application is launched")
    def setup():
        # Snapshot the UI before any interaction happens.
        initial_screenshot = capture_screenshot("initial_state")
        attach_scenario_artifact(FileArtifact(initial_screenshot))
        return {"initial_screenshot": initial_screenshot}

    @when("user performs actions")
    def action(context):
        # Capture one screenshot after each major user action.
        screenshots = []
        for action_name in ["login", "navigate_dashboard", "create_item"]:
            # Perform action (simplified).
            perform_ui_action(action_name)
            snapshot = capture_screenshot(f"after_{action_name}")
            attach_step_artifact(FileArtifact(snapshot))
            screenshots.append(snapshot)
        return {"action_screenshots": screenshots}

    @then("UI state is correct")
    def verification(result):
        # Snapshot the final UI state for the report.
        final_screenshot = capture_screenshot("final_state")
        attach_step_artifact(FileArtifact(final_screenshot))
        # Every action should have produced a screenshot file on disk.
        ensure(len(result["action_screenshots"])).equals(3)
        for screenshot in result["action_screenshots"]:
            ensure(screenshot.exists()).is_true()
            ensure(screenshot.stat().st_size).is_greater_than(1000)  # Non-empty image


# Best practices for organizing artifacts:
@scenario("Well-organized artifact management")
def test_artifact_organization():
    @given("test environment")
    def setup():
        # Use descriptive names and organize by category.
        attach_global_artifact(MemoryArtifact(
            name="environment/system_info",
            content=json.dumps({
                "os": platform.system(),
                "python": sys.version,
                "timestamp": datetime.now().isoformat()
            }),
            content_type="application/json"
        ))
        return {}

    @when("test data is processed")
    def action(context):
        # Group related artifacts with consistent naming.
        test_data = {"input": [1, 2, 3, 4, 5], "multiplier": 2}
        attach_step_artifact(MemoryArtifact(
            name="processing/input_data",
            content=json.dumps(test_data),
            content_type="application/json"
        ))
        # Process data.
        results = [x * test_data["multiplier"] for x in test_data["input"]]
        attach_step_artifact(MemoryArtifact(
            name="processing/output_data",
            content=json.dumps({"results": results}),
            content_type="application/json"
        ))
        # Create detailed log file. Fix: the original chained
        # log_file.write_text(text, "a") calls, but Path.write_text()
        # always truncates and its second positional parameter is
        # *encoding* (so "a" raised LookupError). Build the full log
        # and write it in a single call instead.
        log_file = create_tmp_file(suffix=".log")
        log_file.write_text("".join([
            f"Processing started at {datetime.now()}\n",
            f"Input: {test_data['input']}\n",
            f"Multiplier: {test_data['multiplier']}\n",
            f"Results: {results}\n",
            f"Processing completed at {datetime.now()}\n",
        ]))
        attach_step_artifact(FileArtifact(log_file, name="processing/detailed_log"))
        return {"results": results, "original_data": test_data}

    @then("results are documented properly")
    def verification(result):
        # Attach verification summary.
        verification_summary = {
            "expected_length": len(result["original_data"]["input"]),
            "actual_length": len(result["results"]),
            "all_doubled": all(r == o * 2 for r, o in zip(result["results"], result["original_data"]["input"])),
            "verification_passed": True
        }
        attach_step_artifact(MemoryArtifact(
            name="verification/summary",
            content=json.dumps(verification_summary, indent=2),
            content_type="application/json"
        ))
        ensure(verification_summary["all_doubled"]).is_true()
        ensure(verification_summary["expected_length"]).equals(verification_summary["actual_length"])
evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9
scenario-10