Comprehensive developer toolkit providing reusable skills for Java/Spring Boot, TypeScript/NestJS/React/Next.js, Python, PHP, AWS CloudFormation, AI/RAG, DevOps, and more.
90
90%
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Risky
Do not use without reviewing it first
#!/usr/bin/env python3
"""Drift Guard Fidelity Report Generator for Claude Code.
Generates fidelity report at TaskCompleted event.
Reads _drift/state.json (Expected Files) and _drift/drift-events.log (actual files),
calculates matched/missing/extra, and generates _drift/fidelity-report.md.
Hook event: TaskCompleted
Input: JSON via stdin { "hook_event_name": "TaskCompleted", "task_id": "...", ... }
Output: Exit 0 = proceed | Report generated | Exit 2 = block (not used)
Zero external dependencies — pure Python 3 standard library only.
"""
import json
import os
import sys
from datetime import datetime
from pathlib import Path
# ─── Constants ──────────────────────────────────────────────────────────────
# File/directory names used by the drift-guard hook; all live under the
# spec folder's "_drift" directory.
STATE_FILE_NAME = "state.json"  # expected-files manifest written by the planner
DRIFT_DIR_NAME = "_drift"  # per-spec working directory searched for upward from cwd
DRIFT_EVENTS_LOG = "drift-events.log"  # append-only log of files actually touched
FIDELITY_REPORT_NAME = "fidelity-report.md"  # report generated by this hook
# ─── State Management ─────────────────────────────────────────────────────────
def find_state_file(cwd: str) -> str | None:
    """Locate the nearest _drift/state.json at or above *cwd*.

    Walks up the directory tree (capped at 20 levels to rule out
    pathological loops) and returns the path of the first state.json
    found as a string, or None when no ancestor directory has one.
    """
    directory = Path(cwd).resolve()
    for _ in range(20):  # hard cap on upward traversal
        candidate = directory / DRIFT_DIR_NAME / STATE_FILE_NAME
        if candidate.exists():
            return str(candidate)
        parent = directory.parent
        if parent == directory:  # filesystem root — nowhere left to go
            return None
        directory = parent
    return None
def load_state(state_path: str) -> dict | None:
"""Load state.json, return None if file not found or invalid.
Graceful degradation: returns None on any error.
"""
try:
with open(state_path, "r", encoding="utf-8") as f:
return json.load(f)
except (FileNotFoundError, json.JSONDecodeError, PermissionError):
return None
def load_drift_events(spec_folder: str) -> list[str]:
    """Load unique file paths recorded in _drift/drift-events.log.

    Each log line has the form ``<timestamp> | <file path>``; blank lines
    and lines without the ``" | "`` separator are skipped. Returns the
    unique paths in first-seen order — the previous ``list(set(...))``
    dedup produced a nondeterministic order, so the generated report's
    content could differ between identical runs. Returns an empty list
    when the log is missing or unreadable (graceful degradation).
    """
    log_path = os.path.join(spec_folder, DRIFT_DIR_NAME, DRIFT_EVENTS_LOG)
    # dict keys preserve insertion order (Python 3.7+), giving a
    # deterministic, order-stable dedup.
    seen: dict[str, None] = {}
    try:
        if not os.path.exists(log_path):
            # Log file doesn't exist → no drift events
            return []
        with open(log_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line or " | " not in line:
                    continue
                # Everything after the first " | " is the file path.
                _, file_path = line.split(" | ", 1)
                seen[file_path] = None
    except OSError:  # IOError has been an alias of OSError since Python 3.3
        # Graceful degradation: return empty list
        return []
    return list(seen)
# ─── Fidelity Calculation ────────────────────────────────────────────────────
def calculate_fidelity(
    expected_files: list[str], actual_files: list[str]
) -> dict:
    """Compare planned files against files actually produced.

    Returns a dict containing:
    - total_expected: length of *expected_files* (duplicates included)
    - matched / missing / extra: sorted lists (intersection, expected-only,
      actual-only respectively)
    - matched_count / missing_count / extra_count: their sizes
    """
    want = set(expected_files)
    got = set(actual_files)
    buckets = {
        "matched": want & got,
        "missing": want - got,
        "extra": got - want,
    }
    metrics: dict = {"total_expected": len(expected_files)}
    for name, members in buckets.items():
        metrics[name] = sorted(members)
        metrics[f"{name}_count"] = len(members)
    return metrics
def get_fidelity_summary(metrics: dict) -> str:
    """Render a one-line human-readable verdict for *metrics*."""
    total = metrics["total_expected"]
    matched = metrics["matched_count"]
    extra = metrics["extra_count"]

    # Nothing was planned: the verdict depends solely on unplanned files.
    if total == 0:
        if extra:
            return f"⚠ Partial fidelity — {extra} unplanned file(s) created"
        return "✓ Full fidelity — No expected files and no drift events"

    # Everything planned was produced; drift only affects the wording.
    if matched == total:
        if extra:
            return (
                f"✓ Full fidelity with drift — All {total} expected files "
                f"produced, {extra} unplanned file(s)"
            )
        return "✓ Full fidelity — All expected files produced, no drift"

    # Partial coverage: grade by the percentage of expected files matched.
    percentage = matched / total * 100
    tail = (
        f"{matched}/{total} expected files produced "
        f"({percentage:.0f}%), {extra} unplanned file(s)"
    )
    if percentage >= 80:
        return f"⚠ Partial fidelity — {tail}"
    if percentage >= 50:
        return f"⚠ Low fidelity — {tail}"
    return f"❌ Very low fidelity — Only {tail}"
# ─── Report Generation ────────────────────────────────────────────────────────
def generate_fidelity_report(
    spec_folder: str, task_id: str, state: dict, metrics: dict
) -> None:
    """Write _drift/fidelity-report.md summarising *metrics* for *task_id*.

    The report contains the generation timestamp, a one-line summary,
    the four counts, and — when non-empty — the missing, extra, and
    matched file lists. *state* is accepted for interface compatibility
    but is not read here. Raises OSError if the report cannot be written.
    """
    report_path = os.path.join(spec_folder, DRIFT_DIR_NAME, FIDELITY_REPORT_NAME)

    def bullet_section(title: str, paths: list) -> list[str]:
        # Heading plus one "- `path`" bullet per file; empty sections
        # are omitted from the report entirely.
        if not paths:
            return []
        return [title, ""] + [f"- `{p}`" for p in paths] + [""]

    body = [
        f"# Fidelity Report — {task_id}",
        "",
        f"**Generated**: {datetime.now().isoformat()}",
        f"**Task**: {task_id}",
        "",
        "## Summary",
        "",
        get_fidelity_summary(metrics),
        "",
        "## Metrics",
        "",
        f"- **Total Expected Files**: {metrics['total_expected']}",
        f"- **Matched Files**: {metrics['matched_count']}",
        f"- **Missing Files**: {metrics['missing_count']}",
        f"- **Extra Files**: {metrics['extra_count']}",
        "",
    ]
    body += bullet_section(
        "## Missing Files (Expected but Not Produced)", metrics["missing"]
    )
    body += bullet_section(
        "## Extra Files (Produced but Not Expected)", metrics["extra"]
    )
    body += bullet_section(
        "## Matched Files (Expected and Produced)", metrics["matched"]
    )

    with open(report_path, "w", encoding="utf-8") as f:
        f.write("\n".join(body))
# ─── Entry Point ───────────────────────────────────────────────────────────────
def main() -> None:
    """Hook entry point: consume a TaskCompleted event, emit the report.

    Reads the hook payload as JSON from stdin. Every failure path exits
    with status 0 (graceful degradation) — this hook never blocks the
    pipeline.
    """
    # 1. Parse the hook payload; malformed input → do nothing.
    try:
        event = json.load(sys.stdin)
    except (json.JSONDecodeError, ValueError):
        sys.exit(0)

    # 2. Only act on TaskCompleted events.
    if event.get("hook_event_name") != "TaskCompleted":
        sys.exit(0)

    # 3-4. Locate and load state.json, searching upward from cwd.
    state_path = find_state_file(event.get("cwd", os.getcwd()))
    if not state_path:
        sys.exit(0)  # no _drift/state.json above cwd
    state = load_state(state_path)
    if not state:
        sys.exit(0)  # missing or invalid state

    # 5. state.json lives at <spec_folder>/_drift/state.json.
    spec_folder = str(Path(state_path).parent.parent)
    task_id = state.get("task_id", "unknown")

    # 6-8. Compare expected files against the drift-event log.
    metrics = calculate_fidelity(
        state.get("expected_files", []),
        load_drift_events(spec_folder),
    )

    # 9. Write the report; fail silently if the filesystem refuses.
    try:
        generate_fidelity_report(spec_folder, task_id, state, metrics)
    except OSError:
        sys.exit(0)

    # 10. Success (report generated).
    sys.exit(0)


if __name__ == "__main__":
    main()
plugins
developer-kit-ai
developer-kit-aws
agents
docs
skills
aws
aws-cli-beast
aws-cost-optimization
aws-drawio-architecture-diagrams
aws-sam-bootstrap
aws-cloudformation
aws-cloudformation-auto-scaling
aws-cloudformation-bedrock
aws-cloudformation-cloudfront
aws-cloudformation-cloudwatch
aws-cloudformation-dynamodb
aws-cloudformation-ec2
aws-cloudformation-ecs
aws-cloudformation-elasticache
references
aws-cloudformation-iam
references
aws-cloudformation-lambda
aws-cloudformation-rds
aws-cloudformation-s3
aws-cloudformation-security
aws-cloudformation-task-ecs-deploy-gh
aws-cloudformation-vpc
references
developer-kit-core
agents
commands
skills
developer-kit-devops
developer-kit-java
agents
commands
docs
skills
aws-lambda-java-integration
aws-rds-spring-boot-integration
aws-sdk-java-v2-bedrock
aws-sdk-java-v2-core
aws-sdk-java-v2-dynamodb
aws-sdk-java-v2-kms
aws-sdk-java-v2-lambda
aws-sdk-java-v2-messaging
aws-sdk-java-v2-rds
aws-sdk-java-v2-s3
aws-sdk-java-v2-secrets-manager
clean-architecture
graalvm-native-image
langchain4j-ai-services-patterns
references
langchain4j-mcp-server-patterns
references
langchain4j-rag-implementation-patterns
references
langchain4j-spring-boot-integration
langchain4j-testing-strategies
langchain4j-tool-function-calling-patterns
langchain4j-vector-stores-configuration
references
qdrant
references
spring-ai-mcp-server-patterns
spring-boot-actuator
spring-boot-cache
spring-boot-crud-patterns
spring-boot-dependency-injection
spring-boot-event-driven-patterns
spring-boot-openapi-documentation
spring-boot-project-creator
spring-boot-resilience4j
spring-boot-rest-api-standards
spring-boot-saga-pattern
spring-boot-security-jwt
assets
references
scripts
spring-boot-test-patterns
spring-data-jpa
references
spring-data-neo4j
references
unit-test-application-events
unit-test-bean-validation
unit-test-boundary-conditions
unit-test-caching
unit-test-config-properties
references
unit-test-controller-layer
unit-test-exception-handler
references
unit-test-json-serialization
unit-test-mapper-converter
references
unit-test-parameterized
unit-test-scheduled-async
references
unit-test-service-layer
references
unit-test-utility-methods
unit-test-wiremock-rest-api
references
developer-kit-php
developer-kit-project-management
developer-kit-python
developer-kit-specs
commands
docs
hooks
test-templates
tests
skills
developer-kit-tools
developer-kit-typescript
agents
docs
hooks
rules
skills
aws-cdk
aws-lambda-typescript-integration
better-auth
clean-architecture
drizzle-orm-patterns
dynamodb-toolbox-patterns
references
nestjs
nestjs-best-practices
nestjs-code-review
nestjs-drizzle-crud-generator
nextjs-app-router
nextjs-authentication
nextjs-code-review
nextjs-data-fetching
nextjs-deployment
nextjs-performance
nx-monorepo
react-code-review
react-patterns
shadcn-ui
tailwind-css-patterns
tailwind-design-system
references
turborepo-monorepo
typescript-docs
typescript-security-review
zod-validation-utilities
references
github-spec-kit