Google Cloud reCAPTCHA Enterprise API client library for protecting websites and applications from fraud.
Comprehensive reporting and analytics for monitoring reCAPTCHA usage, effectiveness, performance metrics, score distributions, and challenge completion rates. These metrics help optimize reCAPTCHA configuration and measure protection effectiveness.
Retrieves detailed metrics and analytics data for reCAPTCHA usage and performance within a specified time range.
def get_metrics(
    request: GetMetricsRequest = None,
    *,
    name: str = None,
    retry: Union[retries.Retry, gapic_v1.method._MethodDefault] = _MethodDefault._DEFAULT_VALUE,
    timeout: Union[float, object] = _MethodDefault._DEFAULT_VALUE,
    metadata: Sequence[Tuple[str, str]] = ()
) -> Metrics:
    """Retrieve usage metrics for a specific reCAPTCHA key.

    Args:
        request: GetMetricsRequest message for the call.
        name: Required. Metrics resource name in the format
            'projects/{project}/keys/{key}/metrics'.
        retry: google.api_core retry policy applied to the request.
        timeout: Per-request timeout in seconds.
        metadata: Additional gRPC metadata sent with the request.

    Returns:
        Metrics: Comprehensive metrics data including usage, scores, and challenges.

    Raises:
        google.api_core.exceptions.NotFound: If the key doesn't exist.
        google.api_core.exceptions.PermissionDenied: If insufficient permissions.
        google.api_core.exceptions.InvalidArgument: If request parameters are invalid.
    """
"""from google.cloud import recaptchaenterprise
client = recaptchaenterprise.RecaptchaEnterpriseServiceClient()
# Get metrics for a specific key
request = recaptchaenterprise.GetMetricsRequest(
name="projects/your-project-id/keys/your-key-id/metrics"
)
metrics = client.get_metrics(request=request)
print(f"Metrics for key: {request.name}")
print(f"Total assessments: {len(metrics.score_metrics) if metrics.score_metrics else 0}")
# Display score distribution
if metrics.score_metrics:
for score_metric in metrics.score_metrics:
print(f"Score range {score_metric.overall_metrics.score_buckets[0].lower_bound}-"
f"{score_metric.overall_metrics.score_buckets[0].upper_bound}: "
f"{score_metric.overall_metrics.score_buckets[0].count} assessments")class GetMetricsRequest:
"""Request message for retrieving metrics."""
name: str # Required. Metrics resource name in format
# 'projects/{project}/keys/{key}/metrics'class Metrics:
"""Comprehensive metrics data for reCAPTCHA usage."""
name: str # Output only. Resource name
start_time: Timestamp # Start time of the metrics period
score_metrics: List[ScoreMetrics] # Score-related metrics
challenge_metrics: List[ChallengeMetrics] # Challenge-related metricsclass ScoreMetrics:
"""Metrics related to reCAPTCHA scores."""
overall_metrics: ScoreDistribution # Overall score distribution
action_metrics: Dict[str, ScoreDistribution] # Per-action score distributions
class ScoreDistribution:
"""Distribution of reCAPTCHA scores."""
score_buckets: List[ScoreBucket] # Score ranges and counts
class ScoreBucket:
"""A bucket representing a score range."""
lower_bound: float # Lower bound of score range (inclusive)
upper_bound: float # Upper bound of score range (exclusive)
count: int # Number of assessments in this rangeclass ChallengeMetrics:
"""Metrics related to reCAPTCHA challenges."""
pageload_count: int # Number of pageloads with challenges
nocaptcha_count: int # Number of successful no-challenge verifications
failed_count: int # Number of failed challenge attempts
def get_key_metrics(client, project_id, key_id):
    """Fetch metrics for one key and print a human-readable summary.

    Args:
        client: RecaptchaEnterpriseServiceClient used for the RPC.
        project_id: Cloud project that owns the key.
        key_id: reCAPTCHA key whose metrics are displayed.

    Returns:
        The Metrics response on success, or None when retrieval fails.
    """
    try:
        # Build the request inside the try so a malformed resource name or a
        # missing client is reported uniformly instead of raising out of the
        # helper (previously only the RPC itself was guarded).
        request = recaptchaenterprise.GetMetricsRequest(
            name=f"projects/{project_id}/keys/{key_id}/metrics"
        )
        metrics = client.get_metrics(request=request)
        print(f"=== Metrics for Key {key_id} ===")
        print(f"Period: {metrics.start_time}")
        # Score metrics: overall distribution plus per-action breakdowns.
        if metrics.score_metrics:
            print("\n--- Score Metrics ---")
            for score_metric in metrics.score_metrics:
                print("Overall score distribution:")
                display_score_distribution(score_metric.overall_metrics)
                if score_metric.action_metrics:
                    print("\nPer-action metrics:")
                    for action, distribution in score_metric.action_metrics.items():
                        print(f" Action '{action}':")
                        display_score_distribution(distribution, indent=" ")
        # Challenge metrics: raw event counts plus a derived success rate.
        if metrics.challenge_metrics:
            print("\n--- Challenge Metrics ---")
            for challenge_metric in metrics.challenge_metrics:
                total_challenges = (challenge_metric.pageload_count +
                                    challenge_metric.nocaptcha_count +
                                    challenge_metric.failed_count +
                                    challenge_metric.passed_count)
                print(f"Total challenge events: {total_challenges}")
                print(f" Pageloads: {challenge_metric.pageload_count}")
                print(f" No-challenge success: {challenge_metric.nocaptcha_count}")
                print(f" Challenge passed: {challenge_metric.passed_count}")
                print(f" Challenge failed: {challenge_metric.failed_count}")
                attempts = challenge_metric.passed_count + challenge_metric.failed_count
                if attempts > 0:
                    success_rate = (challenge_metric.passed_count / attempts) * 100
                    print(f" Challenge success rate: {success_rate:.1f}%")
        return metrics
    except Exception as e:
        print(f"Error retrieving metrics: {e}")
        return None
def display_score_distribution(distribution, indent=""):
    """Print a score distribution as "low-high: count (pct%)" rows.

    Args:
        distribution: Object exposing ``score_buckets``; each bucket has
            ``lower_bound``, ``upper_bound``, and ``count``.
        indent: String prefixed to every printed line.
    """
    buckets = distribution.score_buckets
    if not buckets:
        print(f"{indent}No score data available")
        return
    total = sum(bucket.count for bucket in buckets)
    print(f"{indent}Total assessments: {total}")
    for bucket in buckets:
        share = (bucket.count / total * 100) if total > 0 else 0
        print(f"{indent} {bucket.lower_bound:.1f}-{bucket.upper_bound:.1f}: "
              f"{bucket.count} ({share:.1f}%)")
# Get metrics for a key
metrics = get_key_metrics(client, "your-project-id", "your-key-id")


def analyze_metrics_for_alerts(client, project_id, key_id):
    """Inspect a key's metrics and return alerts for unusual patterns.

    Args:
        client: RecaptchaEnterpriseServiceClient used for the RPC.
        project_id: Cloud project that owns the key.
        key_id: reCAPTCHA key to analyze.

    Returns:
        List of alert dicts with 'type', 'message', and 'severity' keys;
        empty when metrics look normal or cannot be retrieved.
    """
    alerts = []
    try:
        # Build the request inside the try so construction errors are
        # reported via the same path as RPC failures.
        request = recaptchaenterprise.GetMetricsRequest(
            name=f"projects/{project_id}/keys/{key_id}/metrics"
        )
        metrics = client.get_metrics(request=request)
        # Analyze score distribution
        if metrics.score_metrics:
            for score_metric in metrics.score_metrics:
                overall = score_metric.overall_metrics
                if overall.score_buckets:
                    total_assessments = sum(bucket.count for bucket in overall.score_buckets)
                    # Guard BOTH divisions: previously the high-score branch
                    # divided without checking total_assessments > 0, so an
                    # all-zero bucket list raised ZeroDivisionError that the
                    # broad except silently swallowed.
                    if total_assessments > 0:
                        low_score_count = sum(bucket.count for bucket in overall.score_buckets
                                              if bucket.upper_bound <= 0.3)
                        low_score_percentage = (low_score_count / total_assessments) * 100
                        if low_score_percentage > 20:  # Alert if >20% low scores
                            alerts.append({
                                'type': 'HIGH_SUSPICIOUS_ACTIVITY',
                                'message': f'{low_score_percentage:.1f}% of assessments have low scores (<0.3)',
                                'severity': 'HIGH' if low_score_percentage > 50 else 'MEDIUM'
                            })
                        high_score_count = sum(bucket.count for bucket in overall.score_buckets
                                               if bucket.lower_bound >= 0.9)
                        high_score_percentage = (high_score_count / total_assessments) * 100
                        if high_score_percentage < 30:  # Alert if <30% high scores
                            alerts.append({
                                'type': 'LOW_LEGITIMATE_ACTIVITY',
                                'message': f'Only {high_score_percentage:.1f}% of assessments have high scores (>=0.9)',
                                'severity': 'MEDIUM'
                            })
        # Analyze challenge metrics
        if metrics.challenge_metrics:
            for challenge_metric in metrics.challenge_metrics:
                total_challenges = (challenge_metric.passed_count + challenge_metric.failed_count)
                if total_challenges > 0:
                    failure_rate = (challenge_metric.failed_count / total_challenges) * 100
                    if failure_rate > 50:  # Alert if >50% challenge failures
                        alerts.append({
                            'type': 'HIGH_CHALLENGE_FAILURE_RATE',
                            'message': f'Challenge failure rate is {failure_rate:.1f}%',
                            'severity': 'HIGH' if failure_rate > 80 else 'MEDIUM'
                        })
                total_events = (challenge_metric.pageload_count + challenge_metric.nocaptcha_count +
                                challenge_metric.failed_count + challenge_metric.passed_count)
                if total_events > 0:
                    nocaptcha_rate = (challenge_metric.nocaptcha_count / total_events) * 100
                    if nocaptcha_rate < 70:  # Alert if <70% no-challenge
                        alerts.append({
                            'type': 'LOW_NOCAPTCHA_RATE',
                            'message': f'No-challenge rate is only {nocaptcha_rate:.1f}%',
                            'severity': 'MEDIUM'
                        })
        # Report alerts
        if alerts:
            print(f"=== ALERTS for Key {key_id} ===")
            for alert in alerts:
                print(f"[{alert['severity']}] {alert['type']}: {alert['message']}")
        else:
            print(f"No alerts for key {key_id} - metrics look normal")
        return alerts
    except Exception as e:
        print(f"Error analyzing metrics: {e}")
        return []
# Analyze metrics for alerts
alerts = analyze_metrics_for_alerts(client, "your-project-id", "your-key-id")


def compare_key_metrics(client, project_id, key_ids):
    """Print a side-by-side performance table for multiple keys.

    Args:
        client: RecaptchaEnterpriseServiceClient used for the RPCs.
        project_id: Cloud project that owns the keys.
        key_ids: Iterable of key IDs to compare.
    """
    key_metrics = {}
    # Collect metrics for all keys
    for key_id in key_ids:
        try:
            # Build the request inside the per-key try so a bad key name only
            # skips that key instead of aborting the whole comparison.
            request = recaptchaenterprise.GetMetricsRequest(
                name=f"projects/{project_id}/keys/{key_id}/metrics"
            )
            key_metrics[key_id] = client.get_metrics(request=request)
        except Exception as e:
            print(f"Error getting metrics for key {key_id}: {e}")
            key_metrics[key_id] = None
    # Compare key performance
    print("=== Key Performance Comparison ===")
    print(f"{'Key ID':<20} {'Total Assessments':<20} {'Avg Score':<12} {'Low Score %':<12}")
    print("-" * 70)
    for key_id, metrics in key_metrics.items():
        if not metrics or not metrics.score_metrics:
            print(f"{key_id:<20} {'No data':<20} {'N/A':<12} {'N/A':<12}")
            continue
        # Statistics are taken from the first score-metric entry.
        overall_metrics = metrics.score_metrics[0].overall_metrics
        total_assessments = sum(bucket.count for bucket in overall_metrics.score_buckets)
        # Weighted average uses each bucket's midpoint as its score.
        total_weighted_score = sum(
            bucket.count * ((bucket.lower_bound + bucket.upper_bound) / 2)
            for bucket in overall_metrics.score_buckets
        )
        avg_score = total_weighted_score / total_assessments if total_assessments > 0 else 0
        low_score_count = sum(bucket.count for bucket in overall_metrics.score_buckets
                              if bucket.upper_bound <= 0.3)
        low_score_pct = (low_score_count / total_assessments * 100) if total_assessments > 0 else 0
        print(f"{key_id:<20} {total_assessments:<20} {avg_score:<12.2f} {low_score_pct:<12.1f}%")
    # Find best and worst performing keys (lowest / highest low-score share).
    valid_keys = {k: v for k, v in key_metrics.items() if v and v.score_metrics}
    if valid_keys:
        best_key = min(valid_keys.keys(), key=lambda k: calculate_low_score_percentage(valid_keys[k]))
        worst_key = max(valid_keys.keys(), key=lambda k: calculate_low_score_percentage(valid_keys[k]))
        print(f"\nBest performing key: {best_key}")
        print(f"Worst performing key: {worst_key}")
def calculate_low_score_percentage(metrics):
    """Return the percentage of assessments landing in low-score buckets.

    A bucket counts as "low" when its upper bound is <= 0.3. Returns 100
    (assume the worst) when no score data is available.
    """
    if not metrics.score_metrics:
        return 100
    buckets = metrics.score_metrics[0].overall_metrics.score_buckets
    total = sum(bucket.count for bucket in buckets)
    if total == 0:
        return 100
    low = sum(bucket.count for bucket in buckets if bucket.upper_bound <= 0.3)
    return low / total * 100
# Compare metrics across keys
key_ids = ["web-key", "android-key", "ios-key"]
compare_key_metrics(client, "your-project-id", key_ids)

import json
from datetime import datetime
def export_metrics_to_json(client, project_id, key_id, output_file=None):
    """Export a key's metrics as a JSON-serializable dict for external analysis.

    Args:
        client: RecaptchaEnterpriseServiceClient used for the RPC.
        project_id: Cloud project that owns the key.
        key_id: reCAPTCHA key whose metrics are exported.
        output_file: Optional path; when given the data is also written to
            disk as pretty-printed JSON.

    Returns:
        The exported metrics dict, or None when retrieval or export fails.
    """
    try:
        # Build the request inside the try so malformed names and RPC errors
        # are reported uniformly instead of raising out of the helper.
        request = recaptchaenterprise.GetMetricsRequest(
            name=f"projects/{project_id}/keys/{key_id}/metrics"
        )
        metrics = client.get_metrics(request=request)
        # Convert to serializable format.
        # NOTE(review): datetime.utcnow() is deprecated in Python 3.12+;
        # kept here for output compatibility.
        metrics_data = {
            'key_id': key_id,
            'export_time': datetime.utcnow().isoformat(),
            'metrics_period_start': metrics.start_time.isoformat() if metrics.start_time else None,
            'score_metrics': [],
            'challenge_metrics': []
        }
        # Process score metrics
        if metrics.score_metrics:
            for score_metric in metrics.score_metrics:
                score_data = {
                    'overall_distribution': [
                        {
                            'lower_bound': bucket.lower_bound,
                            'upper_bound': bucket.upper_bound,
                            'count': bucket.count
                        }
                        for bucket in score_metric.overall_metrics.score_buckets
                    ],
                    'action_distributions': {}
                }
                # Process per-action metrics
                if score_metric.action_metrics:
                    for action, distribution in score_metric.action_metrics.items():
                        score_data['action_distributions'][action] = [
                            {
                                'lower_bound': bucket.lower_bound,
                                'upper_bound': bucket.upper_bound,
                                'count': bucket.count
                            }
                            for bucket in distribution.score_buckets
                        ]
                metrics_data['score_metrics'].append(score_data)
        # Process challenge metrics
        if metrics.challenge_metrics:
            for challenge_metric in metrics.challenge_metrics:
                metrics_data['challenge_metrics'].append({
                    'pageload_count': challenge_metric.pageload_count,
                    'nocaptcha_count': challenge_metric.nocaptcha_count,
                    'failed_count': challenge_metric.failed_count,
                    'passed_count': challenge_metric.passed_count
                })
        # Write to file or return data
        if output_file:
            with open(output_file, 'w') as f:
                json.dump(metrics_data, f, indent=2)
            print(f"Metrics exported to {output_file}")
        return metrics_data
    except Exception as e:
        print(f"Error exporting metrics: {e}")
        return None
# Export metrics to JSON
metrics_data = export_metrics_to_json(
    client,
    "your-project-id",
    "your-key-id",
    "recaptcha_metrics.json"
)


def create_metrics_summary(client, project_id, key_ids):
    """Build a dashboard-style summary dict across several keys.

    Args:
        client: RecaptchaEnterpriseServiceClient used for the RPCs.
        project_id: Cloud project that owns the keys.
        key_ids: Iterable of key IDs to summarize.

    Returns:
        Dict with 'generated_at', 'project_id', aggregate 'summary' (totals,
        weighted average score, alert strings), and per-key 'key_details'.
    """
    dashboard_data = {
        'generated_at': datetime.utcnow().isoformat(),
        'project_id': project_id,
        'summary': {
            'total_keys': len(key_ids),
            'keys_with_data': 0,
            'total_assessments': 0,
            'overall_avg_score': 0,
            'alerts': []
        },
        'key_details': {}
    }
    all_assessments = 0
    all_weighted_scores = 0
    for key_id in key_ids:
        try:
            # Build the request inside the per-key try so failures land in
            # that key's 'error' entry instead of aborting the whole summary.
            request = recaptchaenterprise.GetMetricsRequest(
                name=f"projects/{project_id}/keys/{key_id}/metrics"
            )
            metrics = client.get_metrics(request=request)
            if metrics.score_metrics:
                dashboard_data['summary']['keys_with_data'] += 1
                # Statistics come from the first score-metric entry; the
                # weighted average uses each bucket's midpoint as its score.
                overall_metrics = metrics.score_metrics[0].overall_metrics
                key_assessments = sum(bucket.count for bucket in overall_metrics.score_buckets)
                key_weighted_score = sum(
                    bucket.count * ((bucket.lower_bound + bucket.upper_bound) / 2)
                    for bucket in overall_metrics.score_buckets
                )
                key_avg_score = key_weighted_score / key_assessments if key_assessments > 0 else 0
                all_assessments += key_assessments
                all_weighted_scores += key_weighted_score
                dashboard_data['key_details'][key_id] = {
                    'total_assessments': key_assessments,
                    'average_score': key_avg_score,
                    'low_score_percentage': calculate_low_score_percentage(metrics)
                }
        except Exception as e:
            dashboard_data['key_details'][key_id] = {
                'error': str(e)
            }
    # Calculate overall statistics
    dashboard_data['summary']['total_assessments'] = all_assessments
    dashboard_data['summary']['overall_avg_score'] = (
        all_weighted_scores / all_assessments if all_assessments > 0 else 0
    )
    # Generate alerts from the per-key details
    for key_id, details in dashboard_data['key_details'].items():
        if 'error' not in details:
            if details['low_score_percentage'] > 30:
                dashboard_data['summary']['alerts'].append(
                    f"Key {key_id}: High suspicious activity ({details['low_score_percentage']:.1f}% low scores)"
                )
            if details['average_score'] < 0.5:
                dashboard_data['summary']['alerts'].append(
                    f"Key {key_id}: Low average score ({details['average_score']:.2f})"
                )
    return dashboard_data
# Create dashboard
dashboard = create_metrics_summary(client, "your-project-id", ["key1", "key2", "key3"])
print(json.dumps(dashboard, indent=2))

from google.api_core import exceptions

try:
    metrics = client.get_metrics(request=request)
except exceptions.NotFound as e:
    # Key may not exist or may not have sufficient usage for metrics
    print(f"Key not found or no metrics available: {e}")
except exceptions.PermissionDenied as e:
    # Check IAM permissions for the project and key
    print(f"Insufficient permissions to access metrics: {e}")
except exceptions.InvalidArgument as e:
    print(f"Invalid metrics request: {e}")
    # Check the metrics resource name format

Install with Tessl CLI:

    npx tessl i tessl/pypi-google-cloud-recaptcha-enterprise