GerryChain — use Markov chain Monte Carlo (MCMC) to analyze districting plans and detect gerrymanders.

This module provides single-metric optimization and specialized analyses, including the Gingles analysis used for Voting Rights Act (VRA) compliance work. The optimization algorithms search the space of valid partitions for extreme or optimal districting plans.

`SingleMetricOptimizer` finds partitions that maximize or minimize a specific evaluation metric while satisfying constraints.
class SingleMetricOptimizer:
    """Find partitions that maximize or minimize a single evaluation metric
    while satisfying the supplied constraints.
    """

    def __init__(
        self,
        proposal: ProposalFunction,
        constraints: Union[ConstraintFunction, List[ConstraintFunction]],
        accept: AcceptanceFunction,
        initial_state: Partition,
        maximize: bool = True,
    ) -> None:
        """
        Create an optimizer for single-metric optimization.

        Parameters:
        - proposal (ProposalFunction): Function to propose new partitions.
        - constraints (Union[ConstraintFunction, List[ConstraintFunction]]):
          Validation constraint(s) a proposed partition must satisfy.
        - accept (AcceptanceFunction): Acceptance function for the chain.
        - initial_state (Partition): Starting partition.
        - maximize (bool): Whether to maximize (True) or minimize (False)
          the metric.
        """

    def optimize(self) -> Tuple[Partition, float]:
        """
        Run the optimization to find an extreme partition.

        Returns:
        Tuple[Partition, float]: (best_partition, best_score).
        """

# Usage example:
from gerrychain.optimization import SingleMetricOptimizer
from gerrychain.metrics import efficiency_gap


# Define the metric: absolute efficiency gap of the "SEN18" election updater.
def eg_metric(partition):
    return abs(efficiency_gap(partition["SEN18"]))


# Set up the optimizer to minimize the efficiency gap.
optimizer = SingleMetricOptimizer(
    proposal=recom,
    constraints=constraints,
    accept=always_accept,
    initial_state=partition,
    maximize=False,  # Minimize efficiency gap
)

# Run the optimization.
best_partition, best_score = optimizer.optimize()
print(f"Best efficiency gap: {best_score:.4f}")

# Specialized analysis for Voting Rights Act compliance, focusing on
# minority representation opportunities.
class Gingles:
    """Specialized analysis for Voting Rights Act (VRA) compliance, focusing
    on minority representation opportunities.
    """

    def __init__(
        self,
        proposal: ProposalFunction,
        constraints: List[ConstraintFunction],
        accept: AcceptanceFunction,
        initial_state: Partition,
        minority_column: str,
        threshold: float = 0.5,
    ) -> None:
        """
        Create a Gingles analyzer for VRA compliance analysis.

        Analyzes the ability to create districts where minority voters can
        elect candidates of choice, a key component of Voting Rights Act
        analysis.

        Parameters:
        - proposal (ProposalFunction): Function to propose new partitions.
        - constraints (List[ConstraintFunction]): Validation constraints.
        - accept (AcceptanceFunction): Acceptance function.
        - initial_state (Partition): Starting partition.
        - minority_column (str): Column name for minority population data.
        - threshold (float): Threshold for counting a district as a minority
          opportunity district (default 0.5).
        """

    def districts_info(self) -> Dict[DistrictId, Dict[str, Any]]:
        """
        Get detailed information about minority opportunity districts.

        Returns:
        Dict[DistrictId, Dict[str, Any]]: District demographics and analysis.
        """

# Usage example:
from gerrychain.optimization import Gingles

# Set up a Gingles analysis for Black voting-age population (BVAP).
gingles = Gingles(
    proposal=recom,
    constraints=constraints,
    accept=always_accept,
    initial_state=partition,
    minority_column="BVAP",
    threshold=0.4,  # 40% threshold for opportunity
)

# Analyze minority representation.
district_info = gingles.districts_info()
minority_opportunity_districts = 0
# NOTE(review): indentation was lost in extraction — the per-district print
# is assumed to be inside the loop; confirm against upstream docs.
for district_id, info in district_info.items():
    if info["minority_percentage"] >= 0.4:
        minority_opportunity_districts += 1
    print(f"District {district_id}: {info['minority_percentage']:.1%} minority VAP")
print(f"Total minority opportunity districts: {minority_opportunity_districts}")

# Examples of custom optimization workflows and advanced techniques:
from gerrychain.optimization import SingleMetricOptimizer
from gerrychain.metrics import mean_median, polsby_popper
import numpy as np
# Multi-objective optimization using a weighted combination of metrics.
def combined_metric(partition, weight_partisan=0.7, weight_compactness=0.3):
    """Combine partisan fairness and compactness into one score to minimize.

    Parameters:
    - partition: Partition with a "SEN18" election updater and geometry for
      Polsby-Popper scores.
    - weight_partisan (float): Weight on the partisan-bias component.
    - weight_compactness (float): Weight on the compactness component.

    Returns:
    float: Weighted score; lower is better.
    """
    # Partisan component: absolute mean-median bias (minimize).
    partisan_score = abs(mean_median(partition["SEN18"]))
    # Compactness component: Polsby-Popper is higher-is-better, so convert
    # it into a quantity to minimize.
    pp_scores = polsby_popper(partition)
    compactness_score = 1.0 - np.mean(list(pp_scores.values()))
    # Weighted combination.
    return weight_partisan * partisan_score + weight_compactness * compactness_score
# Optimization with the custom combined metric.
def multi_objective_optimize(initial_partition, steps=10000):
    """Run one optimization pass that minimizes the combined score.

    Parameters:
    - initial_partition: Starting partition.
    - steps (int): Intended chain length. NOTE(review): currently unused —
      the optimizer call below does not receive it; confirm the intended API.

    Returns:
    Tuple: (best_partition, best_score).
    """
    optimizer = SingleMetricOptimizer(
        proposal=recom,
        constraints=constraints,
        accept=always_accept,
        initial_state=initial_partition,
        maximize=False,  # Minimize combined score
    )
    best_partition, best_score = optimizer.optimize()
    return best_partition, best_score
# Sequential optimization.
def sequential_optimize(initial_partition):
    """First optimize for fairness, then for compactness.

    Stage 2 constrains proposals so fairness stays within 10% of the
    stage-1 result.

    Returns:
    Partition: the final partition from the compactness stage.
    """
    # Step 1: Optimize for partisan fairness (minimize |mean-median|).
    def fairness_metric(p):
        return abs(mean_median(p["SEN18"]))

    fairness_optimizer = SingleMetricOptimizer(
        proposal=recom,
        constraints=constraints,
        accept=always_accept,
        initial_state=initial_partition,
        maximize=False,
    )
    fair_partition, fairness_score = fairness_optimizer.optimize()
    print(f"Fairness optimization complete: {fairness_score:.4f}")

    # Step 2: Optimize for compactness while maintaining fairness.
    def compactness_metric(p):
        pp_scores = polsby_popper(p)
        return np.mean(list(pp_scores.values()))

    # Add a fairness constraint to maintain the stage-1 result.
    def maintain_fairness(p):
        return abs(mean_median(p["SEN18"])) <= fairness_score * 1.1  # 10% tolerance

    enhanced_constraints = constraints + [maintain_fairness]
    compactness_optimizer = SingleMetricOptimizer(
        proposal=recom,
        constraints=enhanced_constraints,
        accept=always_accept,
        initial_state=fair_partition,
        maximize=True,  # Maximize compactness
    )
    final_partition, compactness_score = compactness_optimizer.optimize()
    print(f"Compactness optimization complete: {compactness_score:.4f}")
    return final_partition
# Ensemble-based optimization.
def ensemble_optimize(initial_partition, num_runs=10):
    """Run multiple optimization runs and select the best result.

    Parameters:
    - initial_partition: Starting partition for every run.
    - num_runs (int): Number of independent optimization runs.

    Returns:
    Tuple: (best_partition, best_score) from the lowest-scoring run.
    """
    results = []
    for run in range(num_runs):
        optimizer = SingleMetricOptimizer(
            proposal=recom,
            constraints=constraints,
            accept=always_accept,
            initial_state=initial_partition,
            maximize=False,
        )
        partition, score = optimizer.optimize()
        results.append((partition, score, run))
        print(f"Run {run+1}/{num_runs}: Score = {score:.4f}")

    # Select the best (minimum-score) result across runs.
    best_partition, best_score, best_run = min(results, key=lambda x: x[1])
    print(f"Best result from run {best_run+1}: {best_score:.4f}")
    return best_partition, best_score
# Use the optimization functions, e.g.:
# optimized_partition = sequential_optimize(initial_partition)
# best_partition, score = ensemble_optimize(initial_partition, num_runs=5)

# Type aliases used by the optimization API. `Callable` and `Tuple` were
# never imported in this file, so bring them in here; `Partition` is the
# gerrychain partition type.
from typing import Callable, Tuple

ProposalFunction = Callable[[Partition], Partition]
ConstraintFunction = Callable[[Partition], bool]
AcceptanceFunction = Callable[[Partition], bool]
MetricFunction = Callable[[Partition], float]
OptimizationResult = Tuple[Partition, float]

# Install with Tessl CLI:
#   npx tessl i tessl/pypi-gerrychain