Use Markov chain Monte Carlo to analyze districting plans and gerrymanders.

Control which proposed partitions are accepted in the Markov chain. Acceptance functions implement the probabilistic acceptance rules that determine whether to move to a proposed state or remain in the current state.

Simple acceptance rules for standard Markov chain analysis:
def always_accept(partition: Partition) -> bool:
    """
    Always accept any proposed partition that passes constraints.

    Most common acceptance function for basic redistricting analysis.
    Creates a uniform sampling of the space of valid partitions.

    Parameters:
    - partition (Partition): Proposed partition (not used in decision)

    Returns:
        bool: Always returns True

    Usage example:
        from gerrychain.accept import always_accept

        # Standard usage in Markov chain
        chain = MarkovChain(
            proposal=recom,
            constraints=constraints,
            accept=always_accept,  # Accept all valid proposals
            initial_state=partition,
            total_steps=10000
        )
    """
    # Unconditional acceptance: the chain's constraints (checked elsewhere)
    # are the only filter, so sampling over valid plans is uniform.
    return True

# Probabilistic acceptance based on proposal quality, enabling biased
# sampling toward better plans:
def cut_edge_accept(partition: Partition) -> bool:
    """
    Metropolis-Hastings acceptance based on number of cut edges.

    Always accepts when the proposal does not increase the number of cut
    edges. Otherwise uses the Metropolis criterion: accepts with probability
    cut_edges_parent / cut_edges_current. This biases sampling toward
    partitions with FEWER cut edges, i.e. less fragmented district shapes.
    (The previous description claimed the opposite bias, contradicting the
    stated ratio.)

    Parameters:
    - partition (Partition): Proposed partition with parent reference

    Returns:
        bool: True to accept, False to reject

    Usage example:
        from gerrychain.accept import cut_edge_accept
        from gerrychain.updaters import cut_edges

        # Set up partition to track cut edges
        partition = GeographicPartition(
            graph,
            assignment="district",
            updaters={"cut_edges": cut_edges}
        )

        # Use Metropolis acceptance
        chain = MarkovChain(
            proposal=recom,
            constraints=constraints,
            accept=cut_edge_accept,  # Biased toward fewer cut edges
            initial_state=partition,
            total_steps=10000
        )
    """
    # The chain's initial state has no parent: nothing to compare against.
    if partition.parent is None:
        return True
    # Metropolis ratio, capped at 1 so improvements are always accepted.
    bound = min(1.0, len(partition.parent["cut_edges"]) / len(partition["cut_edges"]))
    return random.random() < bound

# Examples of creating custom acceptance functions for specialized analysis:
import random
import math
from gerrychain.metrics import efficiency_gap, mean_median
def fairness_biased_accept(partition, temperature=1.0):
    """
    Simulated-annealing acceptance biased toward partisan fairness.

    Scores plans by the absolute efficiency gap of their "election" updater.
    A proposal that is at least as fair as its parent is always accepted;
    a less fair proposal is accepted with probability exp(-delta / T),
    where ``temperature`` trades exploration against exploitation.
    """
    parent = partition.parent
    # Initial state: nothing to compare against, accept unconditionally.
    if parent is None:
        return True

    proposed_gap = abs(efficiency_gap(partition["election"]))
    previous_gap = abs(efficiency_gap(parent["election"]))

    # Fairness did not get worse: always accept.
    if proposed_gap <= previous_gap:
        return True

    # Metropolis criterion for a fairness regression.
    worsening = proposed_gap - previous_gap
    accept_probability = math.exp(-worsening / temperature)
    return random.random() < accept_probability
def compactness_threshold_accept(partition, min_compactness=0.3):
    """
    Gate acceptance on average district compactness.

    Computes the Polsby-Popper score for every district in the plan and
    accepts the proposal only when the mean score is at least
    ``min_compactness``.
    """
    import numpy as np
    from gerrychain.metrics import polsby_popper

    district_scores = polsby_popper(partition)
    return np.mean(list(district_scores.values())) >= min_compactness
def diversity_accept(partition, diversity_factor=0.1):
    """
    Randomized acceptance intended to keep the chain from stagnating.

    Accepts with a base probability of 0.8 plus a random bonus of up to
    ``diversity_factor`` (capped at 1.0). Simplified example: a full
    implementation would compare the proposal against recently visited
    states instead of drawing a blind bonus.
    """
    floor_probability = 0.8
    # Random bonus prevents the threshold from being a fixed constant.
    bonus = random.random() * diversity_factor
    threshold = min(1.0, floor_probability + bonus)
    return random.random() < threshold
def multi_metric_accept(partition, weights=None):
    """
    Accept based on a weighted blend of fairness, compactness, and stability.

    Fairness is 1 - |efficiency gap|, compactness is the mean Polsby-Popper
    score, and stability is the inverse share of flipped nodes (defaulting
    to 0.9 when the partition tracks no flips). A proposal whose blended
    score matches or beats its parent's is accepted outright; otherwise it
    is accepted with probability equal to its own blended score.
    """
    if weights is None:
        weights = {"fairness": 0.4, "compactness": 0.4, "stability": 0.2}
    # First state in the chain: accept unconditionally.
    if partition.parent is None:
        return True

    # Component scores for the proposal (simplified metrics).
    fairness = 1.0 - abs(efficiency_gap(partition["election"]))
    from gerrychain.metrics import polsby_popper
    import numpy as np
    compactness = np.mean(list(polsby_popper(partition).values()))
    # Stability: the fewer nodes flipped by this move, the higher the score.
    if hasattr(partition, 'flips'):
        stability = 1.0 - len(partition.flips) / len(partition.graph.nodes)
    else:
        stability = 1.0 - 0.1

    def blend(f, c, s):
        # Weighted combination; addition order kept fixed for determinism.
        return (weights["fairness"] * f +
                weights["compactness"] * c +
                weights["stability"] * s)

    proposal_score = blend(fairness, compactness, stability)

    # Compare against the parent plan's blended score.
    if partition.parent:
        parent_fairness = 1.0 - abs(efficiency_gap(partition.parent["election"]))
        parent_compactness = np.mean(list(polsby_popper(partition.parent).values()))
        parent_stability = 1.0 - 0.1  # Simplified: parent flips not tracked here
        parent_score = blend(parent_fairness, parent_compactness, parent_stability)
        if proposal_score >= parent_score:
            return True
        # Worse than the parent: accept probabilistically by quality.
        return random.random() < proposal_score
    return True
# Usage examples with custom acceptance functions
# Fairness-biased chain with cooling schedule
def run_fairness_optimization(initial_partition, steps=10000):
    """
    Anneal toward fairer plans by gradually cooling the acceptance rule.

    Runs ``steps`` short MarkovChain segments of 100 proposals each, using
    ``fairness_biased_accept`` at a temperature that is multiplied by 0.95
    after every segment. Every visited state is collected and returned.
    Progress (efficiency gap and temperature) is printed every 1000 segments.
    """
    temperature = 1.0
    cooling_rate = 0.95
    sampled_states = []
    partition = initial_partition

    for step in range(steps):
        # Bind the current temperature now; the segment below runs before
        # the next cooling step, so the value seen at call time is the same.
        def annealed_accept(p, _temp=temperature):
            return fairness_biased_accept(p, temperature=_temp)

        # One short chain segment at the current temperature.
        segment = MarkovChain(
            proposal=recom,
            constraints=constraints,
            accept=annealed_accept,
            initial_state=partition,
            total_steps=100
        )

        # Walk the segment, keeping every state and its final partition.
        for state in segment:
            partition = state
            sampled_states.append(partition)

        # Cool down for the next segment.
        temperature *= cooling_rate

        if step % 1000 == 0:
            eg = efficiency_gap(partition["election"])
            print(f"Step {step}: EG = {eg:.4f}, Temp = {temperature:.3f}")
    return sampled_states
# Quality threshold chain
def run_quality_filtered_chain(initial_partition):
    """
    Run a chain that only accepts high-quality partitions.

    A proposal is accepted only when it passes the compactness threshold
    (mean Polsby-Popper >= 0.4) AND has low partisan bias
    (|efficiency gap| < 0.05). Returns the list of all visited states.
    """
    def quality_accept(p):
        # Combine multiple quality criteria; compactness is checked first
        # so the efficiency-gap computation short-circuits when it fails.
        if not compactness_threshold_accept(p, min_compactness=0.4):
            return False
        return abs(efficiency_gap(p["election"])) < 0.05  # Low partisan bias

    chain = MarkovChain(
        proposal=recom,
        constraints=constraints,
        accept=quality_accept,
        initial_state=initial_partition,
        total_steps=5000
    )
    # Collect every state the chain visits.
    return [state for state in chain]

# Examples of sophisticated acceptance strategies for research applications:
# Adaptive acceptance based on chain performance
class AdaptiveAcceptance:
    """
    Acceptance wrapper that tunes its own acceptance rate online.

    Keeps a rolling window of quality scores for recent proposals. When
    quality is trending upward the wrapper becomes more permissive; when it
    stalls or declines it becomes more restrictive. Proposals that survive
    the adaptive random gate are passed on to ``base_acceptance`` for the
    final decision.
    """

    def __init__(self, base_acceptance=always_accept, adaptation_rate=0.01):
        self.base_acceptance = base_acceptance
        self.adaptation_rate = adaptation_rate
        self.recent_scores = []      # rolling window of quality scores
        self.acceptance_rate = 1.0   # probability of consulting base_acceptance

    def __call__(self, partition):
        # Record proposal quality whenever a parent exists to compare against.
        if hasattr(partition, 'parent') and partition.parent:
            self.recent_scores.append(self.calculate_quality(partition))
            # Bound the history to the 100 most recent proposals.
            if len(self.recent_scores) > 100:
                self.recent_scores.pop(0)
        # Once enough history exists, adapt based on the short-term trend.
        if len(self.recent_scores) > 10:
            trend = self.recent_scores[-1] - self.recent_scores[-10]
            if trend > 0:
                # Improving: loosen the gate (capped at 1.0).
                self.acceptance_rate = min(1.0, self.acceptance_rate + self.adaptation_rate)
            else:
                # Not improving: tighten the gate (floored at 0.1).
                self.acceptance_rate = max(0.1, self.acceptance_rate - self.adaptation_rate)
        # Randomly reject a fraction of proposals per the adaptive rate.
        if random.random() > self.acceptance_rate:
            return False
        return self.base_acceptance(partition)

    def calculate_quality(self, partition):
        # Example quality metric: partisan fairness via the efficiency gap.
        return 1.0 - abs(efficiency_gap(partition["election"]))
# Ensemble acceptance for robust sampling
def ensemble_accept(partition, acceptance_functions, weights=None):
    """
    Combine multiple acceptance functions with weighted voting.

    Each function in ``acceptance_functions`` votes True/False on the
    proposal; a True vote contributes that function's weight (all weights
    default to 1.0). The proposal is accepted when the weighted share of
    True votes is a strict majority (> 0.5).
    """
    if weights is None:
        weights = [1.0] * len(acceptance_functions)
    # Sum the weight of every function that voted to accept.
    yes_weight = sum(
        weight
        for accept_func, weight in zip(acceptance_functions, weights)
        if accept_func(partition)
    )
    # Accept if the weighted average exceeds the threshold.
    return yes_weight / sum(weights) > 0.5  # Majority rule

# Module-level type aliases used throughout this documentation:
# AcceptanceFunction = Callable[[Partition], bool]
Temperature = float  # For simulated annealing
Probability = float  # Between 0 and 1

Install with Tessl CLI:
npx tessl i tessl/pypi-gerrychain