Use Markov chain Monte Carlo to analyze districting plans and gerrymanders
---
Core MCMC functionality for generating ensembles of districting plans. The MarkovChain class orchestrates the entire analysis workflow by iterating through proposed partitions, validating constraints, and making acceptance decisions.
Create and configure Markov chains with proposal functions, constraints, acceptance criteria, and initial states.
class MarkovChain:
def __init__(
self,
proposal: ProposalFunction,
constraints: Union[ConstraintFunction, List[ConstraintFunction], Validator],
accept: AcceptanceFunction,
initial_state: Partition,
total_steps: int
) -> None:
"""
Create a Markov chain for redistricting analysis.
Parameters:
- proposal (ProposalFunction): Function that proposes new partitions
- constraints (Union[ConstraintFunction, List[ConstraintFunction], Validator]):
Validation functions or Validator instance
- accept (AcceptanceFunction): Function that accepts/rejects proposals
- initial_state (Partition): Starting partition for the chain
- total_steps (int): Number of steps to run
Returns:
None
Raises:
ValueError: If initial_state fails constraint validation
"""

Usage example:
from gerrychain import MarkovChain, GeographicPartition
from gerrychain.constraints import contiguous, within_percent_of_ideal_population
from gerrychain.proposals import recom
from gerrychain.accept import always_accept
# Set up initial partition
partition = GeographicPartition(graph, assignment="district", updaters=updaters)
# Create Markov chain
chain = MarkovChain(
proposal=recom,
constraints=[
contiguous,
within_percent_of_ideal_population(partition, 0.05)
],
accept=always_accept,
initial_state=partition,
total_steps=1000
)

Access and modify chain properties including constraints, state, and step counting.
@property
def constraints(self) -> Validator:
"""
Read-only access to the constraints validator.
Returns:
Validator: The constraints validator instance
"""
@constraints.setter
def constraints(
self,
constraints: Union[ConstraintFunction, List[ConstraintFunction], Validator]
) -> None:
"""
Update chain constraints and validate initial state against new constraints.
Parameters:
- constraints: New constraint functions or Validator instance
Returns:
None
Raises:
ValueError: If initial_state fails new constraint validation
"""

Execute the Markov chain using Python's iterator protocol for seamless integration with loops and analysis workflows.
def __iter__(self) -> "MarkovChain":
"""
Initialize chain iteration, resetting counter and state.
Returns:
MarkovChain: Self as iterator
"""
def __next__(self) -> Partition:
"""
Advance to next valid, accepted partition in the chain.
Returns:
Partition: Next partition state
Raises:
StopIteration: When total_steps is reached
"""
def __len__(self) -> int:
"""
Get total number of steps in the chain.
Returns:
int: Total steps configured for this chain
"""

Add visual progress tracking for long-running chains using tqdm progress bars.
def with_progress_bar(self):
"""
Wrap the Markov chain with a tqdm progress bar for visual feedback.
Returns:
tqdm-wrapped MarkovChain iterator
Requires:
tqdm package must be installed
"""

Usage example:
# Run chain with progress bar
for partition in chain.with_progress_bar():
# Analyze partition
score = compute_score(partition)
scores.append(score)
# Log every 100 steps
if len(scores) % 100 == 0:
print(f"Step {len(scores)}: Score = {score:.3f}")

Example of a complete Markov chain analysis workflow:
from gerrychain import MarkovChain, Graph, GeographicPartition
from gerrychain.constraints import contiguous, within_percent_of_ideal_population
from gerrychain.proposals import recom
from gerrychain.accept import always_accept
from gerrychain.updaters import Tally, cut_edges, Election
from gerrychain.metrics import mean_median, efficiency_gap
# 1. Create graph and initial partition
graph = Graph.from_file("precincts.shp")
partition = GeographicPartition(
graph,
assignment="district",
updaters={
"population": Tally("population"),
"cut_edges": cut_edges,
"SEN18": Election("SEN18", ["SEN18D", "SEN18R"])
}
)
# 2. Set up constraints
constraints = [
contiguous,
within_percent_of_ideal_population(partition, 0.05)
]
# 3. Create and run chain
chain = MarkovChain(
proposal=recom,
constraints=constraints,
accept=always_accept,
initial_state=partition,
total_steps=10000
)
# 4. Collect metrics
mean_medians = []
efficiency_gaps = []
for state in chain.with_progress_bar():
# Compute partisan metrics
mm = mean_median(state["SEN18"])
eg = efficiency_gap(state["SEN18"])
mean_medians.append(mm)
efficiency_gaps.append(eg)
# 5. Analyze results
import numpy as np
print(f"Mean-Median: {np.mean(mean_medians):.3f} ± {np.std(mean_medians):.3f}")
print(f"Efficiency Gap: {np.mean(efficiency_gaps):.3f} ± {np.std(efficiency_gaps):.3f}")

Type aliases used throughout this API:

ProposalFunction = Callable[[Partition], Partition]
ConstraintFunction = Callable[[Partition], bool]
AcceptanceFunction = Callable[[Partition], bool]
UpdaterFunction = Callable[[Partition], Any]

Install with Tessl CLI
npx tessl i tessl/pypi-gerrychain