A library for Probabilistic Graphical Models
Exact and approximate inference algorithms for computing marginal probabilities, answering MAP queries, and performing causal inference. pgmpy provides comprehensive inference capabilities for different query types and model structures.
Exact inference algorithm using variable elimination for computing marginal probabilities and MAP queries.
class VariableElimination:
def __init__(self, model):
"""
Initialize variable elimination inference for a model.
Parameters:
- model: DiscreteBayesianNetwork, MarkovNetwork, or FactorGraph
"""
def query(self, variables, evidence=None, elimination_order="MinFill",
joint=True, show_progress=True):
"""
Compute marginal probability distribution.
Parameters:
- variables: list of query variables
- evidence: dict of observed evidence {variable: value}
- elimination_order: variable elimination order ("MinFill", "MinNeighbors", "MinWeight", or list)
- joint: whether to return joint distribution over all query variables
- show_progress: whether to show progress bar
Returns:
DiscreteFactor: Marginal probability distribution
"""
def max_marginal(self, variables, evidence=None,
elimination_order="MinFill", show_progress=True):
"""
Compute max-marginal probabilities.
Parameters:
- variables: list of query variables
- evidence: dict of observed evidence
- elimination_order: variable elimination order
- show_progress: whether to show progress bar
Returns:
DiscreteFactor: Max-marginal distribution
"""
def map_query(self, variables=None, evidence=None,
elimination_order="MinFill", show_progress=True):
"""
Find Maximum A Posteriori (MAP) assignment.
Parameters:
- variables: list of MAP variables (None for all unobserved)
- evidence: dict of observed evidence
- elimination_order: variable elimination order
- show_progress: whether to show progress bar
Returns:
dict: MAP assignment {variable: value}
"""
def induced_graph(self, elimination_order):
"""
Get the induced graph for given elimination order.
Parameters:
- elimination_order: list of variables in elimination order
Returns:
networkx.Graph: Induced graph
"""
def induced_width(self, elimination_order):
"""
Compute induced width (tree width) for elimination order.
Parameters:
- elimination_order: list of variables in elimination order
Returns:
int: Induced width
"""Exact inference using belief propagation on junction trees.
Exact inference using belief propagation on junction trees.
class BeliefPropagation:
def __init__(self, model):
"""
Initialize belief propagation inference.
Parameters:
- model: DiscreteBayesianNetwork, MarkovNetwork, or JunctionTree
"""
def calibrate(self):
"""
Calibrate the junction tree by passing messages.
Returns:
None: Calibrates clique potentials in-place
"""
def max_calibrate(self):
"""
Calibrate junction tree using max-product algorithm.
Returns:
None: Calibrates clique potentials for MAP queries
"""
def query(self, variables, evidence=None, joint=True, show_progress=True):
"""
Query marginal probabilities after calibration.
Parameters:
- variables: list of query variables
- evidence: dict of observed evidence
- joint: whether to return joint distribution
- show_progress: whether to show progress bar
Returns:
DiscreteFactor: Marginal probability distribution
"""
def map_query(self, variables=None, evidence=None, show_progress=True):
"""
Find MAP assignment using max-product belief propagation.
Parameters:
- variables: list of MAP variables
- evidence: dict of observed evidence
- show_progress: whether to show progress bar
Returns:
dict: MAP assignment
"""
def get_cliques(self):
"""Get cliques in the junction tree."""
def get_sepset(self, clique1, clique2):
"""Get separator set between two cliques."""Extended belief propagation with explicit message passing control.
Extended belief propagation with explicit message passing control.
class BeliefPropagationWithMessagePassing:
def __init__(self, model):
"""Initialize belief propagation with message passing."""
def send_messages(self, from_clique, to_clique):
"""Send message between specific cliques."""
def get_messages(self):
"""Get all messages in the junction tree."""
def calibrate_clique(self, clique):
"""Calibrate a specific clique."""Algorithms for causal reasoning and effect estimation.
class CausalInference:
def __init__(self, model):
"""
Initialize causal inference for a causal model.
Parameters:
- model: DiscreteBayesianNetwork representing causal relationships
"""
def estimate_ate(self, treatment, outcome, common_causes=None,
effect_modifiers=None):
"""
Estimate Average Treatment Effect (ATE).
Parameters:
- treatment: name of treatment variable
- outcome: name of outcome variable
- common_causes: list of common cause variables
- effect_modifiers: list of effect modifier variables
Returns:
float: Estimated average treatment effect
"""
def estimate_cate(self, treatment, outcome, common_causes=None,
effect_modifiers=None):
"""
Estimate Conditional Average Treatment Effect (CATE).
Parameters:
- treatment: treatment variable name
- outcome: outcome variable name
- common_causes: list of common causes
- effect_modifiers: list of effect modifiers
Returns:
dict: CATE estimates for each modifier level
"""
def backdoor_adjustment(self, treatment, outcome, backdoor_set):
"""
Perform backdoor adjustment for causal effect estimation.
Parameters:
- treatment: treatment variable
- outcome: outcome variable
- backdoor_set: list of backdoor adjustment variables
Returns:
float: Causal effect estimate
"""
def instrumental_variable_estimation(self, treatment, outcome, instrument):
"""
Estimate causal effect using instrumental variables.
Parameters:
- treatment: treatment variable
- outcome: outcome variable
- instrument: instrumental variable
Returns:
float: IV-based causal effect estimate
"""
def front_door_adjustment(self, treatment, outcome, mediator_set):
"""
Perform front-door adjustment for causal inference.
Parameters:
- treatment: treatment variable
- outcome: outcome variable
- mediator_set: list of mediator variables
Returns:
float: Front-door causal effect estimate
"""Base class and algorithms for approximate inference when exact methods are intractable.
Base class and algorithms for approximate inference when exact methods are intractable.
class ApproxInference:
def __init__(self, model):
"""
Base class for approximate inference algorithms.
Parameters:
- model: probabilistic graphical model
"""
def query(self, variables, evidence=None, n_samples=1000):
"""
Approximate query using sampling.
Parameters:
- variables: list of query variables
- evidence: dict of evidence
- n_samples: number of samples for approximation
Returns:
DiscreteFactor: Approximate marginal distribution
"""
def map_query(self, variables=None, evidence=None, n_samples=1000):
"""
Approximate MAP query.
Parameters:
- variables: list of MAP variables
- evidence: dict of evidence
- n_samples: number of samples
Returns:
dict: Approximate MAP assignment
"""Specialized inference for temporal models.
Specialized inference for temporal models.
class DBNInference:
def __init__(self, model):
"""
Initialize inference for Dynamic Bayesian Networks.
Parameters:
- model: DynamicBayesianNetwork
"""
def forward_inference(self, variables, evidence=None, n_time_slices=1):
"""
Perform forward inference over time.
Parameters:
- variables: list of query variables
- evidence: dict of temporal evidence
- n_time_slices: number of time slices
Returns:
list: Marginal distributions for each time slice
"""
def backward_inference(self, variables, evidence=None, n_time_slices=1):
"""Perform backward inference (smoothing)."""
def viterbi(self, evidence=None, n_time_slices=1):
"""
Find most likely sequence using Viterbi algorithm.
Parameters:
- evidence: temporal evidence
- n_time_slices: sequence length
Returns:
list: Most likely state sequence
"""
def particle_filter(self, evidence, n_particles=1000):
"""
Perform particle filtering for state estimation.
Parameters:
- evidence: temporal evidence
- n_particles: number of particles
Returns:
list: Particle-based state estimates
"""Inference using linear programming relaxation.
Inference using linear programming relaxation.
class Mplp:
def __init__(self, model):
"""
Initialize Max-Product Linear Programming inference.
Parameters:
- model: MarkovNetwork or FactorGraph
"""
def map_query(self, evidence=None, max_iter=100, tol=1e-6):
"""
Find MAP assignment using MPLP.
Parameters:
- evidence: dict of evidence
- max_iter: maximum iterations
- tol: convergence tolerance
Returns:
dict: MAP assignment
"""
def get_dual_objective(self):
"""Get dual objective value."""
def get_primal_objective(self):
"""Get primal objective value."""Inference algorithms based on Monte Carlo sampling.
Inference algorithms based on Monte Carlo sampling.
class BayesianModelSampling:
def __init__(self, model):
"""
Initialize sampling-based inference.
Parameters:
- model: DiscreteBayesianNetwork
"""
def forward_sample(self, size=1, seed=None, include_latents=False,
partial_samples=None, show_progress=True):
"""
Generate samples using forward sampling.
Parameters:
- size: number of samples
- seed: random seed
- include_latents: whether to include latent variables
- partial_samples: pre-specified partial samples
- show_progress: whether to show progress bar
Returns:
pandas.DataFrame: Generated samples
"""
def rejection_sample(self, evidence=[], size=1, seed=None,
include_latents=False, show_progress=True):
"""
Generate samples using rejection sampling.
Parameters:
- evidence: list of evidence as State objects
- size: number of samples
- seed: random seed
- include_latents: whether to include latents
- show_progress: whether to show progress bar
Returns:
pandas.DataFrame: Samples satisfying evidence
"""
def likelihood_weighted_sample(self, evidence=[], size=1, seed=None,
include_latents=False, show_progress=True):
"""
Generate weighted samples using likelihood weighting.
Parameters:
- evidence: list of evidence
- size: number of samples
- seed: random seed
- include_latents: whether to include latents
- show_progress: whether to show progress bar
Returns:
pandas.DataFrame: Weighted samples with 'weight' column
"""
class GibbsSampling:
def __init__(self, model=None):
"""
Initialize Gibbs sampling MCMC.
Parameters:
- model: DiscreteBayesianNetwork or MarkovNetwork
"""
def sample(self, start_state=None, size=1, seed=None, include_latents=False):
"""
Generate samples using Gibbs sampling.
Parameters:
- start_state: initial state for Markov chain
- size: number of samples
- seed: random seed
- include_latents: whether to include latent variables
Returns:
pandas.DataFrame: MCMC samples
"""
def generate_sample(self, start_state=None, size=1, seed=None, include_latents=False):
"""Generate single sample from current state."""Common interface for all inference algorithms.
class Inference:
def __init__(self, model):
"""
Base class for all inference algorithms.
Parameters:
- model: probabilistic graphical model
"""
def query(self, variables, evidence=None):
"""
Abstract method for probability queries.
Parameters:
- variables: list of query variables
- evidence: dict of evidence
Returns:
DiscreteFactor: Query result
"""
def map_query(self, variables=None, evidence=None):
"""
Abstract method for MAP queries.
Parameters:
- variables: list of MAP variables
- evidence: dict of evidence
Returns:
dict: MAP assignment
"""from pgmpy.models import DiscreteBayesianNetwork
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import VariableElimination
# Create a small example model
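# Hypothetical three-node network used by the snippets below; structure and CPD values are illustrative only
model = DiscreteBayesianNetwork([('A', 'B'), ('B', 'C')])
cpd_a = TabularCPD('A', 2, [[0.6], [0.4]])
cpd_b = TabularCPD('B', 2, [[0.7, 0.2], [0.3, 0.8]], evidence=['A'], evidence_card=[2])
cpd_c = TabularCPD('C', 2, [[0.9, 0.4], [0.1, 0.6]], evidence=['B'], evidence_card=[2])
model.add_cpds(cpd_a, cpd_b, cpd_c)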
inference = VariableElimination(model)
# Query marginal probability P(C | A=1)
result = inference.query(variables=['C'], evidence={'A': 1})
print("P(C | A=1):")
print(result)
# Find MAP assignment for all unobserved variables
map_result = inference.map_query(evidence={'A': 1})
print("MAP assignment:", map_result)from pgmpy.inference import BeliefPropagation
# Initialize belief propagation
bp = BeliefPropagation(model)
# Calibrate the junction tree
bp.calibrate()
# Query after calibration
result = bp.query(['C'], evidence={'A': 1})
print("BP result:", result)from pgmpy.models import DiscreteBayesianNetwork
from pgmpy.inference import CausalInference
# Create causal model (parameters/CPDs omitted for brevity)
causal_model = DiscreteBayesianNetwork([('Treatment', 'Outcome'),
('Confounder', 'Treatment'),
('Confounder', 'Outcome')])
# Initialize causal inference
causal_inf = CausalInference(causal_model)
# Estimate average treatment effect
ate = causal_inf.estimate_ate('Treatment', 'Outcome',
common_causes=['Confounder'])
print(f"Average Treatment Effect: {ate}")from pgmpy.sampling import BayesianModelSampling, GibbsSampling
# Forward sampling
sampler = BayesianModelSampling(model)
samples = sampler.forward_sample(size=1000)
# Rejection sampling with evidence
evidence_samples = sampler.rejection_sample(
evidence=[State('A', 1)], size=100
)
# Gibbs sampling for MCMC
gibbs = GibbsSampling(model)
mcmc_samples = gibbs.sample(size=1000)
Install with Tessl CLI
npx tessl i tessl/pypi-pgmpy