LMFIT: non-linear least-squares minimization and curve fitting with enhanced parameter management and confidence-interval estimation.

LMFIT provides comprehensive tools for confidence-interval estimation and error analysis, going beyond simple covariance-based uncertainties to explore parameter space and provide robust confidence bounds even for challenging nonlinear problems. This section documents the statistical analysis tools for determining parameter confidence bounds through parameter-space exploration.
def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
                  trace=False, maxiter=200, prob_func=None, verbose=False):
    """Calculate confidence intervals for parameters using profile likelihood.

    Args:
        minimizer (Minimizer): Minimizer instance used for the original fit.
        result (MinimizerResult): Results from the minimization.
        p_names (list): Parameter names to analyze (all varied parameters
            if None).
        sigmas (tuple): Confidence levels given as probabilities
            (default corresponds to roughly 1-, 2- and 3-sigma levels).
        trace (bool): If True, also return the full trace of the parameter
            exploration.
        maxiter (int): Maximum iterations used to locate each confidence
            bound.
        prob_func (callable): Function used to calculate a probability from
            chi-squared values.
        verbose (bool): If True, print progress information.

    Returns:
        dict: Parameter names mapped to confidence-interval dictionaries;
        each parameter's dict contains the confidence bounds at each sigma
        level.
    """
def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10, limits=None):
    """Calculate a 2D confidence region for a pair of parameters.

    Args:
        minimizer (Minimizer): Minimizer instance from the original fit.
        result (MinimizerResult): Results from the minimization.
        x_name (str): Name of the first parameter.
        y_name (str): Name of the second parameter.
        nx (int): Number of grid points in the x direction.
        ny (int): Number of grid points in the y direction.
        limits (tuple): ((x_min, x_max), (y_min, y_max)) grid bounds
            (optional).

    Returns:
        tuple: (x_values, y_values, probability_grid) where x_values and
        y_values are arrays of parameter values and probability_grid is a
        2D array of probability values on the nx-by-ny grid.
    """
def f_compare(best_fit, new_fit):
    """F-test comparison of two fits.

    Args:
        best_fit (MinimizerResult): Results from the better fit.
        new_fit (MinimizerResult): Results from the comparison fit.

    Returns:
        tuple: (f_statistic, p_value)
    """


# Helper functions for confidence interval analysis and parameter management.
def copy_vals(params):
    """Save the current parameter values and uncertainties.

    Args:
        params (Parameters): Parameters to save.

    Returns:
        dict: Saved parameter state (restorable via restore_vals).
    """
def restore_vals(tmp_params, params):
    """Restore parameter values from a previously saved state.

    Args:
        tmp_params (dict): Saved parameter state, as returned by copy_vals.
        params (Parameters): Parameters object to restore into.
    """
def map_trace_to_names(trace, params):
    """Map a trace array to parameter names.

    Args:
        trace (array): Parameter trace array.
        params (Parameters): Parameter object.

    Returns:
        dict: Parameter names mapped to trace values.
    """


import numpy as np
from lmfit import minimize, Parameters, conf_interval, fit_report
def residual(params, x, data):
    """Return residuals of an exponential-decay model against the data.

    Model: amplitude * exp(-decay * x) + offset.
    """
    amp = params['amplitude']
    rate = params['decay']
    baseline = params['offset']
    predicted = amp * np.exp(-rate * x) + baseline
    return predicted - data
# Generate sample data: exponential decay (amplitude 5.0, rate 0.8,
# offset 1.0) plus Gaussian noise.
# NOTE(review): no RNG seed is set, so the generated data differ each run.
x = np.linspace(0, 10, 101)
data = 5.0 * np.exp(-0.8 * x) + 1.0 + np.random.normal(size=101, scale=0.1)
# Set up parameters and fit
params = Parameters()
params.add('amplitude', value=10, min=0)  # amplitude constrained non-negative
params.add('decay', value=1, min=0)       # decay rate constrained non-negative
params.add('offset', value=1)
# Perform fit
result = minimize(residual, params, args=(x, data))
# Calculate confidence intervals at the default sigma levels
ci = conf_interval(result.minimizer, result)
# Print results with confidence intervals
print(fit_report(result, show_correl=False))
print("\nConfidence Intervals:")
for param_name, bounds in ci.items():
    print(f"{param_name}:")
    for sigma, (lower, upper) in bounds.items():
        percent = sigma * 100  # sigma levels are probabilities here
        print(f" {percent:.1f}%: [{lower:.4f}, {upper:.4f}]")
# Calculate confidence intervals at custom probability levels
custom_sigmas = (0.68, 0.90, 0.95, 0.99)  # 68%, 90%, 95%, 99%
ci = conf_interval(result.minimizer, result, sigmas=custom_sigmas, verbose=True)
# Process results
for param_name, bounds in ci.items():
    print(f"\n{param_name} confidence intervals:")
    for prob, (lower, upper) in bounds.items():
        print(f" {prob*100:.0f}%: [{lower:.6f}, {upper:.6f}]")
# Calculate confidence intervals only for specific parameters
specific_params = ['amplitude', 'decay']
ci_specific = conf_interval(result.minimizer, result,
                            p_names=specific_params,
                            maxiter=500)  # extra iterations per bound
# Get trace information for detailed analysis
ci_with_trace = conf_interval(result.minimizer, result,
                              p_names=['amplitude'],
                              trace=True, verbose=True)
# Access trace data
amplitude_trace = ci_with_trace['amplitude']['trace']
print(f"Trace points explored: {len(amplitude_trace)}")
# Calculate 2D confidence region for parameter pair
x_vals, y_vals, prob_grid = conf_interval2d(result.minimizer, result,
                                            'amplitude', 'decay',
                                            nx=20, ny=20)
# Plot 2D confidence region
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 6))
contour = plt.contour(x_vals, y_vals, prob_grid,
                      levels=[0.68, 0.95, 0.997],
                      colors=['blue', 'green', 'red'])
plt.clabel(contour, inline=True, fontsize=10,
           fmt={0.68: '68%', 0.95: '95%', 0.997: '99.7%'})
# Mark best-fit point
best_amp = result.params['amplitude'].value
best_decay = result.params['decay'].value
plt.plot(best_amp, best_decay, 'ko', markersize=8, label='Best fit')
plt.xlabel('Amplitude')
plt.ylabel('Decay Rate')
plt.title('2D Confidence Regions')
plt.legend()
plt.grid(True, alpha=0.3)
plt.show()

from lmfit import Minimizer
# Custom probability function for confidence intervals
def custom_prob_func(nvarys, ndata, chimin, chisqr):
    """Custom probability calculation from the chi-squared increase.

    Args:
        nvarys (int): Number of varied parameters (part of the prob_func
            callback signature; not used by this implementation).
        ndata (int): Number of data points (not used by this
            implementation).
        chimin (float): Chi-squared of the best fit.
        chisqr (float): Chi-squared of the fit being compared.

    Returns:
        float: exp(-delta_chi / 2), where delta_chi = chisqr - chimin.
    """
    # Fix: the original computed `dof = ndata - nvarys` but never used it.
    delta_chi = chisqr - chimin
    return np.exp(-0.5 * delta_chi)
# Create minimizer instance for detailed control
minimizer = Minimizer(residual, params, fcn_args=(x, data))
result = minimizer.minimize()
# Calculate confidence intervals with the custom probability function
ci_custom = conf_interval(minimizer, result,
                          prob_func=custom_prob_func,
                          maxiter=1000, verbose=True)

from lmfit import Model
from lmfit.models import ExponentialModel
# Using Model interface
exp_model = ExponentialModel()
params = exp_model.guess(data, x=x)  # initial parameter estimates from data
# Fit model
result = exp_model.fit(data, params, x=x)
# Calculate confidence intervals for model parameters
ci = conf_interval(result.minimizer, result)
# Report with confidence intervals included
from lmfit import ci_report
print(result.fit_report())
print("\n" + ci_report(ci))
# Compare nested models using F-test
from lmfit.models import LinearModel, QuadraticModel
# Fit simple linear model
linear_model = LinearModel()
linear_params = linear_model.guess(data, x=x)
linear_result = linear_model.fit(data, linear_params, x=x)
# Fit more complex quadratic model
quad_model = QuadraticModel()
quad_params = quad_model.guess(data, x=x)
quad_result = quad_model.fit(data, quad_params, x=x)
# F-test to compare models
f_stat, p_value = f_compare(linear_result, quad_result)
print(f"F-statistic: {f_stat:.4f}")
print(f"P-value: {p_value:.6f}")
if p_value < 0.05:
    print("Quadratic model is significantly better")
else:
    print("Linear model is adequate")
# Create uncertainties variables for error propagation
uvars = result.params.create_uvars()
# Use uncertainties objects for calculations with error propagation
amplitude_u = uvars['amplitude']
decay_u = uvars['decay']
offset_u = uvars['offset']
# Calculate derived quantities with propagated uncertainties
half_life = np.log(2) / decay_u
initial_value = amplitude_u + offset_u  # model value at x = 0
print(f"Half-life: {half_life}")
print(f"Initial value: {initial_value}")

import json
# Convert confidence intervals to a JSON-serializable format
ci_data = {}
for param_name, bounds in ci.items():
    ci_data[param_name] = {}
    for sigma, (lower, upper) in bounds.items():
        ci_data[param_name][str(sigma)] = [lower, upper]
# Save to file
with open('confidence_intervals.json', 'w') as f:
    json.dump(ci_data, f, indent=2)
# Load from file
with open('confidence_intervals.json', 'r') as f:
    loaded_ci = json.load(f)
# Install with Tessl CLI
npx tessl i tessl/pypi-lmfit