CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-cvxpy

A domain-specific language for modeling convex optimization problems in Python.

Pending
Overview
Eval results
Files

transforms.mddocs/

Transform Functions

Problem transformation utilities for advanced optimization techniques including linearization, partial optimization, and support function transformations in CVXPY.

Capabilities

Linearization

Linearize expressions around current variable values for successive linear programming and sensitivity analysis.

def linearize(expr, var_id=None):
    """
    Linearize expression around current variable values.

    Produces the first-order (affine) approximation of ``expr`` at the point
    given by the current ``.value`` of its variables; the usage examples
    below assign variable values before calling.

    Parameters:
    - expr: Expression to linearize
    - var_id: Variable ID to linearize around (None for all variables)

    Returns:
    - Expression: linearized expression (affine approximation)
    """
    ...

Usage examples:

import cvxpy as cp
import numpy as np

# Successive linear programming (SLP) example.
x = cp.Variable(2)
y = cp.Variable(2)

# Non-convex constraint: x[0] * y[0] + x[1] * y[1] >= 1
nonconvex_expr = x[0] * y[0] + x[1] * y[1]

# Initialize variables: linearize() reads the current .value of each variable.
x.value = np.array([1.0, 1.0])
y.value = np.array([0.5, 0.5])

# Solve a sequence of linearized problems until the objective stabilizes.
prev_obj = None  # objective from the previous iteration; None before the first solve
for iteration in range(10):
    # Affine approximation of the bilinear term around the current point.
    linear_approx = cp.linearize(nonconvex_expr)

    # Solve linearized problem
    objective = cp.Minimize(cp.sum_squares(x) + cp.sum_squares(y))
    constraints = [linear_approx >= 1, x >= 0, y >= 0]
    problem = cp.Problem(objective, constraints)

    problem.solve()

    # Guard against solver failure: problem.value is None when no solution
    # was found, and formatting/comparing None would raise.
    if problem.value is None:
        print(f"Iteration {iteration}: solver failed ({problem.status})")
        break

    print(f"Iteration {iteration}: obj = {problem.value:.4f}")
    print(f"x = {x.value}, y = {y.value}")

    # Converged when successive objectives agree to tolerance.
    if prev_obj is not None and abs(problem.value - prev_obj) < 1e-6:
        break
    prev_obj = problem.value

# Sensitivity analysis using linearization:
# perturb a parameter and use a linearized objective to estimate the effect.
# The equality system is kept underdetermined (2 equations, 3 unknowns) and b
# is built from a feasible nonnegative point; an overdetermined system with a
# random right-hand side would generically be infeasible and every printed
# value would be None/inf.
A = cp.Parameter((2, 3), value=np.random.randn(2, 3))
x = cp.Variable(3)
b = cp.Parameter(2, value=A.value @ np.ones(3))  # feasible by construction (x = 1)

original_problem = cp.Problem(
    cp.Minimize(cp.sum_squares(x)),
    [A @ x == b, x >= 0]
)
original_problem.solve()
original_obj = original_problem.value

# Linearize the objective around the current solution
# (linearize() reads x.value, which the solve above populated).
obj_linear = cp.linearize(cp.sum_squares(x))

# Perturb the parameter once; both solves below see the same perturbed b.
b.value = b.value + 0.1 * np.random.randn(2)

# Solve with the linearized objective (approximation).
approx_problem = cp.Problem(cp.Minimize(obj_linear), [A @ x == b, x >= 0])
approx_problem.solve()
approx_obj = approx_problem.value

# Re-solve the original problem exactly for comparison.
original_problem.solve()
exact_obj = original_problem.value

print(f"Original objective: {original_obj:.6f}")
print(f"Approximated objective: {approx_obj:.6f}")
print(f"Exact objective: {exact_obj:.6f}")
print(f"Approximation error: {abs(exact_obj - approx_obj):.6f}")

Partial Optimization

Partially optimize over a subset of variables, producing an expression in the remaining variables that can be embedded in an outer problem.

def partial_optimize(problem, opt_vars, opt_params=None):
    """
    Partially optimize a problem over specified variables.

    The variables in ``opt_vars`` are optimized away; the result is an
    expression in the problem's remaining variables, suitable for use as
    part of an outer problem's objective (see the bilevel examples below).

    Parameters:
    - problem: Problem to partially optimize
    - opt_vars: list of variables to optimize over
    - opt_params: list of parameters to treat as optimization variables

    Returns:
    - Expression: optimal value as function of remaining variables
    """
    ...

Usage examples:

import cvxpy as cp
import numpy as np

# Bilevel optimization example
# Lower level: min_y ||A y - (b + x)||^2 subject to y >= 0
# Upper level: min_x ||x||^2 + (lower-level optimal value)

# Problem dimensions
m, n = 10, 5

# Lower-level data.  A is (m, n), so y must have n entries; the residual
# A @ y - (b + x) has m entries, hence b and the upper-level perturbation x
# are both m-dimensional.  (Using Variable(m) for y and Variable(n) for x,
# as a naive formulation might, makes both A @ y and b + x dimension errors.)
A = cp.Parameter((m, n), value=np.random.randn(m, n))
b = cp.Parameter(m)
b.value = A.value @ np.ones(n)  # make the unperturbed system consistent

# Upper-level variable: perturbation of the right-hand side b.
x = cp.Variable(m)

# Lower-level variable.
y = cp.Variable(n)

# Lower-level problem (parameterized by x through the residual).
lower_obj = cp.sum_squares(A @ y - (b + x))  # b + x: perturbed right-hand side
lower_constraints = [y >= 0]
lower_problem = cp.Problem(cp.Minimize(lower_obj), lower_constraints)

# Partially optimize over y to get an expression in x for the upper level.
try:
    optimal_lower_value = cp.partial_optimize(lower_problem, [y])

    # Upper level problem
    upper_obj = cp.sum_squares(x) + optimal_lower_value
    upper_constraints = [cp.norm(x, 'inf') <= 1]
    upper_problem = cp.Problem(cp.Minimize(upper_obj), upper_constraints)

    upper_problem.solve()
    print(f"Optimal x: {x.value}")
    print(f"Upper level objective: {upper_problem.value}")

except Exception as e:
    print(f"Partial optimization failed: {e}")

# Portfolio optimization with transaction costs
# Outer problem: choose target portfolio
# Inner problem: minimize transaction costs to reach target

n_assets = 5
w_current = cp.Parameter(n_assets, value=np.random.rand(n_assets))
w_current.value = w_current.value / np.sum(w_current.value)  # normalize

# Target portfolio (outer variable)
w_target = cp.Variable(n_assets)

# Trade amounts (inner variables); nonneg=True already enforces >= 0,
# so no explicit nonnegativity constraints are needed below.
w_buy = cp.Variable(n_assets, nonneg=True)
w_sell = cp.Variable(n_assets, nonneg=True)

# Proportional transaction cost rates
cost_buy = 0.01   # 1% transaction cost for buying
cost_sell = 0.01  # 1% transaction cost for selling

# Inner problem: minimize transaction costs to reach the target.
transaction_costs = cost_buy * cp.sum(w_buy) + cost_sell * cp.sum(w_sell)
rebalancing_constraint = w_current + w_buy - w_sell == w_target

inner_problem = cp.Problem(
    cp.Minimize(transaction_costs),
    [rebalancing_constraint]
)

# Outer problem: choose target portfolio considering transaction costs
expected_return = cp.Parameter(n_assets, value=0.1 * np.random.rand(n_assets))
cost_aversion = 0.5  # weight on transaction costs in the outer objective

try:
    # Partially optimize over the trading variables.
    min_transaction_cost = cp.partial_optimize(inner_problem, [w_buy, w_sell])

    # Outer objective: maximize return minus weighted transaction costs.
    portfolio_return = expected_return.T @ w_target
    outer_obj = cp.Maximize(portfolio_return - cost_aversion * min_transaction_cost)

    # Portfolio constraints
    outer_constraints = [
        cp.sum(w_target) == 1,  # fully invested
        w_target >= 0           # long-only
    ]

    outer_problem = cp.Problem(outer_obj, outer_constraints)
    outer_problem.solve()

    print(f"Optimal target portfolio: {w_target.value}")
    print(f"Current portfolio: {w_current.value}")
    print(f"Expected net return: {outer_problem.value}")

except Exception as e:
    print(f"Bilevel portfolio optimization failed: {e}")

Support Function

Transform expressions using support function representations for convex analysis.

def suppfunc(expr, vars=None):
    """
    Compute support function of expression.

    Conceptually returns the supremum of ``expr`` over the variables in
    ``vars`` — see the Fenchel conjugate example below, which uses it to
    form f*(y) = sup_x (y^T x - f(x)).

    NOTE(review): the released cvxpy API exposes ``suppfunc(x, constraints)``
    (a variable plus the constraints defining the set); confirm this stub's
    signature against the installed cvxpy version.

    Parameters:
    - expr: Expression to compute support function for
    - vars: Variables to compute support function over

    Returns:
    - Expression: support function
    """
    ...

Usage examples:

import cvxpy as cp
import numpy as np

# Support function of a norm ball
x = cp.Variable(3)
y = cp.Variable(3)  # dual variable

# L2 ball: {x : ||x||_2 <= 1}
# Support function: sup_{||x||_2 <= 1} y^T x = ||y||_2
l2_ball_constraint = cp.norm(x, 2) <= 1
try:
    # NOTE(review): l2_ball_constraint defined above is never passed to
    # suppfunc, so nothing in this call actually restricts x to the ball —
    # confirm how the constraint set is meant to be supplied.
    support_func = cp.suppfunc(y.T @ x, [x])  # Should give ||y||_2
    print("L2 ball support function computed")
except Exception as e:
    print(f"Support function computation failed: {e}")

# Support function in robust optimization
# Robust least squares: min_x max_{||u||_inf <= delta} ||Ax + u - b||^2

n, m = 5, 10
A = cp.Parameter((m, n), value=np.random.randn(m, n))
b = cp.Parameter(m, value=np.random.randn(m))
x = cp.Variable(n)
delta = 0.1  # uncertainty level

# Uncertainty set: ||u||_inf <= delta
u = cp.Variable(m)
residual = A @ x + u - b

try:
    # Worst-case residual over uncertainty set
    # NOTE(review): suppfunc is applied to a *quadratic* here, but a support
    # function is the sup of a linear functional over a set — confirm the
    # implementation accepts this.
    worst_case_obj = cp.suppfunc(cp.sum_squares(residual), [u])
    uncertainty_constraint = cp.norm(u, 'inf') <= delta
    
    # Robust optimization problem
    # NOTE(review): the uncertainty bound is attached to the outer
    # *minimization* problem rather than to the inner maximization over u;
    # verify that this actually encodes max_{||u||_inf <= delta}.
    robust_obj = cp.Minimize(worst_case_obj)
    robust_problem = cp.Problem(robust_obj, [uncertainty_constraint])
    
    robust_problem.solve()
    print(f"Robust solution: {x.value}")
    
except Exception as e:
    print(f"Robust optimization with support function failed: {e}")

# Fenchel conjugate computation
# For a convex function f(x), the conjugate is f*(y) = sup_x (y^T x - f(x)).

x = cp.Variable()
y = cp.Parameter(value=1.0)

# Conjugate of f(x) = x^2/2 should be f*(y) = y^2/2
f_x = 0.5 * x**2
linear_term = y * x

try:
    conjugate = cp.suppfunc(linear_term - f_x, [x])
    print("Fenchel conjugate computed")

    # Evaluate the conjugate at different points.
    for y_val in [-2, -1, 0, 1, 2]:
        y.value = y_val
        # CVXPY problems minimize, so solve min(-conjugate) and negate the
        # optimal value to recover the supremum f*(y).  (Reporting
        # problem.value directly would print -f*(y), the wrong sign.)
        conjugate_problem = cp.Problem(cp.Minimize(-conjugate))  # max -> min
        conjugate_problem.solve()
        computed = -conjugate_problem.value  # undo the sign flip
        analytical = 0.5 * y_val**2  # true conjugate value
        print(f"y={y_val}: computed={computed:.4f}, "
              f"analytical={analytical:.4f}")

except Exception as e:
    print(f"Fenchel conjugate computation failed: {e}")

# Game theory: computing Nash equilibrium of a matrix game
# Player 1: min_x max_y x^T Q y
# Player 2: max_y min_x x^T Q y
# where x, y are in probability simplexes

n_strategies = 3
Q = cp.Parameter((n_strategies, n_strategies), 
                 value=np.random.randn(n_strategies, n_strategies))

x = cp.Variable(n_strategies, nonneg=True)  # Player 1 strategy
y = cp.Variable(n_strategies, nonneg=True)  # Player 2 strategy

# Probability simplex constraints
x_simplex = cp.sum(x) == 1
y_simplex = cp.sum(y) == 1

try:
    # Bilinear payoff x^T Q y.  (cp.quad_form takes only two arguments and
    # computes x^T Q x, so quad_form(x, Q, y) is not a valid call.)
    payoff = x @ Q @ y
    # Player 1's worst-case payoff: maximize the payoff over y.
    player1_worst_case = cp.suppfunc(payoff, [y])

    # Nash equilibrium computation
    nash_problem = cp.Problem(
        cp.Minimize(player1_worst_case),
        [x_simplex, x >= 0, y_simplex, y >= 0]
    )

    nash_problem.solve()
    print(f"Nash equilibrium strategies:")
    print(f"Player 1: {x.value}")
    print(f"Player 2: {y.value}")

except Exception as e:
    print(f"Nash equilibrium computation failed: {e}")

Indicator Function

Transform expressions using indicator functions for set membership constraints.

def indicator(constraints):
    """
    Indicator function for a set defined by constraints.

    The returned expression evaluates to 0 wherever every constraint in
    ``constraints`` holds and to +inf otherwise, so adding it to an
    objective enforces membership in the set (see the usage example below).

    Parameters:
    - constraints: list of constraints defining the set

    Returns:
    - Expression: indicator function (0 if constraints satisfied, +inf otherwise)
    """
    ...

Scalarization Functions

Transform multi-objective problems into single-objective problems using scalarization techniques.

def log_sum_exp(expr, axis=None, keepdims=False):
    """
    Log-sum-exponential scalarization: log(sum(exp(expr))).

    A smooth convex upper bound on max(expr); used in the examples below
    for risk-sensitive scalarization of multiple cost terms.

    Parameters:
    - expr: expression
    - axis: axis to sum along
    - keepdims: whether to keep singleton dimensions

    Returns:
    - Expression: log-sum-exp scalarization
    """
    ...

def max(expr, axis=None, keepdims=False):
    """
    Max scalarization for multi-objective problems.

    Minimizing the returned expression minimizes the worst (largest) of the
    given objectives.  Note: this name shadows the Python builtin ``max``
    if imported unqualified.

    Parameters:
    - expr: expression or list of objective expressions
    - axis: axis to take maximum along
    - keepdims: whether to keep singleton dimensions

    Returns:
    - Expression: maximum scalarization
    """
    ...

def targets_and_priorities(objectives, targets, priorities):
    """
    Target-based scalarization with priorities.

    ``objectives``, ``targets``, and ``priorities`` are parallel lists —
    one target value and one priority weight per objective (the usage
    example below passes three of each).

    Parameters:
    - objectives: list of objective expressions
    - targets: target values for each objective
    - priorities: priority weights for each objective

    Returns:
    - Expression: scalarized objective
    """
    ...

def weighted_sum(objectives, weights):
    """
    Weighted sum scalarization.

    Combines multiple objectives into the single objective
    sum_i weights[i] * objectives[i]; ``weights`` supplies one weight per
    objective.

    Parameters:
    - objectives: list of objective expressions
    - weights: weight vector for objectives

    Returns:
    - Expression: weighted sum objective
    """
    ...

Usage examples for the indicator and scalarization functions:

import cvxpy as cp
import numpy as np

# Indicator function example - constrained optimization
x = cp.Variable(2)
y = cp.Variable(2)

# Membership set: x in the unit L2 ball and nonnegative; each entry of y in [-1, 1].
feasible_set = [cp.norm(x, 2) <= 1, x >= 0, y >= -1, y <= 1]

try:
    # 0 on the set, +inf off it: adding it to the objective enforces membership.
    set_penalty = cp.indicator(feasible_set)

    problem = cp.Problem(cp.Minimize(cp.sum_squares(x - y) + set_penalty))
    problem.solve()

    print(f"Solution with indicator function: x={x.value}, y={y.value}")

except Exception as e:
    print(f"Indicator function example failed: {e}")

# Multi-objective optimization with scalarization
n = 5
x = cp.Variable(n)
A1 = cp.Parameter((3, n), value=np.random.randn(3, n))
A2 = cp.Parameter((4, n), value=np.random.randn(4, n))
b1 = cp.Parameter(3, value=np.random.randn(3))
b2 = cp.Parameter(4, value=np.random.randn(4))

# Three competing objectives for the same decision vector x.
obj1 = cp.norm(A1 @ x - b1, 2)  # Least squares fit 1
obj2 = cp.norm(A2 @ x - b2, 1)  # Least absolute deviations fit 2
obj3 = cp.norm(x, 1)            # Sparsity regularization

objectives = [obj1, obj2, obj3]

# Weighted sum scalarization
weights = [0.5, 0.3, 0.2]
try:
    # Collapse the three objectives into one via a convex combination.
    combined = cp.transforms.weighted_sum(objectives, weights)
    box = [x >= -2, x <= 2]

    weighted_problem = cp.Problem(cp.Minimize(combined), box)
    weighted_problem.solve()

    print(f"Weighted sum solution: {x.value}")
    print(f"Objective values: {[obj.value for obj in objectives]}")

except Exception as e:
    print(f"Weighted sum scalarization failed: {e}")

# Target-based scalarization
targets = [1.0, 0.5, 0.1]  # Target values for each objective
priorities = [1.0, 2.0, 0.5]  # Higher priority = more important

try:
    # Scalarize against the targets, weighting misses by priority.
    scalarized = cp.transforms.targets_and_priorities(objectives, targets, priorities)
    bounds = [x >= -2, x <= 2]

    target_problem = cp.Problem(cp.Minimize(scalarized), bounds)
    target_problem.solve()

    print(f"Target-based solution: {x.value}")
    print(f"Distance from targets: {[abs(obj.value - target) for obj, target in zip(objectives, targets)]}")

except Exception as e:
    print(f"Target-based scalarization failed: {e}")

# Log-sum-exp scalarization for risk-sensitive optimization
# Minimize risk-sensitive cost: log(E[exp(costs)])
n_scenarios = 10

# One perturbed L2 cost per scenario, each with its own random offset.
scenario_costs = []
for _ in range(n_scenarios):
    offset = 0.1 * np.random.randn(3)
    scenario_costs.append(cp.norm(A1 @ x - b1 + offset, 2))

try:
    # Stack the per-scenario costs into a single vector expression.
    cost_vector = cp.hstack(scenario_costs)

    # log-sum-exp of the costs, shifted by log(n) to approximate log E[exp].
    risk_sensitive_obj = cp.transforms.log_sum_exp(cost_vector) - np.log(n_scenarios)

    risk_problem = cp.Problem(cp.Minimize(risk_sensitive_obj), [x >= -1, x <= 1])
    risk_problem.solve()

    print(f"Risk-sensitive solution: {x.value}")
    print(f"Individual scenario costs: {[cost.value for cost in scenario_costs]}")

except Exception as e:
    print(f"Risk-sensitive optimization failed: {e}")

Install with Tessl CLI

npx tessl i tessl/pypi-cvxpy

docs

atoms.md

constraints.md

errors.md

expressions.md

index.md

problems.md

solvers.md

transforms.md

tile.json