Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • pmag/topupopt
1 result
Show changes
Commits on Source (4)
# imports
import pyomo.environ as pyo
# *****************************************************************************
# *****************************************************************************
def add_converters(
    model: pyo.AbstractModel,
    enable_default_values: bool = True,
    enable_validation: bool = True,
    enable_initialisation: bool = True,
):
    """Add converter systems to a Pyomo abstract model.

    Converters are systems described through discrete-time state-space
    equations whose inputs, states and outputs can be bounded, dimensioned
    (amplitude-sized), penalised and coupled to network node balances
    elsewhere in the model.

    Parameters
    ----------
    model : pyo.AbstractModel
        The model to extend. It is expected to already declare the time
        sets (``set_Q``, ``set_QK``) and network sets (``set_GL_not_exp_imp``)
        referenced below — TODO confirm against the calling module.
    enable_default_values : bool
        Kept for interface compatibility; defaults are currently always set.
    enable_validation : bool
        Kept for interface compatibility; not currently used in this body.
    enable_initialisation : bool
        Kept for interface compatibility; not currently used in this body.

    Returns
    -------
    pyo.AbstractModel
        The same model object, with converter sets, parameters, variables
        and constraints added.
    """

    # *************************************************************************
    # systems

    # set of all systems
    model.set_I = pyo.Set()
    # set of optional (new, not-yet-installed) systems
    model.set_I_new = pyo.Set(within=model.set_I)

    # *************************************************************************
    # inputs

    # set of inputs (indexed by system)
    model.set_M = pyo.Set(model.set_I)
    # set of inputs modelled using non-negative real variables
    model.set_M_nnr = pyo.Set(model.set_I, within=model.set_M)
    # set of inputs modelled using binary variables
    model.set_M_bin = pyo.Set(model.set_I, within=model.set_M)
    # set of amplitude-constrained (dimensionable) inputs
    model.set_M_dim = pyo.Set(model.set_I_new, within=model.set_M)
    # set of inputs with fixed (predefined) bounds
    model.set_M_fix = pyo.Set(model.set_I, within=model.set_M)
    # set of externality-inducing inputs
    model.set_M_ext = pyo.Set(model.set_I, within=model.set_M)

    # *************************************************************************
    # outputs

    # set of outputs (indexed by system)
    model.set_R = pyo.Set(model.set_I)
    # set of outputs with fixed bounds
    model.set_R_fix = pyo.Set(model.set_I, within=model.set_R)
    # set of amplitude-constrained (dimensionable) outputs
    # FIX: this set was referenced (set_IR_dim, param_c_cvt_y_ir) but never
    # declared in the original code
    model.set_R_dim = pyo.Set(model.set_I, within=model.set_R)
    # set of positive amplitude-constrained outputs
    model.set_R_dim_pos = pyo.Set(model.set_I, within=model.set_R)
    # set of negative amplitude-constrained outputs
    model.set_R_dim_neg = pyo.Set(model.set_I, within=model.set_R)
    # set of amplitude-limited outputs with matching pos. and neg. amplitudes
    model.set_R_dim_eq = pyo.Set(model.set_I, within=model.set_R)
    # set of outputs (indexed by system) inducing externalities
    model.set_R_ext = pyo.Set(model.set_I, within=model.set_R)

    # *************************************************************************
    # states

    # set of states
    model.set_N = pyo.Set(model.set_I)
    # set of states with fixed bounds
    model.set_N_fix = pyo.Set(model.set_I, within=model.set_N)
    # set of positive amplitude-constrained states
    model.set_N_dim_pos = pyo.Set(model.set_I, within=model.set_N)
    # set of negative amplitude-constrained states
    model.set_N_dim_neg = pyo.Set(model.set_I, within=model.set_N)
    # set of amplitude-limited states with matching pos. and neg. amplitudes
    model.set_N_dim_eq = pyo.Set(model.set_I, within=model.set_N)
    # set of states (indexed by system) inducing externalities
    model.set_N_ext = pyo.Set(model.set_I, within=model.set_N)
    # set of positive state variation-penalised states
    model.set_N_pos_var = pyo.Set(model.set_I, within=model.set_N)
    # set of negative state variation-penalised states
    model.set_N_neg_var = pyo.Set(model.set_I, within=model.set_N)
    # set of upper reference violation-penalised states
    model.set_N_ref_u = pyo.Set(model.set_I, within=model.set_N)
    # set of lower reference violation-penalised states
    model.set_N_ref_d = pyo.Set(model.set_I, within=model.set_N)

    # *************************************************************************
    # *************************************************************************
    # sparse index sets
    # *************************************************************************
    # *************************************************************************

    # inputs

    # set of IM tuples
    def init_set_IM(m):
        return ((i, m_i) for i in m.set_I for m_i in m.set_M[i])

    model.set_IM = pyo.Set(dimen=2, initialize=init_set_IM)

    # set of IM tuples for systems with binary signals
    def init_set_IM_bin(m):
        return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_bin[i])

    model.set_IM_bin = pyo.Set(dimen=2, initialize=init_set_IM_bin, within=model.set_IM)

    # set of IM tuples for tech. with dimensionable reference mode levels
    def init_set_IM_dim(m):
        return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_dim[i])

    model.set_IM_dim = pyo.Set(dimen=2, initialize=init_set_IM_dim, within=model.set_IM)

    # set of IM tuples for fixed amplitude inputs
    def init_set_IM_fix(m):
        return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_fix[i])

    model.set_IM_fix = pyo.Set(dimen=2, initialize=init_set_IM_fix, within=model.set_IM)

    # set of IM tuples for technologies whose modes can induce externalities
    def init_set_IM_ext(m):
        return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_ext[i])

    model.set_IM_ext = pyo.Set(dimen=2, initialize=init_set_IM_ext, within=model.set_IM)

    # *************************************************************************
    # states

    # set of IN tuples
    def init_set_IN(m):
        return ((i, n_i) for i in m.set_I for n_i in m.set_N[i])

    model.set_IN = pyo.Set(dimen=2, initialize=init_set_IN)

    # set of IN tuples for states with fixed bounds
    def init_set_IN_fix(m):
        return ((i, n_i) for i in m.set_I for n_i in m.set_N_fix[i])

    model.set_IN_fix = pyo.Set(dimen=2, initialize=init_set_IN_fix, within=model.set_IN)

    # set of IN tuples for converters with amplitude-constrained states
    def init_set_IN_dim_eq(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_eq[i])

    model.set_IN_dim_eq = pyo.Set(
        dimen=2, initialize=init_set_IN_dim_eq, within=model.set_IN
    )

    # set of IN tuples for converters with pos. amplitude-constrained states
    def init_set_IN_dim_pos(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_pos[i])

    model.set_IN_dim_pos = pyo.Set(
        dimen=2, initialize=init_set_IN_dim_pos, within=model.set_IN
    )

    # set of IN tuples for converters with neg. amplitude-constrained states
    def init_set_IN_dim_neg(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_neg[i])

    model.set_IN_dim_neg = pyo.Set(
        dimen=2, initialize=init_set_IN_dim_neg, within=model.set_IN
    )

    # set of IN tuples for converters with externality-inducing states
    def init_set_IN_ext(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ext[i])

    model.set_IN_ext = pyo.Set(dimen=2, initialize=init_set_IN_ext, within=model.set_IN)

    # set of IN tuples for positive variation-penalised states
    def init_set_IN_pos_var(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_pos_var[i])

    model.set_IN_pos_var = pyo.Set(
        dimen=2, initialize=init_set_IN_pos_var, within=model.set_IN
    )

    # set of IN tuples for negative variation-penalised states
    def init_set_IN_neg_var(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_neg_var[i])

    model.set_IN_neg_var = pyo.Set(
        dimen=2, initialize=init_set_IN_neg_var, within=model.set_IN
    )

    # set of IN tuples for upper reference violation penalised states
    def init_set_IN_ref_u(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ref_u[i])

    model.set_IN_ref_u = pyo.Set(
        dimen=2, initialize=init_set_IN_ref_u, within=model.set_IN
    )

    # set of IN tuples for lower reference violation penalised states
    def init_set_IN_ref_d(m):
        return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ref_d[i])

    model.set_IN_ref_d = pyo.Set(
        dimen=2, initialize=init_set_IN_ref_d, within=model.set_IN
    )

    # *************************************************************************
    # outputs

    # set of IR tuples
    def init_set_IR(m):
        return ((i, r_i) for i in m.set_I for r_i in m.set_R[i])

    model.set_IR = pyo.Set(dimen=2, initialize=init_set_IR)

    # set of IR tuples for outputs with fixed bounds
    def init_set_IR_fix(m):
        return ((i, r_i) for i in m.set_I for r_i in m.set_R_fix[i])

    model.set_IR_fix = pyo.Set(dimen=2, initialize=init_set_IR_fix, within=model.set_IR)

    # set of IR tuples for converters with matching pos. and neg. out. amp. limits
    def init_set_IR_dim_eq(m):
        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_eq[i])

    model.set_IR_dim_eq = pyo.Set(
        dimen=2, initialize=init_set_IR_dim_eq, within=model.set_IR
    )

    # set of IR tuples for converters with neg. amplitude-constrained outputs
    def init_set_IR_dim_neg(m):
        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_neg[i])

    model.set_IR_dim_neg = pyo.Set(
        dimen=2, initialize=init_set_IR_dim_neg, within=model.set_IR
    )

    # set of IR tuples for converters with dimensionable outputs
    def init_set_IR_dim(m):
        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim[i])

    model.set_IR_dim = pyo.Set(dimen=2, initialize=init_set_IR_dim, within=model.set_IR)

    # set of IR tuples for converters with pos. amplitude-constrained outputs
    def init_set_IR_dim_pos(m):
        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_pos[i])

    model.set_IR_dim_pos = pyo.Set(
        dimen=2, initialize=init_set_IR_dim_pos, within=model.set_IR
    )

    # set of IR tuples for converters with externality-inducing outputs
    def init_set_IR_ext(m):
        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_ext[i])

    model.set_IR_ext = pyo.Set(dimen=2, initialize=init_set_IR_ext, within=model.set_IR)

    # *************************************************************************
    # combined inputs/states/outputs

    # TODO: narrow down these sets if possible

    # set of INN tuples
    def init_set_INN(m):
        return ((i, n1, n2) for (i, n1) in m.set_IN for n2 in m.set_N[i])

    model.set_INN = pyo.Set(dimen=3, initialize=init_set_INN)

    # set of INM tuples
    def init_set_INM(m):
        return ((i, n_i, m_i) for (i, n_i) in m.set_IN for m_i in m.set_M[i])

    model.set_INM = pyo.Set(dimen=3, initialize=init_set_INM)

    # set of IRM tuples
    def init_set_IRM(m):
        # can be further constrained
        return ((i, r_i, m_i) for (i, r_i) in m.set_IR for m_i in m.set_M[i])

    model.set_IRM = pyo.Set(dimen=3, initialize=init_set_IRM)

    # set of IRN tuples
    def init_set_IRN(m):
        # can be further constrained
        return ((i, r_i, n_i) for (i, r_i) in m.set_IR for n_i in m.set_N[i])

    model.set_IRN = pyo.Set(dimen=3, initialize=init_set_IRN)

    # *************************************************************************
    # *************************************************************************
    # parameters
    # *************************************************************************
    # *************************************************************************

    # externality cost per input unit
    model.param_c_ext_u_imqk = pyo.Param(
        model.set_IM_ext, model.set_QK, within=pyo.NonNegativeReals, default=0
    )
    # externality cost per output unit
    model.param_c_ext_y_irqk = pyo.Param(
        model.set_IR_ext, model.set_QK, within=pyo.NonNegativeReals, default=0
    )
    # externality cost per state unit
    model.param_c_ext_x_inqk = pyo.Param(
        model.set_IN_ext, model.set_QK, within=pyo.NonNegativeReals, default=0
    )
    # unit cost of positive state variations
    model.param_c_pos_var_in = pyo.Param(
        model.set_IN_pos_var, within=pyo.NonNegativeReals, default=0
    )
    # unit cost of negative state variations
    model.param_c_neg_var_in = pyo.Param(
        model.set_IN_neg_var, within=pyo.NonNegativeReals, default=0
    )
    # unit cost of upper state reference violations
    model.param_c_ref_u_inqk = pyo.Param(
        model.set_IN_ref_u, model.set_QK, within=pyo.NonNegativeReals, default=0
    )
    # unit cost of lower state reference violations
    model.param_c_ref_d_inqk = pyo.Param(
        model.set_IN_ref_d, model.set_QK, within=pyo.NonNegativeReals, default=0
    )
    # minimum converter cost (fixed capex term for installing converter i)
    model.param_c_cvt_min_i = pyo.Param(
        model.set_I_new, within=pyo.NonNegativeReals, default=0
    )
    # unit (positive) input amplitude cost
    model.param_c_cvt_u_im = pyo.Param(
        model.set_IM_dim, within=pyo.NonNegativeReals, default=0
    )
    # unit output amplitude cost
    model.param_c_cvt_y_ir = pyo.Param(
        model.set_IR_dim, within=pyo.NonNegativeReals, default=0
    )
    # unit positive state amplitude cost
    model.param_c_cvt_x_pos_in = pyo.Param(
        model.set_IN_dim_pos, within=pyo.NonNegativeReals, default=0
    )
    # unit negative state amplitude cost
    model.param_c_cvt_x_neg_in = pyo.Param(
        model.set_IN_dim_neg, within=pyo.NonNegativeReals, default=0
    )
    # unit positive output amplitude cost
    model.param_c_cvt_y_pos_ir = pyo.Param(
        model.set_IR_dim_pos, within=pyo.NonNegativeReals, default=0
    )
    # unit negative output amplitude cost
    model.param_c_cvt_y_neg_ir = pyo.Param(
        model.set_IR_dim_neg, within=pyo.NonNegativeReals, default=0
    )

    # *************************************************************************

    # effect of system inputs on specific network and node pairs
    model.param_a_nw_glimqk = pyo.Param(
        model.set_GL_not_exp_imp,
        model.set_IM,
        model.set_QK,
        default=0,  # default: no effect
        within=pyo.Reals,
    )
    # effect of system outputs on specific network and node pairs
    model.param_a_nw_glirqk = pyo.Param(
        model.set_GL_not_exp_imp,
        model.set_IR,
        model.set_QK,
        default=0,  # default: no effect
        within=pyo.Reals,
    )

    # *************************************************************************
    # inputs

    # upper bounds for (non-binary, non-dimensionable) inputs
    model.param_u_ub_imqk = pyo.Param(
        model.set_IM_fix, model.set_QK, within=pyo.PositiveReals
    )
    # maximum input limits
    model.param_u_amp_max_im = pyo.Param(
        model.set_IM_dim, within=pyo.PositiveReals, default=1
    )
    # time interval-dependent adjustment coefficients for input limits
    model.param_f_amp_u_imqk = pyo.Param(
        model.set_IM_dim, model.set_QK, within=pyo.PositiveReals, default=1
    )

    # *************************************************************************
    # states

    # initial conditions
    model.param_x_inq0 = pyo.Param(model.set_IN, model.set_Q, within=pyo.Reals)
    # fixed upper bounds for state variables
    model.param_x_ub_irqk = pyo.Param(model.set_IN_fix, model.set_QK, within=pyo.Reals)
    # fixed lower bounds for state variables
    model.param_x_lb_irqk = pyo.Param(model.set_IN_fix, model.set_QK, within=pyo.Reals)
    # maximum positive amplitude for states
    model.param_x_amp_pos_max_in = pyo.Param(
        model.set_IN_dim_pos, within=pyo.PositiveReals
    )
    # maximum negative amplitude for states
    model.param_x_amp_neg_max_in = pyo.Param(
        model.set_IN_dim_neg, within=pyo.PositiveReals
    )
    # adjustment of positive state amplitude limits
    model.param_f_amp_pos_x_inqk = pyo.Param(
        model.set_IN_dim_pos, model.set_QK, within=pyo.PositiveReals, default=1
    )
    # adjustment of negative state amplitude limits
    model.param_f_amp_neg_x_inqk = pyo.Param(
        model.set_IN_dim_neg, model.set_QK, within=pyo.PositiveReals, default=1
    )
    # state equations: coefficients from the A matrix (state-to-state)
    model.param_a_eq_x_innqk = pyo.Param(
        model.set_INN, model.set_QK, default=0, within=pyo.Reals  # default: no effect
    )
    # state equations: coefficients from the B matrix (input-to-state)
    model.param_b_eq_x_inmqk = pyo.Param(
        model.set_INM, model.set_QK, default=0, within=pyo.Reals  # default: no effect
    )
    # state equations: constant term
    model.param_e_eq_x_inqk = pyo.Param(
        model.set_IN, model.set_QK, default=0, within=pyo.Reals  # default: no effect
    )

    # *************************************************************************
    # outputs

    # fixed upper bounds for output variables
    model.param_y_ub_irqk = pyo.Param(model.set_IR_fix, model.set_QK, within=pyo.Reals)
    # fixed lower bounds for output variables
    model.param_y_lb_irqk = pyo.Param(model.set_IR_fix, model.set_QK, within=pyo.Reals)
    # adjustment of positive output amplitude limits
    model.param_f_amp_y_pos_irqk = pyo.Param(
        model.set_IR_dim_pos, model.set_QK, within=pyo.PositiveReals, default=1
    )
    # adjustment of negative output amplitude limits
    model.param_f_amp_y_neg_irqk = pyo.Param(
        model.set_IR_dim_neg, model.set_QK, within=pyo.PositiveReals, default=1
    )
    # maximum positive amplitude limit for outputs
    model.param_y_amp_pos_max_ir = pyo.Param(
        model.set_IR_dim_pos, within=pyo.PositiveReals
    )
    # maximum negative amplitude limit for outputs
    model.param_y_amp_neg_max_ir = pyo.Param(
        model.set_IR_dim_neg, within=pyo.PositiveReals
    )
    # output equation coefficients from the C matrix (state-to-output)
    model.param_c_eq_y_irnqk = pyo.Param(
        model.set_IRN, model.set_QK, default=0, within=pyo.Reals  # default: no effect
    )
    # output equation coefficients from the D matrix (input-to-output)
    model.param_d_eq_y_irmqk = pyo.Param(
        model.set_IRM, model.set_QK, default=0, within=pyo.Reals  # default: no effect
    )
    # output equation constant
    model.param_e_eq_y_irqk = pyo.Param(
        model.set_IR, model.set_QK, default=0, within=pyo.Reals  # default: no effect
    )

    # *************************************************************************
    # *************************************************************************
    # variables
    # *************************************************************************
    # *************************************************************************

    # capex for installing individual converters
    model.var_capex_cvt_i = pyo.Var(model.set_I_new, within=pyo.NonNegativeReals)

    # decision to install converter i
    model.var_cvt_inv_i = pyo.Var(model.set_I_new, within=pyo.Binary)

    # *************************************************************************
    # inputs

    # input variables
    def bounds_var_u_imqk(m, i, m_i, q, k):
        # FIX: membership must be tested against the index set (set_IM_fix),
        # not the Param itself, whose full index also includes (q, k)
        if (i, m_i) in m.set_IM_fix:
            # predefined limit
            return (0, m.param_u_ub_imqk[(i, m_i, q, k)])
        else:
            # dynamic limit (set elsewhere)
            return (0, None)

    def domain_var_u_imqk(m, i, m_i, q, k):
        try:
            if m_i in m.set_M_bin[i]:
                return pyo.Binary  # binary: {0,1}
            else:
                return pyo.NonNegativeReals  # nonnegative real: [0,inf]
        except KeyError:
            return pyo.NonNegativeReals  # nonnegative real: [0,inf]

    model.var_u_imqk = pyo.Var(
        model.set_IM,
        model.set_QK,
        domain=domain_var_u_imqk,
        bounds=bounds_var_u_imqk,
    )

    # input amplitude variables (only one per sign is needed, as vars. are nnr)
    model.var_u_amp_im = pyo.Var(model.set_IM_dim, within=pyo.NonNegativeReals)

    # *************************************************************************
    # outputs

    # output variables
    def bounds_var_y_irqk(m, i, r, q, k):
        # FIX: set_R_fix is indexed by system, so membership is per system;
        # FIX: the fixed output bounds are param_y_lb_irqk/param_y_ub_irqk
        # (the original referenced undeclared param_u_lb_irqk/param_u_ub_irqk)
        if r in m.set_R_fix[i]:
            # predefined limits
            return (m.param_y_lb_irqk[(i, r, q, k)], m.param_y_ub_irqk[(i, r, q, k)])
        else:
            # do not enforce any limits
            return (None, None)

    model.var_y_irqk = pyo.Var(
        model.set_IR, model.set_QK, bounds=bounds_var_y_irqk, within=pyo.Reals
    )

    # positive output amplitudes
    model.var_y_amp_pos_ir = pyo.Var(model.set_IR_dim_pos, within=pyo.Reals)
    # negative output amplitudes
    model.var_y_amp_neg_ir = pyo.Var(model.set_IR_dim_neg, within=pyo.Reals)

    # *************************************************************************
    # states

    # state variables
    model.var_x_inqk = pyo.Var(model.set_IN, model.set_QK, within=pyo.Reals)
    # positive amplitude variables
    model.var_x_amp_pos_in = pyo.Var(model.set_IN_dim_pos, within=pyo.NonNegativeReals)
    # negative amplitude variables
    model.var_x_amp_neg_in = pyo.Var(model.set_IN_dim_neg, within=pyo.NonNegativeReals)
    # positive state variation
    model.var_delta_x_pos_var_in = pyo.Var(
        model.set_IN_pos_var, within=pyo.NonNegativeReals
    )
    # negative state variation
    model.var_delta_x_neg_var_in = pyo.Var(
        model.set_IN_neg_var, within=pyo.NonNegativeReals
    )
    # positive reference state violation
    model.var_delta_x_ref_u_inqk = pyo.Var(
        model.set_IN_ref_u, model.set_QK, within=pyo.NonNegativeReals
    )
    # negative reference state violation
    model.var_delta_x_ref_d_inqk = pyo.Var(
        model.set_IN_ref_d, model.set_QK, within=pyo.NonNegativeReals
    )

    # *************************************************************************
    # *************************************************************************
    # constraints
    # *************************************************************************
    # *************************************************************************

    # capex for converters: var_capex_cvt_i bounds the sum of the fixed
    # installation cost plus all amplitude-proportional cost terms
    def rule_capex_converter(m, i):
        return (
            m.var_cvt_inv_i[i] * m.param_c_cvt_min_i[i]
            + sum(
                m.var_u_amp_im[(i, m_i)] * m.param_c_cvt_u_im[(i, m_i)]
                for m_i in m.set_M_dim[i]  # FIX: was undeclared set_M_dim_i
            )
            + sum(
                m.var_x_amp_pos_in[(i, n)] * m.param_c_cvt_x_pos_in[(i, n)]
                for n in m.set_N_dim_pos[i]
            )
            + sum(
                m.var_x_amp_neg_in[(i, n)] * m.param_c_cvt_x_neg_in[(i, n)]
                for n in m.set_N_dim_neg[i]
            )
            + sum(
                m.var_y_amp_pos_ir[(i, r)] * m.param_c_cvt_y_pos_ir[(i, r)]
                for r in m.set_R_dim_pos[i]  # FIX: was set_N_dim_pos (states)
            )
            + sum(
                m.var_y_amp_neg_ir[(i, r)] * m.param_c_cvt_y_neg_ir[(i, r)]
                for r in m.set_R_dim_neg[i]  # FIX: was set_N_dim_neg (states)
            )
            <= m.var_capex_cvt_i[i]
        )

    model.constr_capex_system = pyo.Constraint(
        model.set_I_new, rule=rule_capex_converter
    )

    # *************************************************************************
    # inputs

    # input signal limits for dimensionable inputs
    def rule_constr_u_limit_dim(m, i, m_i, q, k):
        return (
            m.var_u_imqk[(i, m_i, q, k)]
            <= m.var_u_amp_im[(i, m_i)] * m.param_f_amp_u_imqk[(i, m_i, q, k)]
        )

    model.constr_u_limit_dim = pyo.Constraint(
        model.set_IM_dim, model.set_QK, rule=rule_constr_u_limit_dim
    )

    # nominal input amplitude limit for dimensionable inputs
    # (forces the amplitude to zero unless the converter is installed)
    def rule_constr_u_amp_ub(m, i, m_i):
        return (
            m.var_u_amp_im[(i, m_i)]
            <= m.var_cvt_inv_i[i] * m.param_u_amp_max_im[(i, m_i)]
        )

    model.constr_u_amp_ub = pyo.Constraint(model.set_IM_dim, rule=rule_constr_u_amp_ub)

    # fixed upper limits
    def rule_constr_u_fix_limits(m, i, m_i, q, k):
        # if we need to know the lim input signal (e.g., for the obj. func.)
        if i in m.set_I_new:
            # new converter: the limit only applies if it is installed
            # FIX: the original carried a second, conflicting return statement
            # here (a revision-merge artifact); it has been removed
            return (
                m.var_u_imqk[(i, m_i, q, k)]
                <= m.param_u_ub_imqk[(i, m_i, q, k)] * m.var_cvt_inv_i[i]
            )
        else:
            # pre-existing converter: plain parameter bound
            return m.var_u_imqk[(i, m_i, q, k)] <= m.param_u_ub_imqk[(i, m_i, q, k)]

    model.constr_u_fix_limits = pyo.Constraint(
        model.set_IM_fix, model.set_QK, rule=rule_constr_u_fix_limits
    )

    # input limits for binary inputs
    def rule_constr_u_bin_limits(m, i, m_i, q, k):
        if i in m.set_I_new:
            # binary inputs can only be active if the converter is installed
            return m.var_u_imqk[(i, m_i, q, k)] <= m.var_cvt_inv_i[i]
        else:
            return pyo.Constraint.Skip

    model.constr_u_bin_limits = pyo.Constraint(
        model.set_IM_bin, model.set_QK, rule=rule_constr_u_bin_limits
    )

    # *************************************************************************
    # outputs

    # output equations: y = C x + D u + e
    def rule_constr_output_equations(m, i, r, q, k):
        return (
            m.var_y_irqk[(i, r, q, k)]  # FIX: index was missing q
            == sum(
                m.param_c_eq_y_irnqk[(i, r, n_i, q, k)] * m.var_x_inqk[(i, n_i, q, k)]
                for n_i in m.set_N[i]
            )
            + sum(
                m.param_d_eq_y_irmqk[(i, r, m_i, q, k)] * m.var_u_imqk[(i, m_i, q, k)]
                for m_i in m.set_M[i]
            )
            + m.param_e_eq_y_irqk[(i, r, q, k)]
        )

    model.constr_output_equations = pyo.Constraint(
        model.set_IR, model.set_QK, rule=rule_constr_output_equations
    )

    # positive amplitude limit for output variables
    def rule_constr_y_vars_have_pos_amp_limits(m, i, r, q, k):
        return m.var_y_irqk[(i, r, q, k)] <= (
            m.var_y_amp_pos_ir[(i, r)] * m.param_f_amp_y_pos_irqk[(i, r, q, k)]
        )

    model.constr_y_vars_have_pos_amp_limits = pyo.Constraint(
        model.set_IR_dim_pos, model.set_QK, rule=rule_constr_y_vars_have_pos_amp_limits
    )

    # negative amplitude limit for output variables
    def rule_constr_y_vars_have_neg_amp_limits(m, i, r, q, k):
        return m.var_y_irqk[(i, r, q, k)] >= (
            -m.var_y_amp_neg_ir[(i, r)] * m.param_f_amp_y_neg_irqk[(i, r, q, k)]
        )

    model.constr_y_vars_have_neg_amp_limits = pyo.Constraint(
        model.set_IR_dim_neg, model.set_QK, rule=rule_constr_y_vars_have_neg_amp_limits
    )

    # positive amplitude limit must be zero unless the system is installed
    def rule_constr_y_amp_pos_zero_if_cvt_not_selected(m, i, r):
        return m.var_y_amp_pos_ir[(i, r)] <= (
            # FIX: the declared parameter is param_y_amp_pos_max_ir
            m.var_cvt_inv_i[i] * m.param_y_amp_pos_max_ir[(i, r)]
        )

    # FIX: attribute name typo ("not_newected") corrected
    model.constr_y_amp_pos_zero_if_cvt_not_selected = pyo.Constraint(
        model.set_IR_dim_pos, rule=rule_constr_y_amp_pos_zero_if_cvt_not_selected
    )

    # negative amplitude limit must be zero unless the system is installed
    def rule_constr_y_amp_neg_zero_if_cvt_not_selected(m, i, r):
        return m.var_y_amp_neg_ir[(i, r)] <= (
            # FIX: the declared parameter is param_y_amp_neg_max_ir
            m.var_cvt_inv_i[i] * m.param_y_amp_neg_max_ir[(i, r)]
        )

    model.constr_y_amp_neg_zero_if_cvt_not_selected = pyo.Constraint(
        model.set_IR_dim_neg, rule=rule_constr_y_amp_neg_zero_if_cvt_not_selected
    )

    # the positive and negative amplitudes must match
    def rule_constr_y_amp_pos_neg_match(m, i, r):
        return m.var_y_amp_pos_ir[(i, r)] == m.var_y_amp_neg_ir[(i, r)]

    model.constr_y_amp_pos_neg_match = pyo.Constraint(
        model.set_IR_dim_eq, rule=rule_constr_y_amp_pos_neg_match
    )

    # *************************************************************************
    # states

    # state equations: x[k] = A x[k-1] + B u[k] + e, with x[-1] given by the
    # initial condition parameter
    def rule_constr_state_equations(m, i, n, q, k):
        return (
            m.var_x_inqk[(i, n, q, k)]
            == sum(
                m.param_a_eq_x_innqk[(i, n, n_star, q, k)]
                * (
                    m.var_x_inqk[(i, n_star, q, k - 1)]
                    if k != 0
                    # FIX: the initial condition belongs to n_star (the state
                    # being multiplied), not to n
                    else m.param_x_inq0[(i, n_star, q)]
                )
                for n_star in m.set_N[i]
            )
            + sum(
                m.param_b_eq_x_inmqk[(i, n, m_i, q, k)] * m.var_u_imqk[(i, m_i, q, k)]
                for m_i in m.set_M[i]
            )
            + m.param_e_eq_x_inqk[(i, n, q, k)]
        )

    model.constr_state_equations = pyo.Constraint(
        model.set_IN, model.set_QK, rule=rule_constr_state_equations
    )

    # positive amplitude limit for state variables
    def rule_constr_x_vars_have_pos_amp_limits(m, i, n, q, k):
        return m.var_x_inqk[(i, n, q, k)] <= (
            # FIX: the declared parameter is param_f_amp_pos_x_inqk
            m.var_x_amp_pos_in[(i, n)] * m.param_f_amp_pos_x_inqk[(i, n, q, k)]
        )

    model.constr_x_vars_have_pos_amp_limits = pyo.Constraint(
        model.set_IN_dim_pos, model.set_QK, rule=rule_constr_x_vars_have_pos_amp_limits
    )

    # negative amplitude limit for state variables
    def rule_constr_x_vars_have_neg_amp_limits(m, i, n, q, k):
        return m.var_x_inqk[(i, n, q, k)] >= (
            # FIX: the state amplitude variable is var_x_amp_neg_in (the
            # original referenced the output variable var_y_amp_neg_in), and
            # the declared parameter is param_f_amp_neg_x_inqk
            -m.var_x_amp_neg_in[(i, n)] * m.param_f_amp_neg_x_inqk[(i, n, q, k)]
        )

    model.constr_x_vars_have_neg_amp_limits = pyo.Constraint(
        model.set_IN_dim_neg, model.set_QK, rule=rule_constr_x_vars_have_neg_amp_limits
    )

    # positive amplitude limit must be zero unless the system is installed
    def rule_constr_x_amp_pos_zero_if_cvt_not_selected(m, i, n):
        return m.var_x_amp_pos_in[(i, n)] <= (
            # FIX: the declared parameter is param_x_amp_pos_max_in
            m.var_cvt_inv_i[i] * m.param_x_amp_pos_max_in[(i, n)]
        )

    model.constr_x_amp_pos_zero_if_cvt_not_selected = pyo.Constraint(
        model.set_IN_dim_pos, rule=rule_constr_x_amp_pos_zero_if_cvt_not_selected
    )

    # negative amplitude limit must be zero unless the system is installed
    def rule_constr_x_amp_neg_zero_if_cvt_not_selected(m, i, n):
        return m.var_x_amp_neg_in[(i, n)] <= (
            # FIX: the declared parameter is param_x_amp_neg_max_in
            m.var_cvt_inv_i[i] * m.param_x_amp_neg_max_in[(i, n)]
        )

    model.constr_x_amp_neg_zero_if_cvt_not_selected = pyo.Constraint(
        model.set_IN_dim_neg, rule=rule_constr_x_amp_neg_zero_if_cvt_not_selected
    )

    # the positive and negative amplitudes must match
    def rule_constr_x_amp_pos_neg_match(m, i, n):
        return m.var_x_amp_pos_in[(i, n)] == m.var_x_amp_neg_in[(i, n)]

    model.constr_x_amp_pos_neg_match = pyo.Constraint(
        model.set_IN_dim_eq, rule=rule_constr_x_amp_pos_neg_match
    )

    # *************************************************************************
    # *************************************************************************

    return model

    # *************************************************************************
    # *************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
# imports
import pyomo.environ as pyo
from math import isfinite, inf
# *****************************************************************************
# *****************************************************************************
def add_network_restrictions(
model: pyo.AbstractModel,
enable_default_values: bool = True,
enable_validation: bool = True,
enable_initialisation: bool = True,
):
# *************************************************************************
# *************************************************************************
model.set_L_max_in_g = pyo.Set(
model.set_G, within=model.set_L
) # should inherently exclude import nodes
model.set_L_max_out_g = pyo.Set(
model.set_G, within=model.set_L
) # should inherently exclude export nodes
# maximum number of arcs per node pair
model.param_max_number_parallel_arcs = pyo.Param(
model.set_GLL,
# within=pyo.PositiveIntegers,
within=pyo.PositiveReals,
default=inf,
)
def init_set_GLL_arc_max(m):
return (
(g, l1, l2)
for (g, l1, l2) in m.param_max_number_parallel_arcs
if isfinite(m.param_max_number_parallel_arcs[(g, l1, l2)])
)
model.set_GLL_arc_max = pyo.Set(
dimen=3, within=model.set_GLL, initialize=init_set_GLL_arc_max
)
# *************************************************************************
# *************************************************************************
# limit number of directed arcs per direction
def rule_constr_limited_parallel_arcs_per_direction(m, g, l1, l2):
# cases:
# 1) the number of options is lower than or equal to the limit (skip)
# 2) the number of preexisting and new mandatory arcs exceeds
# the limit (infeasible: pyo.Constraint.Infeasible)
# 3) all other cases (constraint)
# number of preexisting arcs going from l1 to l2
number_arcs_pre_nom = (
len(m.set_J_pre[(g, l1, l2)]) if (g, l1, l2) in m.set_J_pre else 0
)
number_arcs_pre_rev = (
sum(1 for j in m.set_J_pre[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_pre
else 0
)
# number of mandatory arcs going from l1 to l2
number_arcs_mdt_nom = (
len(m.set_J_mdt[(g, l1, l2)]) if (g, l1, l2) in m.set_J_mdt else 0
)
number_arcs_mdt_rev = (
sum(1 for j in m.set_J_mdt[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_mdt
else 0
)
# number of optional arcs going from l1 to l2
number_arcs_opt_nom = (
sum(
1
for j in m.set_J[(g, l1, l2)]
if j not in m.set_J_pre[(g, l1, l2)]
if j not in m.set_J_mdt[(g, l1, l2)]
)
if (g, l1, l2) in m.set_J
else 0
)
number_arcs_opt_rev = (
sum(
1
for j in m.set_J[(g, l2, l1)]
if j not in m.set_J_pre[(g, l2, l1)]
if j not in m.set_J_mdt[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
)
if (g, l2, l1) in m.set_J
else 0
)
# build the constraints
if (
number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of unavoidable arcs already exceeds the limit
return pyo.Constraint.Infeasible
elif (
number_arcs_opt_nom
+ number_arcs_opt_rev
+ number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of potential arcs exceeds the limit: cannot be skipped
return (
# preexisting arcs
number_arcs_pre_nom + number_arcs_pre_rev +
# mandatory arcs
number_arcs_mdt_nom + number_arcs_mdt_rev +
# arcs within an (optional) group that uses interfaces
sum(
(
sum(
1
for j in m.set_J_col[(g, l1, l2)]
if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
)
if (g, l1, l2) in m.set_J_col
else 0
+ sum(
1
for j in m.set_J_col[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
)
if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
else 0
)
* m.var_xi_arc_inv_t[t]
for t in m.set_T_int
)
+
# arcs within an (optional) group that does not use interfaces
sum(
(
sum(
1
for j in m.set_J_col[(g, l1, l2)]
if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
)
if (g, l1, l2) in m.set_J_col
else 0
+ sum(
1
for j in m.set_J_col[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
)
if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
else 0
)
* sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t])
for t in m.set_T # new
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# optional individual arcs using interfaces, nominal direction
sum(
m.var_xi_arc_inv_gllj[(g, l1, l2, j)]
for j in m.set_J_int[(g, l1, l2)] # interfaced
if j not in m.set_J_col[(g, l1, l2)] # individual
)
if (g, l1, l2) in m.set_J_int
else 0 +
# optional individual arcs using interfaces, reverse direction
sum(
m.var_xi_arc_inv_gllj[(g, l2, l1, j)]
for j in m.set_J_int[(g, l2, l1)] # interfaced
if j in m.set_J_und[(g, l2, l1)] # undirected
if j not in m.set_J_col[(g, l1, l2)] # individual
)
if ((g, l2, l1) in m.set_J_int and (g, l2, l1) in m.set_J_und)
else 0 +
# optional individual arcs not using interfaces, nominal dir.
sum(
sum(
m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)]
for h in m.set_H_gllj[(g, l1, l2, j)]
)
for j in m.set_J[(g, l1, l2)]
if j not in m.set_J_pre[(g, l1, l2)] # not preexisting
if j not in m.set_J_mdt[(g, l1, l2)] # not mandatory
if j not in m.set_J_int[(g, l1, l2)] # not interfaced
if j not in m.set_J_col[(g, l1, l2)] # individual
)
if (g, l1, l2) in m.set_J
else 0 +
# optional individual arcs not using interfaces, reverse dir.
sum(
sum(
m.var_delta_arc_inv_glljh[(g, l2, l1, j, h)]
for h in m.set_H_gllj[(g, l2, l1, j)]
)
for j in m.set_J_opt[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if j not in m.set_J_pre[(g, l2, l1)] # not preexisting
if j not in m.set_J_mdt[(g, l2, l1)] # not mandatory
if j not in m.set_J_int[(g, l2, l1)] # not interfaced
if j not in m.set_J_col[(g, l2, l1)] # individual
)
if (g, l2, l1) in m.set_J
else 0 <= m.param_max_number_parallel_arcs[(g, l1, l2)]
)
else: # the number of options is lower than or equal to the limit: skip
return pyo.Constraint.Skip
model.constr_limited_parallel_arcs_per_direction = pyo.Constraint(
model.set_GLL_arc_max, rule=rule_constr_limited_parallel_arcs_per_direction
)
# *************************************************************************
# *************************************************************************
# there can only one incoming arc at most, if there are no outgoing arcs
def rule_constr_max_incoming_directed_arcs(m, g, l):
# check if the node is not among those subject to a limited number of incoming arcs
if l not in m.set_L_max_in_g[g]:
# it is not, skip this constraint
return pyo.Constraint.Skip
# max number of directed incoming arcs
n_max_dir_in = sum(
sum(
1
for j in m.set_J[(g, l_line, l)]
if j not in m.set_J_und[(g, l_line, l)]
) # directed
for l_line in m.set_L[g] # for every node
if l_line != l # cannot be the same node
# if l_line not in m.set_L_imp[g] # why?
if (g, l_line, l) in m.set_J
)
# check the maximum number of incoming arcs
if n_max_dir_in <= 1:
# there can only be one incoming arc at most: redundant constraint
return pyo.Constraint.Skip
else: # more than one incoming arc is possible
# number of (new) incoming directed arcs in a group
b_max_in_gl = 0
# the big m
M_gl = n_max_dir_in - 1 # has to be positive since n_max_dir_in > 1
# TODO: put parenthesis to avoid funny results
temp_constr = (
sum(
# *********************************************************
# interfaced groups
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_xi_arc_inv_t[t] # in t
for t in m.set_T_int
)
+
# *********************************************************
# optional non-interfaced groups
sum(
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_delta_arc_inv_th[(t, h)]
for h in m.set_H_t[t]
)
for t in m.set_T
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# *********************************************************
# interfaced arcs
(sum(
m.var_xi_arc_inv_gllj[(g, l_circ, l, j_circ)]
for j_circ in m.set_J[(g, l_circ, l)]
if j_circ not in m.set_J_und[(g, l_circ, l)] # directed
if j_circ in m.set_J_int[(g, l_circ, l)] # interfaced
if j_circ not in m.set_J_col[(g, l_circ, l)] # individual
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# optional non-interfaced arcs
(sum(
sum(
m.var_delta_arc_inv_glljh[(g, l_circ, l, j_dot, h_dot)]
for h_dot in m.set_H_gllj[(g, l_circ, l, j_dot)]
)
for j_dot in m.set_J[(g, l_circ, l)]
if j_dot not in m.set_J_und[(g, l_circ, l)] # directed
if j_dot not in m.set_J_int[(g, l_circ, l)] # not interfaced
if j_dot not in m.set_J_col[(g, l_circ, l)] # individual
if j_dot not in m.set_J_pre[(g, l_circ, l)] # new
if j_dot not in m.set_J_mdt[(g, l_circ, l)] # optional
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# preexisting directed arcs
(sum(
1
for j_pre_dir in m.set_J_pre[(g, l_circ, l)] # preexisting
if j_pre_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_pre
else 0) +
# *********************************************************
# mandatory directed arcs
(sum(
1
for j_mdt_dir in m.set_J_mdt[(g, l_circ, l)]
if j_mdt_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_mdt
else 0)
# *********************************************************
for l_circ in m.set_L[g]
if l_circ not in m.set_L_exp[g]
if l_circ != l
)
<= 1 # +
# M_gl*sum(
# # *********************************************************
# # outgoing arcs in interfaced groups, nominal direction
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # outgoing arcs in interfaced groups, reverse direction
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # TODO: outgoing arcs in non-interfaced optional groups, nominal
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # TODO: outgoing arcs in non-interfaced optional groups, reverse
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # interfaced individual outgoing arcs, nominal direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_int[(g,l,l_diamond)] # interfaced
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# ) if (g,l,l_diamond) in m.set_J_int else 0
# +
# # *********************************************************
# # interfaced individual undirected arcs, reverse direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j in m.set_J_int[(g,l_diamond,l)] # interfaced
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # outgoing non-interfaced individual optional arcs
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)]
# for h in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J[(g,l,l_diamond)]
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# if j not in m.set_J_mdt[(g,l,l_diamond)] # optional
# if j not in m.set_J_int[(g,l,l_diamond)] # interfaced
# ) if (g,l,l_diamond) in m.set_J else 0
# +
# # *********************************************************
# # individual non-interfaced undirected arcs, reverse dir.
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l_diamond,l,j,h)]
# for h in m.set_H_gllj[(g,l_diamond,l,j)])
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# if j not in m.set_J_mdt[(g,l_diamond,l)] # optional
# if j not in m.set_J_int[(g,l_diamond,l)] # interfaced
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # preselected outgonig arcs, nominal direction
# len(m.set_J_pre[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_pre else 0
# +
# # *********************************************************
# # mandatory outgoing arcs, nominal direction
# len(m.set_J_mdt[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_mdt else 0
# +
# # *********************************************************
# # undirected preselected arcs, reverse direction
# sum(1
# for j in m.set_J_pre[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_pre else 0
# +
# # *********************************************************
# # undirected mandatory arcs, reverse direction
# sum(1
# for j in m.set_J_mdt[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_mdt else 0
# # *********************************************************
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )
)
if type(temp_constr) == bool:
# trivial outcome
return pyo.Constraint.Feasible if temp_constr else pyo.Constraint.Infeasible
else:
# constraint is relevant
return temp_constr
model.constr_max_incoming_directed_arcs = pyo.Constraint(
model.set_GL, rule=rule_constr_max_incoming_directed_arcs
)
# *************************************************************************
# *************************************************************************
def rule_constr_max_outgoing_directed_arcs(m, g, l):
# check if the node is not among those subject to a limited number of outgoing arcs
if l not in m.set_L_max_out_g[g]:
# it is not, skip this constraint
return pyo.Constraint.Skip
# max number of directed outgoing arcs
n_max_dir_out = sum(
sum(
1
for j in m.set_J[(g, l, l_line)]
if j not in m.set_J_und[(g, l, l_line)]
) # directed
for l_line in m.set_L[g]
if l_line != l
# if l_line not in m.set_L_exp[g] # cannot be an export: why?
if (g, l, l_line) in m.set_J
)
# check the maximum number of incoming arcs
if n_max_dir_out <= 1:
# there can only be one outgoing arc at most: redundant constraint
# TODO: consider this condition when defining the set
return pyo.Constraint.Skip
else: # more than one outgoing arc is possible
# number of (new) incoming directed arcs in a group
b_max_out_gl = 0
# the big m
M_gl = n_max_dir_out - 1 # has to be positive since n_max_dir_out > 1
# TODO: put parenthesis to avoid funny results
temp_constr = (
sum(
# *********************************************************
# interfaced groups
sum(
sum(
1
for j in m.set_J_col[(g, l, l_circ)] # part of group
if j not in m.set_J_und[(g, l, l_circ)] # directed
if (g, l, l_circ, j) in m.set_GLLJ_col_t[t]
)
* m.var_xi_arc_inv_t[t] # in t
for t in m.set_T_int
)
+
# *********************************************************
# optional non-interfaced groups
sum(
sum(
sum(
1
for j in m.set_J_col[(g, l, l_circ)] # part of group
if j not in m.set_J_und[(g, l, l_circ)] # directed
if (g, l, l_circ, j) in m.set_GLLJ_col_t[t]
)
* m.var_delta_arc_inv_th[(t, h)]
for h in m.set_H_t[t]
)
for t in m.set_T
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# *********************************************************
# interfaced arcs
(sum(
m.var_xi_arc_inv_gllj[(g, l, l_circ, j_circ)]
for j_circ in m.set_J[(g, l, l_circ)]
if j_circ not in m.set_J_und[(g, l, l_circ)] # directed
if j_circ in m.set_J_int[(g, l, l_circ)] # interfaced
if j_circ not in m.set_J_col[(g, l, l_circ)] # individual
)
if (g, l, l_circ) in m.set_J
else 0) +
# *********************************************************
# optional non-interfaced arcs
(sum(
sum(
m.var_delta_arc_inv_glljh[(g, l, l_circ, j_dot, h_dot)]
for h_dot in m.set_H_gllj[(g, l, l_circ, j_dot)]
)
for j_dot in m.set_J[(g, l, l_circ)]
if j_dot not in m.set_J_und[(g, l, l_circ)] # directed
if j_dot not in m.set_J_int[(g, l, l_circ)] # not interfaced
if j_dot not in m.set_J_col[(g, l, l_circ)] # individual
if j_dot not in m.set_J_pre[(g, l, l_circ)] # new
if j_dot not in m.set_J_mdt[(g, l, l_circ)] # optional
)
if (g, l, l_circ) in m.set_J
else 0) +
# *********************************************************
# preexisting directed arcs
(sum(
1
for j_pre_dir in m.set_J_pre[(g, l, l_circ)] # preexisting
if j_pre_dir not in m.set_J_und[(g, l, l_circ)] # directed
)
if (g, l, l_circ) in m.set_J_pre
else 0) +
# *********************************************************
# mandatory directed arcs
(sum(
1
for j_mdt_dir in m.set_J_mdt[(g, l, l_circ)]
if j_mdt_dir not in m.set_J_und[(g, l, l_circ)] # directed
)
if (g, l, l_circ) in m.set_J_mdt
else 0)
# *********************************************************
for l_circ in m.set_L[g]
if l_circ not in m.set_L_imp[g]
if l_circ != l
)
<= 1 # +
# TODO: what is below has copy&pasted, must be completely revised
# M_gl*sum(
# # *********************************************************
# # outgoing arcs in interfaced groups, nominal direction
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # outgoing arcs in interfaced groups, reverse direction
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # TODO: outgoing arcs in non-interfaced optional groups, nominal
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # TODO: outgoing arcs in non-interfaced optional groups, reverse
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # interfaced individual outgoing arcs, nominal direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_int[(g,l,l_diamond)] # interfaced
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# ) if (g,l,l_diamond) in m.set_J_int else 0
# +
# # *********************************************************
# # interfaced individual undirected arcs, reverse direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j in m.set_J_int[(g,l_diamond,l)] # interfaced
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # outgoing non-interfaced individual optional arcs
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)]
# for h in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J[(g,l,l_diamond)]
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# if j not in m.set_J_mdt[(g,l,l_diamond)] # optional
# if j not in m.set_J_int[(g,l,l_diamond)] # interfaced
# ) if (g,l,l_diamond) in m.set_J else 0
# +
# # *********************************************************
# # individual non-interfaced undirected arcs, reverse dir.
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l_diamond,l,j,h)]
# for h in m.set_H_gllj[(g,l_diamond,l,j)])
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# if j not in m.set_J_mdt[(g,l_diamond,l)] # optional
# if j not in m.set_J_int[(g,l_diamond,l)] # interfaced
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # preselected outgonig arcs, nominal direction
# len(m.set_J_pre[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_pre else 0
# +
# # *********************************************************
# # mandatory outgoing arcs, nominal direction
# len(m.set_J_mdt[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_mdt else 0
# +
# # *********************************************************
# # undirected preselected arcs, reverse direction
# sum(1
# for j in m.set_J_pre[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_pre else 0
# +
# # *********************************************************
# # undirected mandatory arcs, reverse direction
# sum(1
# for j in m.set_J_mdt[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_mdt else 0
# # *********************************************************
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )
)
if type(temp_constr) == bool:
# trivial outcome
return pyo.Constraint.Feasible if temp_constr else pyo.Constraint.Infeasible
else:
# constraint is relevant
return temp_constr
model.constr_max_outgoing_directed_arcs = pyo.Constraint(
model.set_GL, rule=rule_constr_max_outgoing_directed_arcs
)
# *************************************************************************
# *************************************************************************
return model
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
# imports
import pyomo.environ as pyo
# *****************************************************************************
# *****************************************************************************
def add_prices_block(model: pyo.AbstractModel, **kwargs):
    """Add the import/export price modelling elements to the model.

    Currently delegates to the flat formulation in ``price_other``; the
    block-based variant (``price_block_other``) remains disabled.
    """
    # model.node_price_block = pyo.Block(model.set_QPK)
    price_other(model, **kwargs)
    # price_block_other(model, **kwargs)
# *****************************************************************************
# *****************************************************************************
# TODO: try to implement it as a block
def price_block_other(
    model: pyo.AbstractModel,
    enable_default_values: bool = True,
    enable_validation: bool = True,
    enable_initialisation: bool = True
):
    """Experimental block-based formulation of the import/export price model.

    NOTE(review): work in progress and currently unused (``add_prices_block``
    delegates to ``price_other`` instead). Several references below still
    point to components that are declared elsewhere or later (e.g. ``m.set_S``,
    ``m.var_if_glqpks``, ``model.set_GLQPKS_exp`` vs the sets declared on
    ``model.node_price_block``) -- reconcile before activating this variant.
    """
    # auxiliary set: (grid, node) x (assessment, period, interval) tuples
    model.set_GLQPK = model.set_GL_exp_imp*model.set_QPK
    def rule_node_prices(b, g, l, q, p, k):
        # transshipment flow per price segment, bounded above by the segment
        # capacity whenever a finite capacity is defined
        def bounds_var_trans_flow_s(m, g, l, q, p, k, s):
            if (g, l, q, p, k, s) in m.param_v_max_glqpks:
                # predefined finite capacity
                return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
            else:
                # infinite capacity
                return (0, None)
        # NOTE(review): b.set_GLQPKS is only declared on the block after the
        # block rule has run -- confirm the intended construction order
        b.var_trans_flow_s = pyo.Var(
            b.set_GLQPKS, within=pyo.NonNegativeReals, bounds=bounds_var_trans_flow_s
        )
    # imported flow cost
    def rule_constr_imp_flow_cost(m, g, l, q, p, k):
        return (
            sum(
                m.var_if_glqpks[(g, l, q, p, k, s)]
                * m.param_p_glqpks[(g, l, q, p, k, s)]
                for s in m.set_S[(g, l, q, p, k)]
            )
            == m.var_ifc_glqpk[(g, l, q, p, k)]
        )
    model.constr_imp_flow_cost = pyo.Constraint(
        model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
    )
    # imported flows
    def rule_constr_imp_flows(m, g, l, q, p, k):
        return sum(
            m.var_v_glljqk[(g, l, l_star, j, q, k)]
            for l_star in m.set_L[g]
            if l_star not in m.set_L_imp[g]
            for j in m.set_J[(g, l, l_star)]  # only directed arcs
        ) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
    model.constr_imp_flows = pyo.Constraint(
        model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
    )
    # if (g,l) in b.parent_block().set_GL_imp:
    #     # import node
    #     pass
    # elif (g,l) in b.parent_block().set_GL_exp:
    #     # export node
    #     pass
    # otherwise: do nothing
    model.node_price_block = pyo.Block(model.set_GLQPK, rule=rule_node_prices)
    # set of price segments
    model.node_price_block.set_S = pyo.Set()
    # set of GLQKS tuples
    def init_set_GLQPKS(m):
        return (
            (g, l, q, p, k, s)
            # for (g,l) in m.set_GL_exp_imp
            # for (q,k) in m.set_QK
            for (g, l, q, p, k) in m.node_price_block.set_S
            for s in m.node_price_block.set_S[(g, l, q, p, k)]
        )
    model.node_price_block.set_GLQPKS = pyo.Set(
        dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
    )
    def init_set_GLQPKS_exp(m):
        return (
            glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
        )
    model.node_price_block.set_GLQPKS_exp = pyo.Set(
        dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
    )
    def init_set_GLQPKS_imp(m):
        return (
            glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
        )
    model.node_price_block.set_GLQPKS_imp = pyo.Set(
        dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
    )
    # *************************************************************************
    # *************************************************************************
    # parameters
    # resource prices
    # NOTE(review): model.set_GLQPKS is referenced here but only
    # model.node_price_block.set_GLQPKS is declared above -- verify
    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
    # maximum resource volumes for each price segment
    model.param_v_max_glqpks = pyo.Param(
        model.set_GLQPKS,
        within=pyo.NonNegativeReals
    )
    # *************************************************************************
    # *************************************************************************
    # variables
    # *************************************************************************
    # *************************************************************************
    # exported flow
    # TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
    def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
        if (g, l, q, p, k, s) in m.param_v_max_glqpks:
            # predefined finite capacity
            return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
        else:
            # infinite capacity
            return (0, None)
    model.var_ef_glqpks = pyo.Var(
        model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
    )
    # *************************************************************************
    # *************************************************************************
    # exported flow revenue
    def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
        return (
            sum(
                m.var_ef_glqpks[(g, l, q, p, k, s)]
                * m.param_p_glqpks[(g, l, q, p, k, s)]
                for s in m.set_S[(g, l, q, p, k)]
            )
            == m.var_efr_glqpk[(g, l, q, p, k)]
        )
    model.constr_exp_flow_revenue = pyo.Constraint(
        model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
    )
    # exported flows
    def rule_constr_exp_flows(m, g, l, q, p, k):
        return sum(
            m.var_v_glljqk[(g, l_star, l, j, q, k)]
            * m.param_eta_glljqk[(g, l_star, l, j, q, k)]
            for l_star in m.set_L[g]
            if l_star not in m.set_L_exp[g]
            for j in m.set_J[(g, l_star, l)]  # only directed arcs
        ) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
    model.constr_exp_flows = pyo.Constraint(
        model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
    )
    # *************************************************************************
    # *************************************************************************
    # # non-convex price functions
    # if not convex_price_function:
    #     # delta variables
    #     model.var_active_segment_glqpks = pyo.Var(
    #         model.set_GLQPKS, within=pyo.Binary
    #     )
    #     # segments must be empty if the respective delta variable is zero
    #     def rule_constr_empty_segment_if_delta_zero_imp(m, g, l, q, p, k, s):
    #         return (
    #             m.var_if_glqpks[(g,l,q,p,k,s)] <=
    #             m.param_v_max_glqpks[(g,l,q,p,k,s)]*
    #             m.var_active_segment_glqpks[(g,l,q,p,k,s)]
    #         )
    #     model.constr_empty_segment_if_delta_zero_imp = pyo.Constraint(
    #         model.set_GLQPKS_imp, rule=rule_constr_empty_segment_if_delta_zero_imp
    #     )
    #     # segments must be empty if the respective delta variable is zero
    #     def rule_constr_empty_segment_if_delta_zero_exp(m, g, l, q, p, k, s):
    #         return (
    #             m.var_ef_glqpks[(g,l,q,p,k,s)] <=
    #             m.param_v_max_glqpks[(g,l,q,p,k,s)]*
    #             m.var_active_segment_glqpks[(g,l,q,p,k,s)]
    #         )
    #     model.constr_empty_segment_if_delta_zero_exp = pyo.Constraint(
    #         model.set_GLQPKS_exp, rule=rule_constr_empty_segment_if_delta_zero_exp
    #     )
    #     # if delta var is one, previous ones must be one too
    #     def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
    #         if s == len(m.set_S)-1:
    #             return pyo.Constraint.Skip
    #         return (
    #             m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
    #             m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
    #         )
    #     model.constr_delta_summing_logic = pyo.Constraint(
    #         model.set_GLQPKS, rule=rule_constr_delta_summing_logic
    #     )
    #     # if delta var is zero, subsequent ones must also be zero
    #     def rule_constr_delta_next_zeros(m, g, l, q, p, k, s):
    #         if s == len(m.set_S)-1:
    #             return pyo.Constraint.Skip
    #         return (
    #             1-m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
    #             m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
    #         )
    #     model.constr_delta_next_zeros = pyo.Constraint(
    #         model.set_GLQPKS, rule=rule_constr_delta_next_zeros
    #     )
    # *************************************************************************
    # *************************************************************************
# *****************************************************************************
# *****************************************************************************
# def price_other2(
# model: pyo.AbstractModel,
# convex_price_function: bool = False,
# enable_default_values: bool = True,
# enable_validation: bool = True,
# enable_initialisation: bool = True
# ):
# # set of price segments
# model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
# # set of GLQKS tuples
# def init_set_GLQPKS(m):
# return (
# (g, l, q, p, k, s)
# # for (g,l) in m.set_GL_exp_imp
# # for (q,k) in m.set_QK
# for (g, l, q, p, k) in m.set_S
# for s in m.set_S[(g, l, q, p, k)]
# )
# model.set_GLQPKS = pyo.Set(
# dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
# )
# def init_set_GLQPKS_exp(m):
# return (
# glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
# )
# model.set_GLQPKS_exp = pyo.Set(
# dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
# )
# def init_set_GLQPKS_imp(m):
# return (
# glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
# )
# model.set_GLQPKS_imp = pyo.Set(
# dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
# )
# # *************************************************************************
# # *************************************************************************
# # parameters
# # resource prices
# model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# # maximum resource volumes for each prices
# model.param_v_max_glqpks = pyo.Param(
# model.set_GLQPKS,
# within=pyo.NonNegativeReals
# )
# # *************************************************************************
# # *************************************************************************
# # variables
# # *************************************************************************
# # *************************************************************************
# # exported flow
# # TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
# def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
# if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# # predefined finite capacity
# return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
# else:
# # infinite capacity
# return (0, None)
# model.var_ef_glqpks = pyo.Var(
# model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
# )
# # imported flow
# def bounds_var_if_glqpks(m, g, l, q, p, k, s):
# if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# # predefined finite capacity
# return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
# else:
# # infinite capacity
# return (0, None)
# model.var_if_glqpks = pyo.Var(
# model.set_GLQPKS_imp, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks
# )
# # *************************************************************************
# # *************************************************************************
# # exported flow revenue
# def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
# return (
# sum(
# m.var_ef_glqpks[(g, l, q, p, k, s)]
# * m.param_p_glqpks[(g, l, q, p, k, s)]
# for s in m.set_S[(g, l, q, p, k)]
# )
# == m.var_efr_glqpk[(g, l, q, p, k)]
# )
# model.constr_exp_flow_revenue = pyo.Constraint(
# model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
# )
# # imported flow cost
# def rule_constr_imp_flow_cost(m, g, l, q, p, k):
# return (
# sum(
# m.var_if_glqpks[(g, l, q, p, k, s)]
# * m.param_p_glqpks[(g, l, q, p, k, s)]
# for s in m.set_S[(g, l, q, p, k)]
# )
# == m.var_ifc_glqpk[(g, l, q, p, k)]
# )
# model.constr_imp_flow_cost = pyo.Constraint(
# model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
# )
# # exported flows
# def rule_constr_exp_flows(m, g, l, q, p, k):
# return sum(
# m.var_v_glljqk[(g, l_star, l, j, q, k)]
# * m.param_eta_glljqk[(g, l_star, l, j, q, k)]
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# for j in m.set_J[(g, l_star, l)] # only directed arcs
# ) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
# model.constr_exp_flows = pyo.Constraint(
# model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
# )
# # imported flows
# def rule_constr_imp_flows(m, g, l, q, p, k):
# return sum(
# m.var_v_glljqk[(g, l, l_star, j, q, k)]
# for l_star in m.set_L[g]
# if l_star not in m.set_L_imp[g]
# for j in m.set_J[(g, l, l_star)] # only directed arcs
# ) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
# model.constr_imp_flows = pyo.Constraint(
# model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
# )
# # *************************************************************************
# # *************************************************************************
# # non-convex price functions
# if not convex_price_function:
# # delta variables
# model.var_active_segment_glqpks = pyo.Var(
# model.set_GLQPKS, within=pyo.Binary
# )
# # segments must be empty if the respective delta variable is zero
# def rule_constr_empty_segment_if_delta_zero_imp(m, g, l, q, p, k, s):
# return (
# m.var_if_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*
# m.var_active_segment_glqpks[(g,l,q,p,k,s)]
# )
# model.constr_empty_segment_if_delta_zero_imp = pyo.Constraint(
# model.set_GLQPKS_imp, rule=rule_constr_empty_segment_if_delta_zero_imp
# )
# # segments must be empty if the respective delta variable is zero
# def rule_constr_empty_segment_if_delta_zero_exp(m, g, l, q, p, k, s):
# return (
# m.var_ef_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*
# m.var_active_segment_glqpks[(g,l,q,p,k,s)]
# )
# model.constr_empty_segment_if_delta_zero_exp = pyo.Constraint(
# model.set_GLQPKS_exp, rule=rule_constr_empty_segment_if_delta_zero_exp
# )
# # if delta var is one, previous ones must be one too
# # if delta var is zero, the next ones must also be zero
# def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
# if s == len(m.set_S[(g,l,q,p,k)])-1:
# # last segment, skip
# return pyo.Constraint.Skip
# return (
# m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# )
# model.constr_delta_summing_logic = pyo.Constraint(
# model.set_GLQPKS, rule=rule_constr_delta_summing_logic
# )
# # if a segment is not completely used, the next ones must remain empty
# def rule_constr_fill_up_segment_before_next(m, g, l, q, p, k, s):
# if s == len(m.set_S[(g,l,q,p,k)])-1:
# # last segment, skip
# return pyo.Constraint.Skip
# if (g,l) in m.set_GL_imp:
# return (
# m.var_if_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]*
# m.param_v_max_glqpks[(g,l,q,p,k,s)]
# )
# else:
# return (
# m.var_ef_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]*
# m.param_v_max_glqpks[(g,l,q,p,k,s)]
# )
# # return (
# # m.var_if_glqpks[(g,l,q,p,k,s)]/m.param_v_max_glqpks[(g,l,q,p,k,s)] >=
# # m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# # )
# # return (
# # m.param_v_max_glqpks[(g,l,q,p,k,s)]-m.var_if_glqpks[(g,l,q,p,k,s)] <=
# # m.param_v_max_glqpks[(g,l,q,p,k,s)]*(1- m.var_active_segment_glqpks[(g,l,q,p,k,s+1)])
# # )
# model.constr_fill_up_segment_before_next = pyo.Constraint(
# model.set_GLQPKS, rule=rule_constr_fill_up_segment_before_next
# )
# *****************************************************************************
# *****************************************************************************
def price_other(
    model: pyo.AbstractModel,
    convex_price_function: bool = True,
    enable_default_values: bool = True,
    enable_validation: bool = True,
    enable_initialisation: bool = True
):
    """
    Add piecewise-linear price components for import/export flows to a model.

    Transaction (import/export) flows are split into price segments, each
    with its own unit price (param_p_glqpks) and maximum volume
    (param_v_max_glqpks). Convex price functions only need the continuous
    per-segment flow variables; non-convex ones additionally use binary
    "active segment" variables that force segments to be filled in order.

    Parameters
    ----------
    model : pyo.AbstractModel
        The model to extend. It must already declare set_GL_exp_imp,
        set_GL_imp, set_QPK, set_L, set_L_imp, set_L_exp, set_J,
        param_eta_glljqk, var_v_glljqk, var_ifc_glqpk and var_efr_glqpk.
    convex_price_function : bool, optional
        NOTE(review): currently unused here; convexity is instead driven
        per tuple by param_price_function_is_convex — confirm intent.
    enable_default_values : bool, optional
        NOTE(review): currently unused in this function.
    enable_validation : bool, optional
        NOTE(review): currently unused in this function.
    enable_initialisation : bool, optional
        If True, set_GLQPKS is initialised from set_S; otherwise it must
        be initialised externally (e.g. through data files).
    """
    # auxiliary set for pyomo: cross product of node pairs and (q,p,k) tuples
    model.set_GLQPK = model.set_GL_exp_imp*model.set_QPK
    # set of price segments, indexed per (g,l,q,p,k) tuple
    model.set_S = pyo.Set(model.set_GLQPK)
    # set of GLQPKS tuples: one entry per (g,l,q,p,k) tuple and price segment
    def init_set_GLQPKS(m):
        return (
            (g, l, q, p, k, s)
            # for (g,l) in m.set_GL_exp_imp
            # for (q,k) in m.set_QK
            for (g, l, q, p, k) in m.set_S
            for s in m.set_S[(g, l, q, p, k)]
        )
    model.set_GLQPKS = pyo.Set(
        dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
    )
    # *************************************************************************
    # *************************************************************************
    # parameters
    # resource prices, per price segment
    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
    # price function convexity, per (g,l,q,p,k) tuple; non-convex tuples
    # trigger the binary segment-activation constraints further below
    model.param_price_function_is_convex = pyo.Param(
        model.set_GLQPK,
        within=pyo.Boolean
    )
    # maximum resource volumes for each price segment (may be left undefined
    # for a segment, in which case that segment's capacity is unbounded)
    model.param_v_max_glqpks = pyo.Param(
        model.set_GLQPKS,
        within=pyo.NonNegativeReals
    )
    # *************************************************************************
    # *************************************************************************
    # variables
    # *************************************************************************
    # *************************************************************************
    # import and export flows
    # bound each segment's flow by its volume cap, when one is defined
    def bounds_var_trans_flows_glqpks(m, g, l, q, p, k, s):
        if (g, l, q, p, k, s) in m.param_v_max_glqpks:
            # predefined finite capacity
            return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
        else:
            # infinite capacity
            return (0, None)
    # transaction flow per price segment
    model.var_trans_flows_glqpks = pyo.Var(
        model.set_GLQPKS, within=pyo.NonNegativeReals, bounds=bounds_var_trans_flows_glqpks
    )
    # *************************************************************************
    # *************************************************************************
    # import flow costs and export flow revenues
    # sum of (segment flow * segment price) defines the import flow cost
    # (import nodes) or the export flow revenue (all other, i.e. export nodes)
    def rule_constr_trans_monetary_flows(m, g, l, q, p, k):
        if (g,l) in m.set_GL_imp:
            return (
                sum(
                    m.var_trans_flows_glqpks[(g, l, q, p, k, s)]
                    * m.param_p_glqpks[(g, l, q, p, k, s)]
                    for s in m.set_S[(g, l, q, p, k)]
                )
                == m.var_ifc_glqpk[(g, l, q, p, k)]
            )
        else:
            return (
                sum(
                    m.var_trans_flows_glqpks[(g, l, q, p, k, s)]
                    * m.param_p_glqpks[(g, l, q, p, k, s)]
                    for s in m.set_S[(g, l, q, p, k)]
                )
                == m.var_efr_glqpk[(g, l, q, p, k)]
            )
    model.constr_trans_monetary_flows = pyo.Constraint(
        model.set_GLQPK, rule=rule_constr_trans_monetary_flows
    )
    # imported and exported flows: the physical arc flows at the node must
    # match the total transaction flow across all price segments; exports
    # are scaled by the arc efficiencies (param_eta_glljqk) on arrival
    def rule_constr_trans_flows(m, g, l, q, p, k):
        if (g,l) in m.set_GL_imp:
            return sum(
                m.var_v_glljqk[(g, l, l_star, j, q, k)]
                for l_star in m.set_L[g]
                if l_star not in m.set_L_imp[g]
                for j in m.set_J[(g, l, l_star)] # only directed arcs
            ) == sum(m.var_trans_flows_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
        else:
            return sum(
                m.var_v_glljqk[(g, l_star, l, j, q, k)]
                * m.param_eta_glljqk[(g, l_star, l, j, q, k)]
                for l_star in m.set_L[g]
                if l_star not in m.set_L_exp[g]
                for j in m.set_J[(g, l_star, l)] # only directed arcs
            ) == sum(m.var_trans_flows_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
    model.constr_trans_flows = pyo.Constraint(
        model.set_GLQPK, rule=rule_constr_trans_flows
    )
    # *************************************************************************
    # *************************************************************************
    # non-convex price functions
    # delta variables: 1 if a price segment is active, 0 otherwise
    # NOTE(review): these binaries are declared over all GLQPKS tuples, even
    # for convex tuples whose constraints below are skipped — confirm intent
    model.var_active_segment_glqpks = pyo.Var(
        model.set_GLQPKS, within=pyo.Binary
    )
    # segments must be empty if the respective delta variable is zero
    def rule_constr_empty_segment_if_delta_zero(m, g, l, q, p, k, s):
        if len(m.set_S[(g,l,q,p,k)]) == 1 or m.param_price_function_is_convex[(g,l,q,p,k)]:
            # single segment, skip
            # convex, skip
            return pyo.Constraint.Skip
        return (
            m.var_trans_flows_glqpks[(g,l,q,p,k,s)] <=
            m.param_v_max_glqpks[(g,l,q,p,k,s)]*
            m.var_active_segment_glqpks[(g,l,q,p,k,s)]
        )
    model.constr_empty_segment_if_delta_zero = pyo.Constraint(
        model.set_GLQPKS, rule=rule_constr_empty_segment_if_delta_zero
    )
    # if delta var is one, previous ones must be one too
    # if delta var is zero, the next ones must also be zero
    def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
        if s == len(m.set_S[(g,l,q,p,k)])-1 or m.param_price_function_is_convex[(g,l,q,p,k)]:
            # last segment, skip
            # convex, skip
            return pyo.Constraint.Skip
        return (
            m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
            m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
        )
    model.constr_delta_summing_logic = pyo.Constraint(
        model.set_GLQPKS, rule=rule_constr_delta_summing_logic
    )
    # if a segment is not completely used, the next ones must remain empty
    def rule_constr_fill_up_segment_before_next(m, g, l, q, p, k, s):
        if s == len(m.set_S[(g,l,q,p,k)])-1 or m.param_price_function_is_convex[(g,l,q,p,k)]:
            # last segment, skip
            # convex, skip
            return pyo.Constraint.Skip
        # segment s+1 can only be active once segment s has hit its cap
        return (
            m.var_trans_flows_glqpks[(g,l,q,p,k,s)] >=
            m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]*
            m.param_v_max_glqpks[(g,l,q,p,k,s)]
        )
        # return (
        #     m.var_if_glqpks[(g,l,q,p,k,s)]/m.param_v_max_glqpks[(g,l,q,p,k,s)] >=
        #     m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
        # )
        # return (
        #     m.param_v_max_glqpks[(g,l,q,p,k,s)]-m.var_if_glqpks[(g,l,q,p,k,s)] <=
        #     m.param_v_max_glqpks[(g,l,q,p,k,s)]*(1- m.var_active_segment_glqpks[(g,l,q,p,k,s+1)])
        # )
    model.constr_fill_up_segment_before_next = pyo.Constraint(
        model.set_GLQPKS, rule=rule_constr_fill_up_segment_before_next
    )
# *****************************************************************************
# *****************************************************************************
def price_block_lambda(model: pyo.AbstractModel, **kwargs):
    """
    Add a price block using the lambda (convex-combination) formulation.

    Placeholder kept for interface parity with the other price-block
    builders (e.g. ``price_other``); the formulation is not implemented.

    Parameters
    ----------
    model : pyo.AbstractModel
        The model the price block would be added to.
    **kwargs
        Options the eventual implementation would accept.

    Raises
    ------
    NotImplementedError
        Always, since this formulation is not available yet.
    """
    raise NotImplementedError(
        "The lambda formulation for price blocks has not been implemented yet."
    )
# *****************************************************************************
# *****************************************************************************
def price_block_delta(model: pyo.AbstractModel, **kwargs):
    """
    Add a price block using the delta (incremental) formulation.

    Placeholder kept for interface parity with the other price-block
    builders (e.g. ``price_other``); the formulation is not implemented.

    Parameters
    ----------
    model : pyo.AbstractModel
        The model the price block would be added to.
    **kwargs
        Options the eventual implementation would accept.

    Raises
    ------
    NotImplementedError
        Always, since this formulation is not available yet.
    """
    raise NotImplementedError(
        "The delta formulation for price blocks has not been implemented yet."
    )
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -2,7 +2,8 @@
import pyomo.environ as pyo
from math import isfinite, inf
from .blocks.networks import add_network_restrictions
from .blocks.prices import add_prices_block
# *****************************************************************************
# *****************************************************************************
......@@ -22,7 +23,7 @@ def create_model(
# create model object
model = pyo.AbstractModel(name)
# *************************************************************************
# *************************************************************************
......@@ -84,14 +85,7 @@ def create_model(
# set of exporting nodes on each network
model.set_L_exp = pyo.Set(model.set_G, within=model.set_L)
# set of nodes on network g incompatible with having more than one incoming
# arc unless there are outgoing arcs too
model.set_L_max_in_g = pyo.Set(
model.set_G, within=model.set_L
) # should inherently exclude import nodes
# *************************************************************************
# *************************************************************************
......@@ -395,45 +389,6 @@ def create_model(
# *************************************************************************
# set of price segments
model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
    # set of GLQPKS tuples
def init_set_GLQPKS(m):
return (
(g, l, q, p, k, s)
# for (g,l) in m.set_GL_exp_imp
# for (q,k) in m.set_QK
for (g, l, q, p, k) in m.set_S
for s in m.set_S[(g, l, q, p, k)]
)
model.set_GLQPKS = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
)
def init_set_GLQPKS_exp(m):
return (
glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
)
model.set_GLQPKS_exp = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
)
def init_set_GLQPKS_imp(m):
return (
glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
)
model.set_GLQPKS_imp = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
)
# *************************************************************************
# all arcs
# set of GLLJ tuples for all arcs (undirected arcs appear twice)
......@@ -1445,14 +1400,6 @@ def create_model(
model.set_QPK, within=pyo.PositiveReals, default=1
)
# resource prices
model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# maximum resource volumes for each prices
model.param_v_max_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# converters
# externality cost per input unit
......@@ -1617,26 +1564,6 @@ def create_model(
model.set_GL_not_exp_imp, model.set_QK, within=pyo.Reals, default=0
)
# maximum number of arcs per node pair
model.param_max_number_parallel_arcs = pyo.Param(
model.set_GLL,
# within=pyo.PositiveIntegers,
within=pyo.PositiveReals,
default=inf,
)
def init_set_GLL_arc_max(m):
return (
(g, l1, l2)
for (g, l1, l2) in m.param_max_number_parallel_arcs
if isfinite(m.param_max_number_parallel_arcs[(g, l1, l2)])
)
model.set_GLL_arc_max = pyo.Set(
dimen=3, within=model.set_GLL, initialize=init_set_GLL_arc_max
)
# effect of system inputs on specific network and node pairs
model.param_a_nw_glimqk = pyo.Param(
......@@ -1835,36 +1762,6 @@ def create_model(
model.set_GL_imp, model.set_QPK, within=pyo.NonNegativeReals
)
# exported flow
# TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
model.var_ef_glqpks = pyo.Var(
model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
)
# imported flow
def bounds_var_if_glqpks(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
model.var_if_glqpks = pyo.Var(
model.set_GLQPKS_imp, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks
)
# *************************************************************************
# arcs
......@@ -2127,67 +2024,6 @@ def create_model(
model.constr_sdncf_q = pyo.Constraint(model.set_Q, rule=rule_sdncf_q)
# exported flow revenue
def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
return (
sum(
m.var_ef_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_efr_glqpk[(g, l, q, p, k)]
)
model.constr_exp_flow_revenue = pyo.Constraint(
model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
)
# imported flow cost
def rule_constr_imp_flow_cost(m, g, l, q, p, k):
return (
sum(
m.var_if_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_ifc_glqpk[(g, l, q, p, k)]
)
model.constr_imp_flow_cost = pyo.Constraint(
model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
)
# exported flows
def rule_constr_exp_flows(m, g, l, q, p, k):
return sum(
m.var_v_glljqk[(g, l_star, l, j, q, k)]
* m.param_eta_glljqk[(g, l_star, l, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_exp[g]
for j in m.set_J[(g, l_star, l)] # only directed arcs
) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_exp_flows = pyo.Constraint(
model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
)
# imported flows
def rule_constr_imp_flows(m, g, l, q, p, k):
return sum(
m.var_v_glljqk[(g, l, l_star, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_imp[g]
for j in m.set_J[(g, l, l_star)] # only directed arcs
) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_imp_flows = pyo.Constraint(
model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
)
# *************************************************************************
# sum of discounted externalities
......@@ -2325,6 +2161,9 @@ def create_model(
model.constr_capex_system = pyo.Constraint(
model.set_I_new, rule=rule_capex_converter
)
# prices
add_prices_block(model)
# *************************************************************************
# *************************************************************************
......@@ -2475,577 +2314,9 @@ def create_model(
)
# *************************************************************************
# limit number of directed arcs per direction
def rule_constr_limited_parallel_arcs_per_direction(m, g, l1, l2):
# cases:
# 1) the number of options is lower than or equal to the limit (skip)
# 2) the number of preexisting and new mandatory arcs exceeds
# the limit (infeasible: pyo.Constraint.Infeasible)
# 3) all other cases (constraint)
# number of preexisting arcs going from l1 to l2
number_arcs_pre_nom = (
len(m.set_J_pre[(g, l1, l2)]) if (g, l1, l2) in m.set_J_pre else 0
)
number_arcs_pre_rev = (
sum(1 for j in m.set_J_pre[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_pre
else 0
)
# number of mandatory arcs going from l1 to l2
number_arcs_mdt_nom = (
len(m.set_J_mdt[(g, l1, l2)]) if (g, l1, l2) in m.set_J_mdt else 0
)
number_arcs_mdt_rev = (
sum(1 for j in m.set_J_mdt[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_mdt
else 0
)
# number of optional arcs going from l1 to l2
number_arcs_opt_nom = (
sum(
1
for j in m.set_J[(g, l1, l2)]
if j not in m.set_J_pre[(g, l1, l2)]
if j not in m.set_J_mdt[(g, l1, l2)]
)
if (g, l1, l2) in m.set_J
else 0
)
number_arcs_opt_rev = (
sum(
1
for j in m.set_J[(g, l2, l1)]
if j not in m.set_J_pre[(g, l2, l1)]
if j not in m.set_J_mdt[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
)
if (g, l2, l1) in m.set_J
else 0
)
# build the constraints
if (
number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of unavoidable arcs already exceeds the limit
return pyo.Constraint.Infeasible
elif (
number_arcs_opt_nom
+ number_arcs_opt_rev
+ number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of potential arcs exceeds the limit: cannot be skipped
return (
# preexisting arcs
number_arcs_pre_nom + number_arcs_pre_rev +
# mandatory arcs
number_arcs_mdt_nom + number_arcs_mdt_rev +
# arcs within an (optional) group that uses interfaces
sum(
(
sum(
1
for j in m.set_J_col[(g, l1, l2)]
if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
)
if (g, l1, l2) in m.set_J_col
else 0
+ sum(
1
for j in m.set_J_col[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
)
if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
else 0
)
* m.var_xi_arc_inv_t[t]
for t in m.set_T_int
)
+
# arcs within an (optional) group that does not use interfaces
sum(
(
sum(
1
for j in m.set_J_col[(g, l1, l2)]
if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
)
if (g, l1, l2) in m.set_J_col
else 0
+ sum(
1
for j in m.set_J_col[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
)
if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
else 0
)
* sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t])
for t in m.set_T # new
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# optional individual arcs using interfaces, nominal direction
sum(
m.var_xi_arc_inv_gllj[(g, l1, l2, j)]
for j in m.set_J_int[(g, l1, l2)] # interfaced
if j not in m.set_J_col[(g, l1, l2)] # individual
)
if (g, l1, l2) in m.set_J_int
else 0 +
# optional individual arcs using interfaces, reverse direction
sum(
m.var_xi_arc_inv_gllj[(g, l2, l1, j)]
for j in m.set_J_int[(g, l2, l1)] # interfaced
if j in m.set_J_und[(g, l2, l1)] # undirected
if j not in m.set_J_col[(g, l1, l2)] # individual
)
if ((g, l2, l1) in m.set_J_int and (g, l2, l1) in m.set_J_und)
else 0 +
# optional individual arcs not using interfaces, nominal dir.
sum(
sum(
m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)]
for h in m.set_H_gllj[(g, l1, l2, j)]
)
for j in m.set_J[(g, l1, l2)]
if j not in m.set_J_pre[(g, l1, l2)] # not preexisting
if j not in m.set_J_mdt[(g, l1, l2)] # not mandatory
if j not in m.set_J_int[(g, l1, l2)] # not interfaced
if j not in m.set_J_col[(g, l1, l2)] # individual
)
if (g, l1, l2) in m.set_J
else 0 +
# optional individual arcs not using interfaces, reverse dir.
sum(
sum(
m.var_delta_arc_inv_glljh[(g, l2, l1, j, h)]
for h in m.set_H_gllj[(g, l2, l1, j)]
)
for j in m.set_J_opt[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if j not in m.set_J_pre[(g, l2, l1)] # not preexisting
if j not in m.set_J_mdt[(g, l2, l1)] # not mandatory
if j not in m.set_J_int[(g, l2, l1)] # not interfaced
if j not in m.set_J_col[(g, l2, l1)] # individual
)
if (g, l2, l1) in m.set_J
else 0 <= m.param_max_number_parallel_arcs[(g, l1, l2)]
)
else: # the number of options is lower than or equal to the limit: skip
return pyo.Constraint.Skip
model.constr_limited_parallel_arcs_per_direction = pyo.Constraint(
model.set_GLL_arc_max, rule=rule_constr_limited_parallel_arcs_per_direction
)
# *************************************************************************
    # there can be at most one incoming arc, if there are no outgoing arcs
def rule_constr_max_incoming_directed_arcs(m, g, l):
# check if the node is not among those subject to a limited number of incoming arcs
if l not in m.set_L_max_in_g[g]:
# it is not, skip this constraint
return pyo.Constraint.Skip
# max number of directed incoming arcs
n_max_dir_in = sum(
sum(
1
for j in m.set_J[(g, l_line, l)]
if j not in m.set_J_und[(g, l_line, l)]
) # directed
for l_line in m.set_L[g]
if l_line != l
if l_line not in m.set_L_imp[g]
if (g, l_line, l) in m.set_J
)
# check the maximum number of incoming arcs
if n_max_dir_in <= 1:
# there can only be one incoming arc at most: redundant constraint
return pyo.Constraint.Skip
else: # more than one incoming arc is possible
# *****************************************************************
# number of (new) incoming directed arcs in a group
# *****************************************************************
b_max_in_gl = 0
# the big m
M_gl = n_max_dir_in - 1 # has to be positive since n_max_dir_in > 1
# TODO: put parenthesis to avoid funny results
temp_constr = (
sum(
# *********************************************************
# interfaced groups
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_xi_arc_inv_t[t] # in t
for t in m.set_T_int
)
+
# *********************************************************
# optional non-interfaced groups
sum(
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_delta_arc_inv_th[(t, h)]
for h in m.set_H_t[t]
)
for t in m.set_T
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# *********************************************************
# interfaced arcs
(sum(
m.var_xi_arc_inv_gllj[(g, l_circ, l, j_circ)]
for j_circ in m.set_J[(g, l_circ, l)]
if j_circ not in m.set_J_und[(g, l_circ, l)] # directed
if j_circ in m.set_J_int[(g, l_circ, l)] # interfaced
if j_circ not in m.set_J_col[(g, l_circ, l)] # individual
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# optional non-interfaced arcs
(sum(
sum(
m.var_delta_arc_inv_glljh[(g, l_circ, l, j_dot, h_dot)]
for h_dot in m.set_H_gllj[(g, l_circ, l, j_dot)]
)
for j_dot in m.set_J[(g, l_circ, l)]
if j_dot not in m.set_J_und[(g, l_circ, l)] # directed
if j_dot not in m.set_J_int[(g, l_circ, l)] # not interfaced
if j_dot not in m.set_J_col[(g, l_circ, l)] # individual
if j_dot not in m.set_J_mdt[(g, l_circ, l)] # optional
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# preexisting directed arcs
(sum(
1
for j_pre_dir in m.set_J_pre[(g, l_circ, l)] # preexisting
if j_pre_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_pre
else 0) +
# *********************************************************
# mandatory directed arcs
(sum(
1
for j_mdt_dir in m.set_J_mdt[(g, l_circ, l)]
if j_mdt_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_mdt
else 0)
# *********************************************************
for l_circ in m.set_L[g]
if l_circ not in m.set_L_exp[g]
if l_circ != l
)
<= 1 # +
# M_gl*sum(
# # *********************************************************
# # outgoing arcs in interfaced groups, nominal direction
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # outgoing arcs in interfaced groups, reverse direction
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # TODO: outgoing arcs in non-interfaced optional groups, nominal
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # TODO: outgoing arcs in non-interfaced optional groups, reverse
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # interfaced individual outgoing arcs, nominal direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_int[(g,l,l_diamond)] # interfaced
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# ) if (g,l,l_diamond) in m.set_J_int else 0
# +
# # *********************************************************
# # interfaced individual undirected arcs, reverse direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j in m.set_J_int[(g,l_diamond,l)] # interfaced
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # outgoing non-interfaced individual optional arcs
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)]
# for h in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J[(g,l,l_diamond)]
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# if j not in m.set_J_mdt[(g,l,l_diamond)] # optional
# if j not in m.set_J_int[(g,l,l_diamond)] # interfaced
# ) if (g,l,l_diamond) in m.set_J else 0
# +
# # *********************************************************
# # individual non-interfaced undirected arcs, reverse dir.
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l_diamond,l,j,h)]
# for h in m.set_H_gllj[(g,l_diamond,l,j)])
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# if j not in m.set_J_mdt[(g,l_diamond,l)] # optional
# if j not in m.set_J_int[(g,l_diamond,l)] # interfaced
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
            # # preselected outgoing arcs, nominal direction
# len(m.set_J_pre[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_pre else 0
# +
# # *********************************************************
# # mandatory outgoing arcs, nominal direction
# len(m.set_J_mdt[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_mdt else 0
# +
# # *********************************************************
# # undirected preselected arcs, reverse direction
# sum(1
# for j in m.set_J_pre[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_pre else 0
# +
# # *********************************************************
# # undirected mandatory arcs, reverse direction
# sum(1
# for j in m.set_J_mdt[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_mdt else 0
# # *********************************************************
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )
)
if type(temp_constr) == bool:
# trivial outcome
return pyo.Constraint.Feasible if temp_constr else pyo.Constraint.Infeasible
else:
# constraint is relevant
return temp_constr
model.constr_max_incoming_directed_arcs = pyo.Constraint(
model.set_GL_not_exp_imp, rule=rule_constr_max_incoming_directed_arcs
)
# *************************************************************************
# def rule_constr_max_outgoing_directed_arcs(m, g, l):
# pass
# model.constr_max_outgoing_directed_arcs = pyo.Constraint(
# model.set_GL_not_exp_imp,
# rule=rule_constr_max_outgoing_directed_arcs
# )
# # *************************************************************************
# # there can only one outgoing arc at most, if there are no incoming arcs
# def rule_constr_max_outgoing_arcs(m,g,l):
# # the number of predefined incoming arcs
# n_in_pre = sum(
# len(m.set_J_pre[(g,l_star,l)]) # = n_in_pre
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# if l_star != l
# )
# # if there is at least one predefined incoming arc, skip constraint
# if n_in_pre >= 1:
# return pyo.Constraint.Skip
# # the number of non-predefined incoming arcs
# n_in_opt = sum(
# len(m.set_J_new[(g,l_star,l)]) # = n_in_pre
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# if l_star != l
# )
# n_in_max = n_in_pre + n_in_opt
# # the number of predefined outgoing arcs
# n_out_pre = sum(
# len(m.set_J_pre[(g,l,l_line)])
# for l_line in m.set_L[g]
# if l_line not in m.set_L_imp[g]
# if l_line != l
# )
# # the constraint is infeasible if the maximum number of incoming arcs
# # is zero and the number of predefined outgoing arcs is bigger than 1
# if n_in_max == 0 and n_out_pre >= 2:
# return pyo.Constraint.Infeasible
# # DONE: it is also infeasible if the maximum number of incoming arcs is
# # zero and the number of predefined outgoing arcs is one and the poten-
# # tial outgoing arcs include mandatory arcs (i.e. sum(...)=1 )
# n_out_fcd = sum(
# len(m.set_J_mdt[(g,l,l_line)])
# for l_line in m.set_L[g]
# if l_line not in m.set_L_imp[g]
# if l_line != l
# )
# if n_in_max == 0 and n_out_pre == 1 and n_out_fcd >= 1:
# return pyo.Constraint.Infeasible
# # the number of non-predefined outgoing arcs
# n_out_opt = sum(
# len(m.set_J_new[(g,l,l_line)])
# for l_line in m.set_L[g]
# if l_line not in m.set_L_imp[g]
# if l_line != l
# )
# n_out_max = n_out_pre + n_out_opt
# if n_out_max <= 1:
# # there can only be one outgoing arc at most: redundant constraint
# return pyo.Constraint.Skip
# else: # more than one outgoing arc is possible
# M_gl = n_out_max - 1
# return (
# sum(
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,s)]
# for s in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J_new[(g,l,l_diamond)]
# )
# #+len(m.set_J_pre[(g,l,l_diamond)]) # = n_out_pre
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )+n_out_pre
# <= 1 + M_gl*
# sum(
# sum(
# sum(m.var_delta_arc_inv_glljh[
# (g,l_star,l,j_star,s_star)]
# for s_star in m.set_H_gllj[(g,l_star,l,j_star)])
# for j_star in m.set_J_new[(g,l_star,l)]
# )
# #+len(m.set_J_pre[(g,l_star,l)]) # = n_in_pre
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# if l_star != l
# )+n_in_pre
# )
# model.constr_max_outgoing_arcs = pyo.Constraint(
# model.set_GL_not_exp_imp,
# rule=rule_constr_max_outgoing_arcs)
add_network_restrictions(model)
# *************************************************************************
# *************************************************************************
......
......@@ -612,21 +612,62 @@ class Network(nx.MultiDiGraph):
KEY_ARC_TECH_CAPACITY_INSTANTANEOUS,
KEY_ARC_TECH_STATIC_LOSS,
)
NET_TYPE_HYBRID = 0
NET_TYPE_TREE = 1
NET_TYPE_REV_TREE = 2
NET_TYPES = (
NET_TYPE_HYBRID,
NET_TYPE_TREE,
NET_TYPE_REV_TREE
)
def __init__(self, incoming_graph_data=None, **attr):
def __init__(self, network_type = NET_TYPE_HYBRID, **kwargs):
# run base class init routine
nx.MultiDiGraph.__init__(self, incoming_graph_data=incoming_graph_data, **attr)
nx.MultiDiGraph.__init__(self, **kwargs)
# identify node types
self.identify_node_types()
# declare variables for the nodes without directed arc limitations
self.network_type = network_type
self.nodes_w_in_dir_arc_limitations = dict()
self.nodes_w_out_dir_arc_limitations = dict()
# *************************************************************************
# *************************************************************************
def _set_up_node(self, node_key, max_number_in_arcs: int = None, max_number_out_arcs: int = None):
if self.should_be_tree_network():
# nodes have to be part of a tree: one incoming arc per node at most
self.nodes_w_in_dir_arc_limitations[node_key] = 1
elif self.should_be_reverse_tree_network():
# nodes have to be part of a reverse tree: one outgoing arc per node at most
self.nodes_w_out_dir_arc_limitations[node_key] = 1
else:
# nodes have no peculiar restrictions or they are defined 1 by 1
if type(max_number_in_arcs) != type(None):
self.nodes_w_in_dir_arc_limitations[node_key] = max_number_in_arcs
if type(max_number_out_arcs) != type(None):
self.nodes_w_out_dir_arc_limitations[node_key] = max_number_out_arcs
self.nodes_wo_in_dir_arc_limitations = []
# *************************************************************************
# *************************************************************************
def should_be_tree_network(self) -> bool:
return self.network_type == self.NET_TYPE_TREE
self.nodes_wo_out_dir_arc_limitations = []
# *************************************************************************
# *************************************************************************
def should_be_reverse_tree_network(self) -> bool:
return self.network_type == self.NET_TYPE_REV_TREE
# *************************************************************************
# *************************************************************************
......@@ -661,23 +702,25 @@ class Network(nx.MultiDiGraph):
# add a new supply/demand node
def add_source_sink_node(self, node_key, base_flow: dict):
def add_source_sink_node(self, node_key, base_flow: dict, **kwargs):
node_dict = {
self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_SOURCE_SINK,
self.KEY_NODE_BASE_FLOW: base_flow,
}
self.add_node(node_key, **node_dict)
self._set_up_node(node_key, **kwargs)
# *************************************************************************
# *************************************************************************
# add a new waypoint node
def add_waypoint_node(self, node_key, **kwargs):
    """Add a waypoint (pass-through) node to the network.

    Note: a stale duplicate signature left over from a merge was removed;
    the keyword-forwarding version is the effective one.

    Parameters
    ----------
    node_key : hashable
        Key identifying the new node.
    **kwargs
        Forwarded to _set_up_node (e.g. max_number_in_arcs,
        max_number_out_arcs).
    """
    node_dict = {self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_WAY}
    self.add_node(node_key, **node_dict)
    self._set_up_node(node_key, **kwargs)
# *************************************************************************
# *************************************************************************
......
......@@ -63,6 +63,15 @@ class InfrastructurePlanningProblem(EnergySystem):
STATIC_LOSS_MODE_US,
STATIC_LOSS_MODE_DS,
)
# identifiers for the supported node price formulations
# NOTE(review): the exact formulation behind each identifier is defined
# where these constants are consumed — confirm before documenting further
NODE_PRICE_LAMBDA = 1
NODE_PRICE_DELTA = 2
NODE_PRICE_OTHER = 3
# tuple collecting every recognised node price model identifier
NODE_PRICES = (
    NODE_PRICE_LAMBDA,
    NODE_PRICE_DELTA,
    NODE_PRICE_OTHER
)
# *************************************************************************
# *************************************************************************
......@@ -80,6 +89,7 @@ class InfrastructurePlanningProblem(EnergySystem):
converters: dict = None,
prepare_model: bool = True,
validate_inputs: bool = True,
node_price_model = NODE_PRICE_DELTA
): # TODO: switch to False when everything is more mature
# *********************************************************************
......@@ -1830,22 +1840,14 @@ class InfrastructurePlanningProblem(EnergySystem):
}
set_L_max_in_g = {
g: tuple(
l
for l in self.networks[g].nodes
if l not in self.networks[g].nodes_wo_in_dir_arc_limitations
)
g: tuple(self.networks[g].nodes_w_in_dir_arc_limitations.keys())
for g in self.networks.keys()
}
}
# set_L_max_out_g = {
# g: tuple(
# l
# for l in self.networks[g].nodes
# if l not in self.networks[g].nodes_wo_out_dir_arc_limitations
# )
# for g in self.networks.keys()
# }
set_L_max_out_g = {
g: tuple(self.networks[g].nodes_w_out_dir_arc_limitations.keys())
for g in self.networks.keys()
}
set_GL = tuple((g, l) for g in set_G for l in set_L[g])
......@@ -1897,7 +1899,7 @@ class InfrastructurePlanningProblem(EnergySystem):
for (g, l) in set_GL_exp_imp
for (q, p, k) in set_QPK
}
# set of GLKS tuples
set_GLQPKS = tuple(
(*glqpk, s) for glqpk, s_tuple in set_S.items() for s in s_tuple
......@@ -2547,6 +2549,17 @@ class InfrastructurePlanningProblem(EnergySystem):
for s in set_S[(g, l, q, p, k)]
}
# price function convexity
param_price_function_is_convex = {
(g, l, q, p, k): (
self.networks[g].nodes[l][Network.KEY_NODE_PRICES][(q, p, k)].price_monotonically_increasing_with_volume()
if l in set_L_imp[g] else
self.networks[g].nodes[l][Network.KEY_NODE_PRICES][(q, p, k)].price_monotonically_decreasing_with_volume()
)
for (g, l, q, p, k) in set_S
}
# maximum resource volume per segment (infinity is the default)
param_v_max_glqpks = {
......@@ -3317,7 +3330,7 @@ class InfrastructurePlanningProblem(EnergySystem):
"set_L_imp": set_L_imp,
"set_L_exp": set_L_exp,
"set_L_max_in_g": set_L_max_in_g,
#'set_L_max_out_g': set_L_max_out_g,
'set_L_max_out_g': set_L_max_out_g,
"set_GL": set_GL,
"set_GL_exp": set_GL_exp,
"set_GL_imp": set_GL_imp,
......@@ -3449,6 +3462,7 @@ class InfrastructurePlanningProblem(EnergySystem):
"param_c_df_qp": param_c_df_qp,
"param_c_time_qpk": param_c_time_qpk,
"param_p_glqpks": param_p_glqpks,
"param_price_function_is_convex": param_price_function_is_convex,
"param_v_max_glqpks": param_v_max_glqpks,
# *****************************************************************
# converters
......
......@@ -12,7 +12,11 @@ from numbers import Real
class ResourcePrice:
"""A class for piece-wise linear resource prices in network problems."""
def __init__(self, prices: list or int, volumes: list = None):
def __init__(
self,
prices: list or int,
volumes: list = None
):
# how do we keep the size of the object as small as possible
# if the tariff is time-invariant, how can information be stored?
# - a flag
......@@ -206,30 +210,10 @@ class ResourcePrice:
# *************************************************************************
# *************************************************************************
def is_equivalent(self, other) -> bool:
    """Return True if another ResourcePrice defines the same tariff.

    Two tariffs are equivalent when their number of segments, their
    segment prices and their segment volume limits all coincide.
    """
    # short-circuit: cheapest check first, then prices, then volumes
    return (
        self.number_segments() == other.number_segments()
        and self.prices == other.prices
        and self.volumes == other.volumes
    )
# *************************************************************************
# *************************************************************************
def __eq__(self, o) -> bool:
    """Return True if another object hashes identically to this one.

    Equality is delegated to __hash__ so that objects that compare equal
    are interchangeable as dictionary keys and set members. A stale
    `return self.is_equivalent(o)` left over from a merge (referencing a
    deleted method and shadowing this return) was removed.
    """
    return hash(self) == hash(o)
def __hash__(self):
return hash(
......@@ -260,9 +244,7 @@ def are_prices_time_invariant(resource_prices_qpk: dict) -> bool:
# check if the tariffs per period and assessment are equivalent
for qp, qpk_list in qpk_qp.items():
for i in range(len(qpk_list) - 1):
if not resource_prices_qpk[qpk_list[0]].is_equivalent(
resource_prices_qpk[qpk_list[i + 1]]
):
if not resource_prices_qpk[qpk_list[0]] == resource_prices_qpk[qpk_list[i + 1]]:
return False
# all tariffs are equivalent per period and assessment: they are invariant
return True
......
......@@ -99,7 +99,7 @@ def statistics(ipp: InfrastructurePlanningProblem,
imports_qpk = {
qpk: pyo.value(
sum(
ipp.instance.var_if_glqpks[(g,l_imp,*qpk, s)]
ipp.instance.var_trans_flows_glqpks[(g,l_imp,*qpk, s)]
for g, l_imp in import_node_keys
# for g in ipp.networks
# for l_imp in ipp.networks[g].import_nodes
......@@ -114,7 +114,7 @@ def statistics(ipp: InfrastructurePlanningProblem,
exports_qpk = {
qpk: pyo.value(
sum(
ipp.instance.var_ef_glqpks[(g,l_exp,*qpk, s)]
ipp.instance.var_trans_flows_glqpks[(g,l_exp,*qpk, s)]
for g, l_exp in export_node_keys
# for g in ipp.networks
# for l_exp in ipp.networks[g].export_nodes
......
......@@ -2170,12 +2170,10 @@ class TestNetwork:
# *************************************************************************
def test_tree_topology(self):
# create a network object with a tree topology
tree_network = binomial_tree(3, create_using=MultiDiGraph)
network = Network(tree_network)
network = Network(incoming_graph_data=tree_network)
for edge_key in network.edges(keys=True):
arc = ArcsWithoutLosses(
name=str(edge_key),
......@@ -2184,44 +2182,36 @@ class TestNetwork:
specific_capacity_cost=0,
capacity_is_instantaneous=False,
)
network.add_edge(*edge_key, **{Network.KEY_ARC_TECH: arc})
# assert that it does not have a tree topology
assert not network.has_tree_topology()
# select all the nodes
for edge_key in network.edges(keys=True):
network.edges[edge_key][Network.KEY_ARC_TECH].options_selected[0] = True
# assert that it has a tree topology
assert network.has_tree_topology()
# *************************************************************************
# *************************************************************************
def test_pseudo_unique_key_generation(self):
# create network
network = Network()
# add node A
network.add_waypoint_node(node_key="A")
# add node B
network.add_waypoint_node(node_key="B")
# identify nodes
network.identify_node_types()
# add arcs
key_list = [
"3e225573-4e78-48c8-bb08-efbeeb795c22",
"f6d30428-15d1-41e9-a952-0742eaaa5a31",
......
# imports
# standard
import math
# local
# import numpy as np
# import networkx as nx
import pyomo.environ as pyo
# import src.topupopt.problems.esipp.utils as utils
from src.topupopt.data.misc.utils import generate_pseudo_unique_key
from src.topupopt.problems.esipp.problem import InfrastructurePlanningProblem
from src.topupopt.problems.esipp.network import Arcs, Network
from src.topupopt.problems.esipp.resource import ResourcePrice
# from src.topupopt.problems.esipp.utils import compute_cost_volume_metrics
from src.topupopt.problems.esipp.utils import statistics
from src.topupopt.problems.esipp.time import EconomicTimeFrame
# from src.topupopt.problems.esipp.converter import Converter
# *****************************************************************************
# *****************************************************************************
class TestESIPPProblem:
solver = 'glpk'
# solver = 'scip'
# solver = 'cbc'
def build_solve_ipp(
    self,
    solver: str = None,
    solver_options: dict = None,
    use_sos_arcs: bool = False,
    arc_sos_weight_key: str = (InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE),
    arc_use_real_variables_if_possible: bool = False,
    use_sos_sense: bool = False,
    sense_sos_weight_key: int = (
        InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER
    ),
    sense_use_real_variables_if_possible: bool = False,
    sense_use_arc_interfaces: bool = False,
    perform_analysis: bool = False,
    plot_results: bool = False,
    print_solver_output: bool = False,
    time_frame: EconomicTimeFrame = None,
    networks: dict = None,
    converters: dict = None,
    static_losses_mode=None,
    mandatory_arcs: list = None,
    max_number_parallel_arcs: dict = None,
    arc_groups_dict: dict = None,
    init_aux_sets: bool = False,
    # discount_rates: dict = None,
    assessment_weights: dict = None,
    simplify_problem: bool = False,
):
    """Build, configure, instantiate and solve an infrastructure planning
    problem, returning the solved InfrastructurePlanningProblem object.

    Shared helper for the tests in this class: `time_frame`, `networks`,
    `static_losses_mode` and `max_number_parallel_arcs` are effectively
    required (no fallback is applied to them); the remaining keyword
    arguments toggle optional features (SOS1 constraints, arc interfaces,
    arc groups, mandatory arcs, problem simplification).

    Raises
    ------
    ValueError
        If `static_losses_mode` does not match one of the recognised
        static loss placement modes.
    """
    if type(solver) == type(None):
        # fall back to the class-level default solver
        solver = self.solver
    if type(assessment_weights) != dict:
        assessment_weights = {}  # default
    if type(converters) != dict:
        converters = {}
    # time weights
    # relative weight of time period
    # one interval twice as long as the average is worth twice
    # one interval half as long as the average is worth half
    # time_weights = [
    #     [time_period_duration/average_time_interval_duration
    #       for time_period_duration in intraperiod_time_interval_duration]
    #     for p in range(number_periods)]
    time_weights = None  # nothing yet
    normalised_time_interval_duration = None  # nothing yet
    # create problem object
    ipp = InfrastructurePlanningProblem(
        # discount_rates=discount_rates,
        time_frame=time_frame,
        # reporting_periods=time_frame.reporting_periods,
        # time_intervals=time_frame.time_interval_durations,
        time_weights=time_weights,
        normalised_time_interval_duration=normalised_time_interval_duration,
        assessment_weights=assessment_weights,
    )
    # add networks and systems
    for netkey, net in networks.items():
        ipp.add_network(network_key=netkey, network=net)
    # add converters
    for cvtkey, cvt in converters.items():
        ipp.add_converter(converter_key=cvtkey, converter=cvt)
    # define arcs as mandatory
    if type(mandatory_arcs) == list:
        for full_arc_key in mandatory_arcs:
            # full_arc_key[0] is the network key, the rest is the arc key
            ipp.make_arc_mandatory(full_arc_key[0], full_arc_key[1:])
    # if make_all_arcs_mandatory:
    #     for network_key in ipp.networks:
    #         for arc_key in ipp.networks[network_key].edges(keys=True):
    #             # preexisting arcs are no good
    #             if ipp.networks[network_key].edges[arc_key][
    #                     Network.KEY_ARC_TECH].has_been_selected():
    #                 continue
    #             ipp.make_arc_mandatory(network_key, arc_key)
    # set up the use of sos for arc selection
    if use_sos_arcs:
        for network_key in ipp.networks:
            for arc_key in ipp.networks[network_key].edges(keys=True):
                # skip arcs that already exist (preselected options)
                if (
                    ipp.networks[network_key]
                    .edges[arc_key][Network.KEY_ARC_TECH]
                    .has_been_selected()
                ):
                    continue
                ipp.use_sos1_for_arc_selection(
                    network_key,
                    arc_key,
                    use_real_variables_if_possible=(
                        arc_use_real_variables_if_possible
                    ),
                    sos1_weight_method=arc_sos_weight_key,
                )
    # set up the use of sos for flow sense determination
    if use_sos_sense:
        for network_key in ipp.networks:
            for arc_key in ipp.networks[network_key].edges(keys=True):
                # only undirected arcs need flow sense determination
                if not ipp.networks[network_key].edges[arc_key][
                    Network.KEY_ARC_UND
                ]:
                    continue
                ipp.use_sos1_for_flow_senses(
                    network_key,
                    arc_key,
                    use_real_variables_if_possible=(
                        sense_use_real_variables_if_possible
                    ),
                    use_interface_variables=sense_use_arc_interfaces,
                    sos1_weight_method=sense_sos_weight_key,
                )
    elif sense_use_arc_interfaces:  # set up the use of arc interfaces w/o sos1
        for network_key in ipp.networks:
            for arc_key in ipp.networks[network_key].edges(keys=True):
                if (
                    ipp.networks[network_key]
                    .edges[arc_key][Network.KEY_ARC_TECH]
                    .has_been_selected()
                ):
                    continue
                ipp.use_interface_variables_for_arc_selection(network_key, arc_key)
    # static losses
    # NOTE(review): several tests pass static_losses_mode=True, which
    # appears to rely on True comparing equal to one of the mode
    # constants — confirm against the constant definitions
    if static_losses_mode == ipp.STATIC_LOSS_MODE_ARR:
        ipp.place_static_losses_arrival_node()
    elif static_losses_mode == ipp.STATIC_LOSS_MODE_DEP:
        ipp.place_static_losses_departure_node()
    elif static_losses_mode == ipp.STATIC_LOSS_MODE_US:
        ipp.place_static_losses_upstream()
    elif static_losses_mode == ipp.STATIC_LOSS_MODE_DS:
        ipp.place_static_losses_downstream()
    else:
        raise ValueError("Unknown static loss modelling mode.")
    # *********************************************************************
    # groups
    if type(arc_groups_dict) != type(None):
        for key in arc_groups_dict:
            ipp.create_arc_group(arc_groups_dict[key])
    # *********************************************************************
    # maximum number of parallel arcs
    # NOTE(review): assumes max_number_parallel_arcs is a dict — the
    # default of None would raise here; callers always pass a dict
    for key in max_number_parallel_arcs:
        ipp.set_maximum_number_parallel_arcs(
            network_key=key[0],
            node_a=key[1],
            node_b=key[2],
            limit=max_number_parallel_arcs[key],
        )
    # *********************************************************************
    if simplify_problem:
        ipp.simplify_peak_total_assessments()
    # *********************************************************************
    # instantiate (disable the default case v-a-v fixed losses)
    # ipp.instantiate(place_fixed_losses_upstream_if_possible=False)
    ipp.instantiate(initialise_ancillary_sets=init_aux_sets)
    # ipp.instance.pprint()
    # optimise
    ipp.optimise(
        solver_name=solver,
        solver_options=solver_options,
        output_options={},
        print_solver_output=print_solver_output,
    )
    # ipp.instance.pprint()
    # return the problem object
    return ipp
# *********************************************************************
# *********************************************************************
# *************************************************************************
# *************************************************************************
def test_problem_increasing_imp_prices(self):
    """Convex (increasing) import tariff on a two-node network: the
    problem stays linear (no non-convex block) and the solution matches
    the hand-computed optimum."""
    # assessment
    q = 0
    tf = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,)},
        time_interval_durations={q: (1,)},
    )
    # 2 nodes: one import, one regular
    mynet = Network()
    # import node
    node_IMP = 'I'
    mynet.add_import_node(
        node_key=node_IMP,
        prices={
            qpk: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
            for qpk in tf.qpk()
        },
    )
    # other nodes
    node_A = 'A'
    mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
    # arc IA
    arc_tech_IA = Arcs(
        name="any",
        efficiency={(q, 0): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
    # identify node types
    mynet.identify_node_types()
    # no sos, regular time intervals
    ipp = self.build_solve_ipp(
        solver_options={},
        perform_analysis=False,
        plot_results=False,  # True,
        print_solver_output=False,
        time_frame=tf,
        networks={"mynet": mynet},
        static_losses_mode=True,  # just to reach a line,
        mandatory_arcs=[],
        max_number_parallel_arcs={},
        simplify_problem=False
    )
    assert not ipp.has_peak_total_assessments()
    assert ipp.results["Problem"][0]["Number of constraints"] == 10
    assert ipp.results["Problem"][0]["Number of variables"] == 11
    assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
    # *********************************************************************
    # *********************************************************************
    # validation
    # the arc should be installed since it is required for feasibility
    assert (
        True
        in ipp.networks["mynet"]
        .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
        .options_selected
    )
    # the import flow should be 2.0 (demand of 1.0 at 0.5 arc efficiency)
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
        2.0,
        abs_tol=1e-6,
    )
    # arc amplitude should be two
    assert math.isclose(
        pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
        2.0,
        abs_tol=0.01,
    )
    # capex should be four
    assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
    # sdncf should be -3.5
    assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3)
    # the objective function should be -7.5
    assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_imp_prices(self):
    """Non-convex (decreasing) import tariff with bounded segments: the
    non-convex block adds constraints/variables and the solution matches
    the hand-computed optimum."""
    # assessment
    q = 0
    tf = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,)},
        time_interval_durations={q: (1,)},
    )
    # 2 nodes: one import, one regular
    mynet = Network()
    # import node
    node_IMP = 'I'
    mynet.add_import_node(
        node_key=node_IMP,
        prices={
            qpk: ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, 3.0])
            for qpk in tf.qpk()
        },
    )
    # other nodes
    node_A = 'A'
    mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
    # arc IA
    arc_tech_IA = Arcs(
        name="any",
        efficiency={(q, 0): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
    # identify node types
    mynet.identify_node_types()
    # no sos, regular time intervals
    ipp = self.build_solve_ipp(
        solver_options={},
        perform_analysis=False,
        plot_results=False,  # True,
        print_solver_output=False,
        time_frame=tf,
        networks={"mynet": mynet},
        static_losses_mode=True,  # just to reach a line,
        mandatory_arcs=[],
        max_number_parallel_arcs={},
        simplify_problem=False
    )
    assert not ipp.has_peak_total_assessments()
    assert ipp.results["Problem"][0]["Number of constraints"] == 14  # 10 prior to nonconvex block
    assert ipp.results["Problem"][0]["Number of variables"] == 13  # 11 prior to nonconvex block
    assert ipp.results["Problem"][0]["Number of nonzeros"] == 28  # 20 prior to nonconvex block
    # *********************************************************************
    # *********************************************************************
    # validation
    # the arc should be installed since it is required for feasibility
    assert (
        True
        in ipp.networks["mynet"]
        .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
        .options_selected
    )
    # the import flow should be 2.0 (demand of 1.0 at 0.5 arc efficiency)
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
        2.0,
        abs_tol=1e-6,
    )
    # arc amplitude should be two
    assert math.isclose(
        pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
        2.0,
        abs_tol=0.01,
    )
    # capex should be four
    assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
    # sdncf should be -2.5
    assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
    # the objective function should be -6.5: -4.0-2.5
    assert math.isclose(pyo.value(ipp.instance.obj_f), -6.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_imp_prices_infinite_capacity(self):
    """A decreasing import tariff whose final segment has no volume
    limit must make problem construction fail."""
    # single assessment, single period, single time interval
    q = 0
    time_frame = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,)},
        time_interval_durations={q: (1,)},
    )
    # two-node network: an import node feeding a demand node
    net = Network()
    import_node = 'I'
    net.add_import_node(
        node_key=import_node,
        prices={
            qpk: ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
            for qpk in time_frame.qpk()
        },
    )
    demand_node = 'A'
    net.add_source_sink_node(node_key=demand_node, base_flow={(q, 0): 1.0})
    # directed arc from the import node to the demand node
    arc_ia = Arcs(
        name="any",
        efficiency={(q, 0): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    net.add_directed_arc(node_key_a=import_node, node_key_b=demand_node, arcs=arc_ia)
    net.identify_node_types()
    # building the problem must raise: the decreasing price function
    # cannot have an unbounded final segment
    try:
        self.build_solve_ipp(
            solver_options={},
            perform_analysis=False,
            plot_results=False,
            print_solver_output=False,
            time_frame=time_frame,
            networks={"mynet": net},
            static_losses_mode=True,  # just to reach a line,
            mandatory_arcs=[],
            max_number_parallel_arcs={},
            simplify_problem=False,
        )
    except Exception:
        error_raised = True
    else:
        error_raised = False
    assert error_raised
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_exp_prices(self):
    """Convex (decreasing) export tariff on a two-node network: the
    problem stays linear and the solution matches the hand-computed
    optimum."""
    # assessment
    q = 0
    # time
    number_intervals = 1
    # periods
    number_periods = 1
    tf = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,)},
        time_interval_durations={q: (1,)},
    )
    # 2 nodes: one export, one regular
    mynet = Network()
    # export node
    node_EXP = generate_pseudo_unique_key(mynet.nodes())
    mynet.add_export_node(
        node_key=node_EXP,
        prices={
            (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
            for p in range(number_periods)
            for k in range(number_intervals)
        },
    )
    # other nodes
    node_A = 'A'
    mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
    # arc from A to the export node
    arc_tech_IA = Arcs(
        name="any",
        efficiency={(q, 0): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
    # identify node types
    mynet.identify_node_types()
    # no sos, regular time intervals
    ipp = self.build_solve_ipp(
        solver_options={},
        perform_analysis=False,
        plot_results=False,  # True,
        print_solver_output=False,
        time_frame=tf,
        networks={"mynet": mynet},
        static_losses_mode=True,  # just to reach a line,
        mandatory_arcs=[],
        max_number_parallel_arcs={},
        simplify_problem=False,
    )
    assert not ipp.has_peak_total_assessments()
    assert ipp.results["Problem"][0]["Number of constraints"] == 10
    assert ipp.results["Problem"][0]["Number of variables"] == 11
    assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
    # *********************************************************************
    # *********************************************************************
    # validation
    # the arc should be installed since it is required for feasibility
    assert (
        True
        in ipp.networks["mynet"]
        .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
        .options_selected
    )
    # the export-bound flow should be 1.0
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
        1.0,
        abs_tol=1e-6,
    )
    # arc amplitude should be one
    assert math.isclose(
        pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
        1.0,
        abs_tol=0.01,
    )
    # capex should be three
    assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
    # sdncf should be 1.0
    assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3)
    # the objective function should be -2.0: -3.0+1.0
    assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_increasing_exp_prices(self):
    """Non-convex (increasing) export tariff with bounded segments: the
    non-convex block adds constraints/variables and the solution matches
    the hand-computed optimum."""
    # assessment
    q = 0
    # time
    number_intervals = 1
    # periods
    number_periods = 1
    tf = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,)},
        time_interval_durations={q: (1,)},
    )
    # 2 nodes: one export, one regular
    mynet = Network()
    # export node
    node_EXP = generate_pseudo_unique_key(mynet.nodes())
    mynet.add_export_node(
        node_key=node_EXP,
        prices={
            (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.25, 3.0])
            for p in range(number_periods)
            for k in range(number_intervals)
        },
    )
    # other nodes
    node_A = 'A'
    mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
    # arc from A to the export node
    arc_tech_IA = Arcs(
        name="any",
        efficiency={(q, 0): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
    # identify node types
    mynet.identify_node_types()
    # no sos, regular time intervals
    ipp = self.build_solve_ipp(
        solver_options={},
        perform_analysis=False,
        plot_results=False,  # True,
        print_solver_output=False,
        time_frame=tf,
        networks={"mynet": mynet},
        static_losses_mode=True,  # just to reach a line,
        mandatory_arcs=[],
        max_number_parallel_arcs={},
        simplify_problem=False,
    )
    assert not ipp.has_peak_total_assessments()
    assert ipp.results["Problem"][0]["Number of constraints"] == 14  # 10 before nonconvex block
    assert ipp.results["Problem"][0]["Number of variables"] == 13  # 11 before nonconvex block
    assert ipp.results["Problem"][0]["Number of nonzeros"] == 28  # 20 before nonconvex block
    # *********************************************************************
    # *********************************************************************
    # validation
    # the arc should be installed since it is required for feasibility
    assert (
        True
        in ipp.networks["mynet"]
        .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
        .options_selected
    )
    # the export-bound flow should be 1.0
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
        1.0,
        abs_tol=1e-6,
    )
    # arc amplitude should be one
    assert math.isclose(
        pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
        1.0,
        abs_tol=0.01,
    )
    # capex should be three
    assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
    # sdncf should be 0.75
    assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 0.75, abs_tol=1e-3)
    # the objective function should be -2.25
    assert math.isclose(pyo.value(ipp.instance.obj_f), -2.25, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_increasing_exp_prices_infinite_capacity(self):
    """An increasing export tariff whose final segment has no volume
    limit must make problem construction fail."""
    # assessment
    q = 0
    # time
    number_intervals = 1
    # periods
    number_periods = 1
    tf = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,)},
        time_interval_durations={q: (1,)},
    )
    # 2 nodes: one export, one regular
    mynet = Network()
    # export node
    node_EXP = generate_pseudo_unique_key(mynet.nodes())
    mynet.add_export_node(
        node_key=node_EXP,
        prices={
            (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.25, None])
            for p in range(number_periods)
            for k in range(number_intervals)
        },
    )
    # other nodes
    node_A = 'A'
    mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
    # arc from A to the export node
    arc_tech_IA = Arcs(
        name="any",
        efficiency={(q, 0): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
    # identify node types
    mynet.identify_node_types()
    # trigger the error: unbounded final segment on a non-convex tariff
    error_raised = False
    try:
        # no sos, regular time intervals
        self.build_solve_ipp(
            solver_options={},
            perform_analysis=False,
            plot_results=False,  # True,
            print_solver_output=False,
            time_frame=tf,
            networks={"mynet": mynet},
            static_losses_mode=True,  # just to reach a line,
            mandatory_arcs=[],
            max_number_parallel_arcs={},
            simplify_problem=False,
        )
    except Exception:
        error_raised = True
    assert error_raised
# *************************************************************************
# *************************************************************************
def test_problem_increasing_imp_decreasing_exp_prices(self):
    """Convex import and export tariffs over two time intervals: the
    system imports in interval 0 and exports in interval 1, matching
    the hand-computed optimum."""
    # scenario
    q = 0
    # time
    number_intervals = 2
    # periods
    number_periods = 1
    tf = EconomicTimeFrame(
        discount_rate=0.0,
        reporting_periods={q: (0,)},
        reporting_period_durations={q: (365 * 24 * 3600,)},
        time_intervals={q: (0,1)},
        time_interval_durations={q: (1,1)},
    )
    # 3 nodes: one import, one export, one regular
    mynet = Network()
    # import node
    node_IMP = 'I'
    mynet.add_import_node(
        node_key=node_IMP,
        prices={
            (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
            for p in range(number_periods)
            for k in range(number_intervals)
        },
    )
    # export node
    node_EXP = generate_pseudo_unique_key(mynet.nodes())
    mynet.add_export_node(
        node_key=node_EXP,
        prices={
            (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
            for p in range(number_periods)
            for k in range(number_intervals)
        },
    )
    # other nodes
    node_A = 'A'
    mynet.add_source_sink_node(
        node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0}
    )
    # arc IA
    arc_tech_IA = Arcs(
        name="any",
        efficiency={(q, 0): 0.5, (q, 1): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
    # arc AE
    arc_tech_AE = Arcs(
        name="any",
        efficiency={(q, 0): 0.5, (q, 1): 0.5},
        efficiency_reverse=None,
        static_loss=None,
        capacity=[3],
        minimum_cost=[2],
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=False,
    )
    mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE)
    # identify node types
    mynet.identify_node_types()
    # no sos, regular time intervals
    ipp = self.build_solve_ipp(
        solver_options={},
        perform_analysis=False,
        plot_results=False,  # True,
        print_solver_output=False,
        time_frame=tf,
        networks={"mynet": mynet},
        static_losses_mode=True,  # just to reach a line,
        mandatory_arcs=[],
        max_number_parallel_arcs={},
        simplify_problem=False,
        # discount_rates={0: (0.0,)},
    )
    assert not ipp.has_peak_total_assessments()
    assert ipp.results["Problem"][0]["Number of constraints"] == 23
    assert ipp.results["Problem"][0]["Number of variables"] == 26
    assert ipp.results["Problem"][0]["Number of nonzeros"] == 57
    # *********************************************************************
    # *********************************************************************
    # validation
    # the arc should be installed since it is required for feasibility
    assert (
        True
        in ipp.networks["mynet"]
        .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
        .options_selected
    )
    # the arc should be installed since it is required for feasibility
    assert (
        True
        in ipp.networks["mynet"]
        .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
        .options_selected
    )
    # interval 0: import only
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
        2.0,
        abs_tol=1e-6,
    )
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
        0.0,
        abs_tol=1e-6,
    )
    # interval 1: export only
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]),
        0.0,
        abs_tol=1e-6,
    )
    assert math.isclose(
        pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]),
        1.0,
        abs_tol=1e-6,
    )
    # IA amplitude
    assert math.isclose(
        pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
        2.0,
        abs_tol=0.01,
    )
    # AE amplitude
    assert math.isclose(
        pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
        1.0,
        abs_tol=0.01,
    )
    # capex should be 7.0: 4+3
    assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3)
    # sdncf should be -2.5: -3.5+1.0
    assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
    # the objective function should be -9.5: -7.0-2.5
    assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_direct_imp_exp_network_higher_exp_prices(self):
# time frame
q = 0
tf = EconomicTimeFrame(
discount_rate=3.5/100,
reporting_periods={q: (0,1)},
reporting_period_durations={q: (365 * 24 * 3600,365 * 24 * 3600)},
time_intervals={q: (0,1)},
time_interval_durations={q: (1,1)},
)
# 4 nodes: one import, one export, two supply/demand nodes
mynet = Network()
# import node
imp_node_key = 'thatimpnode'
imp_prices = {
qpk: ResourcePrice(
prices=0.5,
volumes=None,
)
for qpk in tf.qpk()
}
mynet.add_import_node(
node_key=imp_node_key,
prices=imp_prices
)
# export node
exp_node_key = 'thatexpnode'
exp_prices = {
qpk: ResourcePrice(
prices=1.5,
volumes=None,
)
for qpk in tf.qpk()
}
mynet.add_export_node(
node_key=exp_node_key,
prices=exp_prices,
)
# add arc without fixed losses from import node to export
arc_tech_IE = Arcs(
name="IE",
# efficiency=[1, 1, 1, 1],
efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1},
efficiency_reverse=None,
static_loss=None,
validate=False,
capacity=[0.5, 1.0, 2.0],
minimum_cost=[5, 5.1, 5.2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
)
mynet.add_directed_arc(
node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE
)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
networks={"mynet": mynet},
time_frame=tf,
static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
mandatory_arcs=[],
max_number_parallel_arcs={}
)
# export prices are higher: it makes sense to install the arc since the
# revenue (@ max. cap.) exceeds the cost of installing the arc
assert (
True
in ipp.networks["mynet"]
.edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# overview
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be no imports
abs_tol = 1e-6
abs_tol = 1e-3
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert imports_qp > 0.0 - abs_tol
abs_tol = 1e-3
import_costs_qp = sum(import_costs_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert import_costs_qp > 0.0 - abs_tol
# there should be no exports
abs_tol = 1e-2
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert exports_qp > 0.0 - abs_tol
assert export_revenue_qp > 0.0 - abs_tol
# the revenue should exceed the costs
abs_tol = 1e-2
assert (
export_revenue_qp > import_costs_qp - abs_tol
)
# the capex should be positive
abs_tol = 1e-6
assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -597,389 +597,6 @@ class TestESIPPProblem:
# *************************************************************************
# *************************************************************************
    def test_problem_increasing_imp_prices(self):
        """Single-interval problem with a tiered (increasing) import price.

        A demand of 1.0 at node A behind a 50%-efficient arc forces an
        import flow of 2.0: the first 0.5 is bought at price 1.0 and the
        remaining 1.5 at price 2.0.
        """
        # assessment
        q = 0
        tf = EconomicTimeFrame(
            discount_rate=0.0,
            reporting_periods={q: (0,)},
            reporting_period_durations={q: (365 * 24 * 3600,)},
            time_intervals={q: (0,)},
            time_interval_durations={q: (1,)},
        )
        # 2 nodes: one import, one regular
        mynet = Network()
        # import node with a two-tier price: 1.0 up to a volume of 0.5, then 2.0
        node_IMP = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_import_node(
            node_key=node_IMP,
            prices={
                # (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
                # for p in range(number_periods)
                # for k in range(number_intervals)
                qpk: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
                for qpk in tf.qpk()
            },
        )
        # other nodes: node A demands 1.0 in the single interval
        node_A = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
        # arc IA: 50% efficient, capacity 3, minimum cost 2
        arc_tech_IA = Arcs(
            name="any",
            efficiency={(q, 0): 0.5},
            efficiency_reverse=None,
            static_loss=None,
            capacity=[3],
            minimum_cost=[2],
            specific_capacity_cost=1,
            capacity_is_instantaneous=False,
            validate=False,
        )
        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
        # identify node types
        mynet.identify_node_types()
        # no sos, regular time intervals
        ipp = self.build_solve_ipp(
            solver_options={},
            perform_analysis=False,
            plot_results=False, # True,
            print_solver_output=False,
            time_frame=tf,
            networks={"mynet": mynet},
            static_losses_mode=True, # just to reach a line,
            mandatory_arcs=[],
            max_number_parallel_arcs={},
            simplify_problem=False
        )
        assert not ipp.has_peak_total_assessments()
        assert ipp.results["Problem"][0]["Number of constraints"] == 10
        assert ipp.results["Problem"][0]["Number of variables"] == 11
        assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
        # *********************************************************************
        # *********************************************************************
        # validation
        # the arc should be installed since it is required for feasibility
        assert (
            True
            in ipp.networks["mynet"]
            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
            .options_selected
        )
        # the import flow should be 2.0 (demand of 1.0 at 50% efficiency)
        assert math.isclose(
            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
            2.0,
            abs_tol=1e-6,
        )
        # arc amplitude should be two
        assert math.isclose(
            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
            2.0,
            abs_tol=0.01,
        )
        # capex should be four: minimum cost 2 + amplitude 2 x specific cost 1
        assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
        # sdncf should be -3.5: -(0.5*1.0 + 1.5*2.0)
        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3)
        # the objective function should be -7.5: -4.0 - 3.5
        assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
    def test_problem_decreasing_exp_prices(self):
        """Single-interval problem with a tiered (decreasing) export price.

        A supply of 1.0 at node A behind a 50%-efficient arc yields an
        export flow of 0.5, all sold in the first (higher-priced) tier.
        """
        # assessment
        q = 0
        # time
        number_intervals = 1
        # periods
        number_periods = 1
        tf = EconomicTimeFrame(
            discount_rate=0.0,
            reporting_periods={q: (0,)},
            reporting_period_durations={q: (365 * 24 * 3600,)},
            time_intervals={q: (0,)},
            time_interval_durations={q: (1,)},
        )
        # 2 nodes: one export, one regular
        mynet = Network()
        # export node with a two-tier price: 2.0 up to a volume of 0.5, then 1.0
        node_EXP = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_export_node(
            node_key=node_EXP,
            prices={
                (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
                for p in range(number_periods)
                for k in range(number_intervals)
            },
        )
        # other nodes: node A supplies 1.0 in the single interval
        node_A = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
        # arc AE: 50% efficient, capacity 3, minimum cost 2
        arc_tech_IA = Arcs(
            name="any",
            efficiency={(q, 0): 0.5},
            efficiency_reverse=None,
            static_loss=None,
            capacity=[3],
            minimum_cost=[2],
            specific_capacity_cost=1,
            capacity_is_instantaneous=False,
            validate=False,
        )
        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
        # identify node types
        mynet.identify_node_types()
        # no sos, regular time intervals
        ipp = self.build_solve_ipp(
            solver_options={},
            perform_analysis=False,
            plot_results=False, # True,
            print_solver_output=False,
            time_frame=tf,
            networks={"mynet": mynet},
            static_losses_mode=True, # just to reach a line,
            mandatory_arcs=[],
            max_number_parallel_arcs={},
            simplify_problem=False,
        )
        assert not ipp.has_peak_total_assessments()
        assert ipp.results["Problem"][0]["Number of constraints"] == 10
        assert ipp.results["Problem"][0]["Number of variables"] == 11
        assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
        # *********************************************************************
        # *********************************************************************
        # validation
        # the arc should be installed since it is required for feasibility
        assert (
            True
            in ipp.networks["mynet"]
            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
            .options_selected
        )
        # the arc flow should be 1.0 (the full supply from node A)
        assert math.isclose(
            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
            1.0,
            abs_tol=1e-6,
        )
        # arc amplitude should be one
        assert math.isclose(
            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
            1.0,
            abs_tol=0.01,
        )
        # capex should be three: minimum cost 2 + amplitude 1 x specific cost 1
        assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
        # sdncf should be 1.0: 0.5 exported at price 2.0
        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3)
        # the objective function should be -2.0: -3.0 + 1.0
        assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
    def test_problem_increasing_imp_decreasing_exp_prices(self):
        """Two-interval problem combining tiered import and export prices.

        Interval 0 has a demand of 1.0 (imports only, through arc IA at 50%
        efficiency); interval 1 has a supply of 1.0 (exports only, through
        arc AE at 50% efficiency).
        """
        # scenario
        q = 0
        # time
        number_intervals = 2
        # periods
        number_periods = 1
        tf = EconomicTimeFrame(
            discount_rate=0.0,
            reporting_periods={q: (0,)},
            reporting_period_durations={q: (365 * 24 * 3600,)},
            time_intervals={q: (0,1)},
            time_interval_durations={q: (1,1)},
        )
        # 3 nodes: one import, one export, one regular
        mynet = Network()
        # import node with a two-tier price: 1.0 up to a volume of 0.5, then 2.0
        node_IMP = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_import_node(
            node_key=node_IMP,
            prices={
                (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
                for p in range(number_periods)
                for k in range(number_intervals)
            },
        )
        # export node with a two-tier price: 2.0 up to a volume of 0.5, then 1.0
        node_EXP = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_export_node(
            node_key=node_EXP,
            prices={
                (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
                for p in range(number_periods)
                for k in range(number_intervals)
            },
        )
        # other nodes: node A demands 1.0 in interval 0 and supplies 1.0 in interval 1
        node_A = generate_pseudo_unique_key(mynet.nodes())
        mynet.add_source_sink_node(
            node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0}
        )
        # arc IA
        arc_tech_IA = Arcs(
            name="any",
            efficiency={(q, 0): 0.5, (q, 1): 0.5},
            efficiency_reverse=None,
            static_loss=None,
            capacity=[3],
            minimum_cost=[2],
            specific_capacity_cost=1,
            capacity_is_instantaneous=False,
            validate=False,
        )
        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
        # arc AE
        arc_tech_AE = Arcs(
            name="any",
            efficiency={(q, 0): 0.5, (q, 1): 0.5},
            efficiency_reverse=None,
            static_loss=None,
            capacity=[3],
            minimum_cost=[2],
            specific_capacity_cost=1,
            capacity_is_instantaneous=False,
            validate=False,
        )
        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE)
        # identify node types
        mynet.identify_node_types()
        # no sos, regular time intervals
        ipp = self.build_solve_ipp(
            solver_options={},
            perform_analysis=False,
            plot_results=False, # True,
            print_solver_output=False,
            time_frame=tf,
            networks={"mynet": mynet},
            static_losses_mode=True, # just to reach a line,
            mandatory_arcs=[],
            max_number_parallel_arcs={},
            simplify_problem=False,
            # discount_rates={0: (0.0,)},
        )
        assert not ipp.has_peak_total_assessments()
        assert ipp.results["Problem"][0]["Number of constraints"] == 23
        assert ipp.results["Problem"][0]["Number of variables"] == 26
        assert ipp.results["Problem"][0]["Number of nonzeros"] == 57
        # *********************************************************************
        # *********************************************************************
        # validation
        # the arc should be installed since it is required for feasibility
        assert (
            True
            in ipp.networks["mynet"]
            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
            .options_selected
        )
        # the arc should be installed since it is required for feasibility
        assert (
            True
            in ipp.networks["mynet"]
            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
            .options_selected
        )
        # interval 0: import only
        assert math.isclose(
            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
            2.0,
            abs_tol=1e-6,
        )
        assert math.isclose(
            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
            0.0,
            abs_tol=1e-6,
        )
        # interval 1: export only
        assert math.isclose(
            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]),
            0.0,
            abs_tol=1e-6,
        )
        assert math.isclose(
            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]),
            1.0,
            abs_tol=1e-6,
        )
        # IA amplitude
        assert math.isclose(
            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
            2.0,
            abs_tol=0.01,
        )
        # AE amplitude
        assert math.isclose(
            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
            1.0,
            abs_tol=0.01,
        )
        # capex should be 7.0: 4+3
        assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3)
        # sdncf should be -2.5: -3.5+1.0
        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
        # the objective function should be -9.5: -7.5-2.5
        assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_two_scenarios(self):
......@@ -1781,7 +1398,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -1791,7 +1408,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
......@@ -1951,7 +1568,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -1961,7 +1578,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
......@@ -2119,7 +1736,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -2129,7 +1746,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
......@@ -2259,7 +1876,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -2269,7 +1886,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
......@@ -2349,7 +1966,7 @@ class TestESIPPProblem:
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
solver_options={},solver='scip',
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
......@@ -3059,7 +2676,7 @@ class TestESIPPProblem:
mynet = Network()
# import nodes
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -3375,7 +2992,7 @@ class TestESIPPProblem:
mynet = Network()
# import nodes
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -3638,7 +3255,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
imp_prices = {
qpk: ResourcePrice(
prices=1.5,
......@@ -3652,7 +3269,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
exp_prices = {
qpk: ResourcePrice(
prices=0.5,
......@@ -3745,141 +3362,6 @@ class TestESIPPProblem:
# there should be no capex
abs_tol = 1e-6
assert math.isclose(pyo.value(ipp.instance.var_capex), 0.0, abs_tol=abs_tol)
# *************************************************************************
# *************************************************************************
    def test_direct_imp_exp_network_higher_exp_prices(self):
        """Verify that a direct import-export arc is installed when profitable.

        Export prices (1.5) exceed import prices (0.5), so the solver should
        install the arc, import, and re-export at a profit.
        """
        # time frame
        q = 0
        tf = EconomicTimeFrame(
            discount_rate=3.5/100,
            reporting_periods={q: (0,1)},
            reporting_period_durations={q: (365 * 24 * 3600,365 * 24 * 3600)},
            time_intervals={q: (0,1)},
            time_interval_durations={q: (1,1)},
        )
        # 2 nodes: one import, one export
        mynet = Network()
        # import node (cheap imports)
        imp_node_key = generate_pseudo_unique_key(mynet.nodes())
        imp_prices = {
            qpk: ResourcePrice(
                prices=0.5,
                volumes=None,
            )
            for qpk in tf.qpk()
        }
        mynet.add_import_node(
            node_key=imp_node_key,
            prices=imp_prices
        )
        # export node (expensive exports)
        exp_node_key = generate_pseudo_unique_key(mynet.nodes())
        exp_prices = {
            qpk: ResourcePrice(
                prices=1.5,
                volumes=None,
            )
            for qpk in tf.qpk()
        }
        mynet.add_export_node(
            node_key=exp_node_key,
            prices=exp_prices,
        )
        # add arc without fixed losses from import node to export
        arc_tech_IE = Arcs(
            name="IE",
            # efficiency=[1, 1, 1, 1],
            efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1},
            efficiency_reverse=None,
            static_loss=None,
            validate=False,
            capacity=[0.5, 1.0, 2.0],
            minimum_cost=[5, 5.1, 5.2],
            specific_capacity_cost=1,
            capacity_is_instantaneous=False,
        )
        mynet.add_directed_arc(
            node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE
        )
        # identify node types
        mynet.identify_node_types()
        # no sos, regular time intervals
        ipp = self.build_solve_ipp(
            solver_options={},
            perform_analysis=False,
            plot_results=False, # True,
            print_solver_output=False,
            networks={"mynet": mynet},
            time_frame=tf,
            static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
            mandatory_arcs=[],
            max_number_parallel_arcs={}
        )
        # export prices are higher: it makes sense to install the arc since the
        # revenue (@ max. cap.) exceeds the cost of installing the arc
        assert (
            True
            in ipp.networks["mynet"]
            .edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
            .options_selected
        )
        # overview
        (imports_qpk,
         exports_qpk,
         balance_qpk,
         import_costs_qpk,
         export_revenue_qpk,
         ncf_qpk,
         aggregate_static_demand_qpk,
         aggregate_static_supply_qpk,
         aggregate_static_balance_qpk) = statistics(ipp)
        # there should be imports, and therefore import costs
        abs_tol = 1e-6
        # NOTE(review): the assignment above is dead — immediately overwritten
        abs_tol = 1e-3
        imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
        assert imports_qp > 0.0 - abs_tol
        abs_tol = 1e-3
        import_costs_qp = sum(import_costs_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
        assert import_costs_qp > 0.0 - abs_tol
        # there should be exports, and therefore export revenue
        abs_tol = 1e-2
        exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
        export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
        assert exports_qp > 0.0 - abs_tol
        assert export_revenue_qp > 0.0 - abs_tol
        # the revenue should exceed the costs
        abs_tol = 1e-2
        assert (
            export_revenue_qp > import_costs_qp - abs_tol
        )
        # the capex should be positive (an arc was installed)
        abs_tol = 1e-6
        assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol
# *************************************************************************
# *************************************************************************
......@@ -5938,7 +5420,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -5996,7 +5478,7 @@ class TestESIPPProblem:
solver_options={},
perform_analysis=False,
plot_results=False,
print_solver_output=True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=static_losses_mode,
......@@ -6087,7 +5569,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -6147,7 +5629,7 @@ class TestESIPPProblem:
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=static_losses_mode,
......@@ -6240,7 +5722,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -6523,7 +6005,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -6800,7 +6282,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -7084,7 +6566,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -7705,11 +7187,10 @@ class TestESIPPProblem:
number_periods = 2
# 4 nodes: one import, one export, two supply/demand nodes
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -7720,7 +7201,7 @@ class TestESIPPProblem:
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# add arcs
......@@ -7840,11 +7321,10 @@ class TestESIPPProblem:
number_periods = 2
# 4 nodes: one import, one export, two supply/demand nodes
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
......@@ -7855,7 +7335,7 @@ class TestESIPPProblem:
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# add arcs
......@@ -8129,7 +7609,7 @@ class TestESIPPProblem:
)
# 2 nodes: one import, one regular
mynet = Network()
mynet = Network(network_type=Network.NET_TYPE_TREE)
# import node
node_IMP = "thatimpnode"
......@@ -8223,11 +7703,8 @@ class TestESIPPProblem:
max_number_parallel_arcs={},
simplify_problem=True,
)
print('wowowowow')
ipp.instance.constr_max_incoming_directed_arcs.pprint()
assert ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of variables"] == 53
assert ipp.results["Problem"][0]["Number of nonzeros"] == 143
......@@ -8278,8 +7755,8 @@ class TestESIPPProblem:
)
# 2 nodes: one import, one regular
mynet = Network()
mynet = Network(network_type=Network.NET_TYPE_REV_TREE)
# export node
node_EXP = "thatexpnode"
mynet.add_export_node(
......@@ -8309,12 +7786,12 @@ class TestESIPPProblem:
base_flow={(q, 0): -1.25},
)
list_imp_arcs = [
list_exp_arcs = [
(node_A, node_EXP), # AE
(node_B, node_EXP), # BE
(node_C, node_EXP), # CE
]
for i, node_pair in enumerate(list_imp_arcs):
for i, node_pair in enumerate(list_exp_arcs):
# import arcs: AE, BE, CE
new_arc = Arcs(
......@@ -8372,10 +7849,8 @@ class TestESIPPProblem:
max_number_parallel_arcs={},
simplify_problem=True,
)
print('owowowowow')
ipp.instance.constr_max_incoming_directed_arcs.pprint()
assert ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of variables"] == 53
assert ipp.results["Problem"][0]["Number of nonzeros"] == 143 #
......@@ -8384,9 +7859,9 @@ class TestESIPPProblem:
# validation
# only the IA arc should be installed
true_imp_arcs_selected = [True, False, False]
for node_pair, true_arc_decision in zip(list_imp_arcs, true_imp_arcs_selected):
# only the AE arc should be installed
true_exp_arcs_selected = [True, False, False]
for node_pair, true_arc_decision in zip(list_exp_arcs, true_exp_arcs_selected):
assert (
true_arc_decision
in ipp.networks["mynet"]
......
......@@ -132,8 +132,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[volumes])
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
......@@ -144,8 +144,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -156,8 +156,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
......@@ -168,8 +168,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
......@@ -183,8 +183,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[volumes])
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
......@@ -195,8 +195,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -207,8 +207,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
......@@ -219,8 +219,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -231,8 +231,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[volumes + 1])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -243,8 +243,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes + 1)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -255,8 +255,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[None])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -267,8 +267,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=None)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
......@@ -294,8 +294,8 @@ class TestResourcePrice:
volumes = [1, None]
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# two segments, no volume limit, same format
# prices do not match = False
......@@ -306,8 +306,8 @@ class TestResourcePrice:
prices = [2, 3]
volumes = [1, None]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -320,8 +320,8 @@ class TestResourcePrice:
volumes = [1, 3]
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# two segments, volume limit, same format: False
# prices do not match = False
......@@ -332,8 +332,8 @@ class TestResourcePrice:
prices = [1, 4]
volumes = [1, 4]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -348,8 +348,8 @@ class TestResourcePrice:
prices = [1, 3]
volumes = [1, 5]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# single segment, volume limit, same format
# volumes do not match = False
......@@ -360,8 +360,8 @@ class TestResourcePrice:
prices = [1, 3]
volumes = [1, None]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
......@@ -374,8 +374,8 @@ class TestResourcePrice:
prices = [1, 3, 5]
volumes = [1, 4, None]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
......