diff --git a/src/topupopt/problems/esipp/blocks/prices.py b/src/topupopt/problems/esipp/blocks/prices.py
index e64883481ff52ef192b141dbfc132c55d20ca7b7..48aae65e479ff7737c9096bc039c55ad87bc0582 100644
--- a/src/topupopt/problems/esipp/blocks/prices.py
+++ b/src/topupopt/problems/esipp/blocks/prices.py
@@ -6,34 +6,101 @@ import pyomo.environ as pyo
 
 def add_prices_block(
     model: pyo.AbstractModel,
-    enable_default_values: bool = True,
-    enable_validation: bool = True,
-    enable_initialisation: bool = True,
+    **kwargs
 ):  
     
     # *************************************************************************
     # *************************************************************************
+    
+    # model.node_price_block = pyo.Block(model.set_QPK)
 
-    # sparse index sets
+    price_other(model, **kwargs)
+    # price_block_other(model, **kwargs)
 
-    # *************************************************************************
+# *****************************************************************************
+# *****************************************************************************
+# TODO: try to implement it as a block
+def price_block_other(
+    model: pyo.AbstractModel,
+    enable_default_values: bool = True,
+    enable_validation: bool = True,
+    enable_initialisation: bool = True
+    ):
+    
+    model.set_GLQPK = model.set_GL_exp_imp*model.set_QPK
+    
+    def rule_node_prices(b, g, l, q, p, k):
+        
+        # imported flow
+        def bounds_var_if_glqpks(m, g, l, q, p, k, s):
+            if (g, l, q, p, k, s) in m.param_v_max_glqpks:
+                # predefined finite capacity
+                return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
+            else:
+                # infinite capacity
+                return (0, None)
+
+        b.var_trans_flow_s = pyo.Var(
+            b.set_GLQPKS, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks
+        )
+        # imported flow cost
+        def rule_constr_imp_flow_cost(m, g, l, q, p, k):
+            return (
+                sum(
+                    m.var_if_glqpks[(g, l, q, p, k, s)]
+                    * m.param_p_glqpks[(g, l, q, p, k, s)]
+                    for s in m.set_S[(g, l, q, p, k)]
+                )
+                == m.var_ifc_glqpk[(g, l, q, p, k)]
+            )
 
-    # set of price segments
+        model.constr_imp_flow_cost = pyo.Constraint(
+            model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
+        )
 
-    model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
+        # imported flows
+        def rule_constr_imp_flows(m, g, l, q, p, k):
+            return sum(
+                m.var_v_glljqk[(g, l, l_star, j, q, k)]
+                for l_star in m.set_L[g]
+                if l_star not in m.set_L_imp[g]
+                for j in m.set_J[(g, l, l_star)]  # only directed arcs
+            ) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
+
+        model.constr_imp_flows = pyo.Constraint(
+            model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
+        )
+        
+        
+        
+        
+        # if (g,l) in b.parent_block().set_GL_imp:
+        #     # import node
+            
+            
+
+        #     pass
+        # elif (g,l) in b.parent_block().set_GL_exp:
+        #     # export node
+        #     pass
+        # otherwise: do nothing
+    
+    model.node_price_block = pyo.Block(model.set_GLQPK, rule=rule_node_prices)
 
-    # set of GLQKS tuples
+    # set of price segments
+    model.node_price_block.set_S = pyo.Set()
 
+    # set of GLQKS tuples
     def init_set_GLQPKS(m):
         return (
             (g, l, q, p, k, s)
             # for (g,l) in m.set_GL_exp_imp
             # for (q,k) in m.set_QK
-            for (g, l, q, p, k) in m.set_S
-            for s in m.set_S[(g, l, q, p, k)]
+            for (g, l, q, p, k) in m.node_price_block.set_S
+            for s in m.node_price_block.set_S[(g, l, q, p, k)]
         )
 
-    model.set_GLQPKS = pyo.Set(
+    model.node_price_block.set_GLQPKS = pyo.Set(
         dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
     )
 
@@ -42,7 +109,7 @@ def add_prices_block(
             glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
         )
 
-    model.set_GLQPKS_exp = pyo.Set(
+    model.node_price_block.set_GLQPKS_exp = pyo.Set(
         dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
     )
 
@@ -51,7 +118,7 @@ def add_prices_block(
             glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
         )
 
-    model.set_GLQPKS_imp = pyo.Set(
+    model.node_price_block.set_GLQPKS_imp = pyo.Set(
         dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
     )
 
@@ -60,45 +127,206 @@ def add_prices_block(
 
     # parameters
 
+    # resource prices
+
+    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
+
+    # maximum resource volumes for each prices
+
+    model.param_v_max_glqpks = pyo.Param(
+        model.set_GLQPKS, 
+        within=pyo.NonNegativeReals
+        )
+
     # *************************************************************************
     # *************************************************************************
 
-    # objective function
+    # variables
 
+    # *************************************************************************
     # *************************************************************************
 
-    # resource prices
+    # exported flow
 
-    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
+    # TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
 
-    # maximum resource volumes for each prices
+    def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
+        if (g, l, q, p, k, s) in m.param_v_max_glqpks:
+            # predefined finite capacity
+            return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
+        else:
+            # infinite capacity
+            return (0, None)
+
+    model.var_ef_glqpks = pyo.Var(
+        model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
+    )
 
-    model.param_v_max_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
+    
 
     # *************************************************************************
     # *************************************************************************
 
-    # variables
+    # exported flow revenue
+    def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
+        return (
+            sum(
+                m.var_ef_glqpks[(g, l, q, p, k, s)]
+                * m.param_p_glqpks[(g, l, q, p, k, s)]
+                for s in m.set_S[(g, l, q, p, k)]
+            )
+            == m.var_efr_glqpk[(g, l, q, p, k)]
+        )
+
+    model.constr_exp_flow_revenue = pyo.Constraint(
+        model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
+    )
+
+   
+
+    # exported flows
+    def rule_constr_exp_flows(m, g, l, q, p, k):
+        return sum(
+            m.var_v_glljqk[(g, l_star, l, j, q, k)]
+            * m.param_eta_glljqk[(g, l_star, l, j, q, k)]
+            for l_star in m.set_L[g]
+            if l_star not in m.set_L_exp[g]
+            for j in m.set_J[(g, l_star, l)]  # only directed arcs
+        ) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
+
+    model.constr_exp_flows = pyo.Constraint(
+        model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
+    )
+    # *************************************************************************
+    # *************************************************************************
+    
+    # # non-convex price functions
+    
+    # if not convex_price_function:
+        
+    #     # delta variables
+    #     model.var_delta_glqpks = pyo.Var(
+    #         model.set_GLQPKS, within=pyo.Binary
+    #     )
+        
+    #     # segments must be empty if the respective delta variable is zero
+    #     def rule_constr_empty_segment_if_delta_zero_imp(m, g, l, q, p, k, s):
+    #         return (
+    #             m.var_if_glqpks[(g,l,q,p,k,s)] <= 
+    #             m.param_v_max_glqpks[(g,l,q,p,k,s)]*
+    #             m.var_delta_glqpks[(g,l,q,p,k,s)]
+    #             )
+    #     model.constr_empty_segment_if_delta_zero_imp = pyo.Constraint(
+    #         model.set_GLQPKS_imp, rule=rule_constr_empty_segment_if_delta_zero_imp
+    #         )
+            
+    #     # segments must be empty if the respective delta variable is zero
+    #     def rule_constr_empty_segment_if_delta_zero_exp(m, g, l, q, p, k, s):
+    #         return (
+    #             m.var_ef_glqpks[(g,l,q,p,k,s)] <= 
+    #             m.param_v_max_glqpks[(g,l,q,p,k,s)]*
+    #             m.var_delta_glqpks[(g,l,q,p,k,s)]
+    #             )
+    #     model.constr_empty_segment_if_delta_zero_exp = pyo.Constraint(
+    #         model.set_GLQPKS_exp, rule=rule_constr_empty_segment_if_delta_zero_exp
+    #         )
+        
+    #     # if delta var is one, previous ones must be one too
+    #     def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
+    #         if s == len(m.set_S)-1:
+    #             return pyo.Constraint.Skip
+    #         return (
+    #             m.var_delta_glqpks[(g,l,q,p,k,s)] >= 
+    #             m.var_delta_glqpks[(g,l,q,p,k,s+1)]
+    #             )
+    #     model.constr_delta_summing_logic = pyo.Constraint(
+    #         model.set_GLQPKS, rule=rule_constr_delta_summing_logic
+    #         )
+    #     # if delta var is zero, subsequent ones must also be zero
+    #     def rule_constr_delta_next_zeros(m, g, l, q, p, k, s):
+    #         if s == len(m.set_S)-1:
+    #             return pyo.Constraint.Skip
+    #         return (
+    #             1-m.var_delta_glqpks[(g,l,q,p,k,s)] >= 
+    #             m.var_delta_glqpks[(g,l,q,p,k,s+1)]
+    #             )
+    #     model.constr_delta_next_zeros = pyo.Constraint(
+    #         model.set_GLQPKS, rule=rule_constr_delta_next_zeros
+    #         )
 
     # *************************************************************************
     # *************************************************************************
 
-    # objective function
+# *****************************************************************************
+# *****************************************************************************
 
-    # capex
+def price_other(
+    model: pyo.AbstractModel,
+    convex_price_function: bool = False,
+    enable_default_values: bool = True,
+    enable_validation: bool = True,
+    enable_initialisation: bool = True
+    ):
 
-    # exported flow revenue
+    # set of price segments
+    model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
 
-    model.var_efr_glqpk = pyo.Var(
-        model.set_GL_exp, model.set_QPK, within=pyo.NonNegativeReals
+    # set of GLQKS tuples
+    def init_set_GLQPKS(m):
+        return (
+            (g, l, q, p, k, s)
+            # for (g,l) in m.set_GL_exp_imp
+            # for (q,k) in m.set_QK
+            for (g, l, q, p, k) in m.set_S
+            for s in m.set_S[(g, l, q, p, k)]
+        )
+
+    model.set_GLQPKS = pyo.Set(
+        dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
     )
 
-    # imported flow cost
+    def init_set_GLQPKS_exp(m):
+        return (
+            glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
+        )
 
-    model.var_ifc_glqpk = pyo.Var(
-        model.set_GL_imp, model.set_QPK, within=pyo.NonNegativeReals
+    model.set_GLQPKS_exp = pyo.Set(
+        dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
     )
 
+    def init_set_GLQPKS_imp(m):
+        return (
+            glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
+        )
+
+    model.set_GLQPKS_imp = pyo.Set(
+        dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
+    )
+
+    # *************************************************************************
+    # *************************************************************************
+
+    # parameters
+
+    # resource prices
+
+    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
+
+    # maximum resource volumes for each prices
+
+    model.param_v_max_glqpks = pyo.Param(
+        model.set_GLQPKS, 
+        within=pyo.NonNegativeReals
+        )
+
+    # *************************************************************************
+    # *************************************************************************
+
+    # variables
+
+    # *************************************************************************
+    # *************************************************************************
+
     # exported flow
 
     # TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
@@ -188,18 +416,97 @@ def add_prices_block(
     model.constr_imp_flows = pyo.Constraint(
         model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
     )
-    
-    # *************************************************************************
-    # *************************************************************************
-
-    return model
 
     # *************************************************************************
     # *************************************************************************
-
+    
+    # non-convex price functions
+    
+    if not convex_price_function:
+        
+        # delta variables
+        model.var_delta_glqpks = pyo.Var(
+            model.set_GLQPKS, within=pyo.Binary
+        )
+        
+        # segments must be empty if the respective delta variable is zero
+        def rule_constr_empty_segment_if_delta_zero_imp(m, g, l, q, p, k, s):
+            return (
+                m.var_if_glqpks[(g,l,q,p,k,s)] <= 
+                m.param_v_max_glqpks[(g,l,q,p,k,s)]*
+                m.var_delta_glqpks[(g,l,q,p,k,s)]
+                )
+        model.constr_empty_segment_if_delta_zero_imp = pyo.Constraint(
+            model.set_GLQPKS_imp, rule=rule_constr_empty_segment_if_delta_zero_imp
+            )
+            
+        # segments must be empty if the respective delta variable is zero
+        def rule_constr_empty_segment_if_delta_zero_exp(m, g, l, q, p, k, s):
+            return (
+                m.var_ef_glqpks[(g,l,q,p,k,s)] <= 
+                m.param_v_max_glqpks[(g,l,q,p,k,s)]*
+                m.var_delta_glqpks[(g,l,q,p,k,s)]
+                )
+        model.constr_empty_segment_if_delta_zero_exp = pyo.Constraint(
+            model.set_GLQPKS_exp, rule=rule_constr_empty_segment_if_delta_zero_exp
+            )
+        
+        # if delta var is one, previous ones must be one too
+        # if delta var is zero, the next ones must also be zero
+        def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
+            if s == len(m.set_S[(g,l,q,p,k)])-1:
+                # last segment, skip
+                return pyo.Constraint.Skip
+            return (
+                m.var_delta_glqpks[(g,l,q,p,k,s)] >= 
+                m.var_delta_glqpks[(g,l,q,p,k,s+1)]
+                )
+        model.constr_delta_summing_logic = pyo.Constraint(
+            model.set_GLQPKS, rule=rule_constr_delta_summing_logic
+            )
+        
+        # if a segment is not completely used, the next ones must remain empty
+        def rule_constr_fill_up_segment_before_next(m, g, l, q, p, k, s):
+            if s == len(m.set_S[(g,l,q,p,k)])-1:
+                # last segment, skip
+                return pyo.Constraint.Skip
+            if (g,l) in m.set_GL_imp:
+                return (
+                    m.var_if_glqpks[(g,l,q,p,k,s)] >= 
+                    m.var_delta_glqpks[(g,l,q,p,k,s+1)]*
+                    m.param_v_max_glqpks[(g,l,q,p,k,s)]
+                    )
+            else:
+                return (
+                    m.var_ef_glqpks[(g,l,q,p,k,s)] >= 
+                    m.var_delta_glqpks[(g,l,q,p,k,s+1)]*
+                    m.param_v_max_glqpks[(g,l,q,p,k,s)]
+                    )
+            # return (
+            #     m.var_if_glqpks[(g,l,q,p,k,s)]/m.param_v_max_glqpks[(g,l,q,p,k,s)] >= 
+            #     m.var_delta_glqpks[(g,l,q,p,k,s+1)]
+            #     )
+            # return (
+            #     m.param_v_max_glqpks[(g,l,q,p,k,s)]-m.var_if_glqpks[(g,l,q,p,k,s)] <= 
+            #     m.param_v_max_glqpks[(g,l,q,p,k,s)]*(1- m.var_delta_glqpks[(g,l,q,p,k,s+1)])
+            #     )
+        model.constr_fill_up_segment_before_next = pyo.Constraint(
+            model.set_GLQPKS, rule=rule_constr_fill_up_segment_before_next
+            )
 
 # *****************************************************************************
 # *****************************************************************************
+
+def price_block_lambda(model: pyo.AbstractModel, **kwargs):
+    
+    raise NotImplementedError
+
 # *****************************************************************************
 # *****************************************************************************
+
+def price_block_delta(model: pyo.AbstractModel, **kwargs):
+    
+    raise NotImplementedError
+
 # *****************************************************************************
+# *****************************************************************************
\ No newline at end of file
diff --git a/src/topupopt/problems/esipp/model.py b/src/topupopt/problems/esipp/model.py
index 566e228db2b7a3e02d36b23414655b2fdf852a85..62e387cbcdb247594a8f94602fb96ed87c52a242 100644
--- a/src/topupopt/problems/esipp/model.py
+++ b/src/topupopt/problems/esipp/model.py
@@ -3,6 +3,7 @@
 import pyomo.environ as pyo
 
 from .blocks.networks import add_network_restrictions
+from .blocks.prices import add_prices_block
 
 # *****************************************************************************
 # *****************************************************************************
@@ -22,7 +23,7 @@ def create_model(
     # create model object
 
     model = pyo.AbstractModel(name)
-
+    
     # *************************************************************************
     # *************************************************************************
 
@@ -84,7 +85,7 @@ def create_model(
     # set of exporting nodes on each network
 
     model.set_L_exp = pyo.Set(model.set_G, within=model.set_L)
-
+    
     # *************************************************************************
     # *************************************************************************
 
@@ -388,45 +389,6 @@ def create_model(
 
     # *************************************************************************
 
-    # set of price segments
-
-    model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
-
-    # set of GLQKS tuples
-
-    def init_set_GLQPKS(m):
-        return (
-            (g, l, q, p, k, s)
-            # for (g,l) in m.set_GL_exp_imp
-            # for (q,k) in m.set_QK
-            for (g, l, q, p, k) in m.set_S
-            for s in m.set_S[(g, l, q, p, k)]
-        )
-
-    model.set_GLQPKS = pyo.Set(
-        dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
-    )
-
-    def init_set_GLQPKS_exp(m):
-        return (
-            glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
-        )
-
-    model.set_GLQPKS_exp = pyo.Set(
-        dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
-    )
-
-    def init_set_GLQPKS_imp(m):
-        return (
-            glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
-        )
-
-    model.set_GLQPKS_imp = pyo.Set(
-        dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
-    )
-
-    # *************************************************************************
-
     # all arcs
 
     # set of GLLJ tuples for all arcs (undirected arcs appear twice)
@@ -1438,14 +1400,6 @@ def create_model(
         model.set_QPK, within=pyo.PositiveReals, default=1
     )
 
-    # resource prices
-
-    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
-
-    # maximum resource volumes for each prices
-
-    model.param_v_max_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
-
     # converters
 
     # externality cost per input unit
@@ -1808,36 +1762,6 @@ def create_model(
         model.set_GL_imp, model.set_QPK, within=pyo.NonNegativeReals
     )
 
-    # exported flow
-
-    # TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
-
-    def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
-        if (g, l, q, p, k, s) in m.param_v_max_glqpks:
-            # predefined finite capacity
-            return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
-        else:
-            # infinite capacity
-            return (0, None)
-
-    model.var_ef_glqpks = pyo.Var(
-        model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
-    )
-
-    # imported flow
-
-    def bounds_var_if_glqpks(m, g, l, q, p, k, s):
-        if (g, l, q, p, k, s) in m.param_v_max_glqpks:
-            # predefined finite capacity
-            return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
-        else:
-            # infinite capacity
-            return (0, None)
-
-    model.var_if_glqpks = pyo.Var(
-        model.set_GLQPKS_imp, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks
-    )
-
     # *************************************************************************
 
     # arcs
@@ -2100,67 +2024,6 @@ def create_model(
 
     model.constr_sdncf_q = pyo.Constraint(model.set_Q, rule=rule_sdncf_q)
 
-    # exported flow revenue
-
-    def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
-        return (
-            sum(
-                m.var_ef_glqpks[(g, l, q, p, k, s)]
-                * m.param_p_glqpks[(g, l, q, p, k, s)]
-                for s in m.set_S[(g, l, q, p, k)]
-            )
-            == m.var_efr_glqpk[(g, l, q, p, k)]
-        )
-
-    model.constr_exp_flow_revenue = pyo.Constraint(
-        model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
-    )
-
-    # imported flow cost
-
-    def rule_constr_imp_flow_cost(m, g, l, q, p, k):
-        return (
-            sum(
-                m.var_if_glqpks[(g, l, q, p, k, s)]
-                * m.param_p_glqpks[(g, l, q, p, k, s)]
-                for s in m.set_S[(g, l, q, p, k)]
-            )
-            == m.var_ifc_glqpk[(g, l, q, p, k)]
-        )
-
-    model.constr_imp_flow_cost = pyo.Constraint(
-        model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
-    )
-
-    # exported flows
-
-    def rule_constr_exp_flows(m, g, l, q, p, k):
-        return sum(
-            m.var_v_glljqk[(g, l_star, l, j, q, k)]
-            * m.param_eta_glljqk[(g, l_star, l, j, q, k)]
-            for l_star in m.set_L[g]
-            if l_star not in m.set_L_exp[g]
-            for j in m.set_J[(g, l_star, l)]  # only directed arcs
-        ) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
-
-    model.constr_exp_flows = pyo.Constraint(
-        model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
-    )
-
-    # imported flows
-
-    def rule_constr_imp_flows(m, g, l, q, p, k):
-        return sum(
-            m.var_v_glljqk[(g, l, l_star, j, q, k)]
-            for l_star in m.set_L[g]
-            if l_star not in m.set_L_imp[g]
-            for j in m.set_J[(g, l, l_star)]  # only directed arcs
-        ) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
-
-    model.constr_imp_flows = pyo.Constraint(
-        model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
-    )
-
     # *************************************************************************
 
     # sum of discounted externalities
@@ -2298,6 +2161,9 @@ def create_model(
     model.constr_capex_system = pyo.Constraint(
         model.set_I_new, rule=rule_capex_converter
     )
+    
+    # prices
+    add_prices_block(model)
 
     # *************************************************************************
     # *************************************************************************
diff --git a/src/topupopt/problems/esipp/problem.py b/src/topupopt/problems/esipp/problem.py
index 206a17c632d64d0cae265b6c929f54811312d389..5b2a5707492400360d4b7c66ff1df11c319b87f9 100644
--- a/src/topupopt/problems/esipp/problem.py
+++ b/src/topupopt/problems/esipp/problem.py
@@ -63,6 +63,15 @@ class InfrastructurePlanningProblem(EnergySystem):
         STATIC_LOSS_MODE_US,
         STATIC_LOSS_MODE_DS,
     )
+    
+    NODE_PRICE_LAMBDA = 1
+    NODE_PRICE_DELTA = 2
+    NODE_PRICE_OTHER = 3
+    NODE_PRICES = (
+        NODE_PRICE_LAMBDA,
+        NODE_PRICE_DELTA,
+        NODE_PRICE_OTHER
+        )
 
     # *************************************************************************
     # *************************************************************************
@@ -80,6 +89,7 @@ class InfrastructurePlanningProblem(EnergySystem):
         converters: dict = None,
         prepare_model: bool = True,
         validate_inputs: bool = True,
+        node_price_model = NODE_PRICE_DELTA
     ):  # TODO: switch to False when everything is more mature
         # *********************************************************************
 
@@ -1889,7 +1899,7 @@ class InfrastructurePlanningProblem(EnergySystem):
             for (g, l) in set_GL_exp_imp
             for (q, p, k) in set_QPK
         }
-
+        
         # set of GLKS tuples
         set_GLQPKS = tuple(
             (*glqpk, s) for glqpk, s_tuple in set_S.items() for s in s_tuple
diff --git a/src/topupopt/problems/esipp/resource.py b/src/topupopt/problems/esipp/resource.py
index bcd49ad2be59073b9c76ca653b54861dc82f1fa3..6fa1ded44a584a01d61b2b7b84acb4ba203e3951 100644
--- a/src/topupopt/problems/esipp/resource.py
+++ b/src/topupopt/problems/esipp/resource.py
@@ -12,7 +12,11 @@ from numbers import Real
 class ResourcePrice:
     """A class for piece-wise linear resource prices in network problems."""
 
-    def __init__(self, prices: list or int, volumes: list = None):
+    def __init__(
+            self, 
+            prices: list or int, 
+            volumes: list = None
+            ):
         # how do we keep the size of the object as small as possible
         # if the tariff is time-invariant, how can information be stored?
         # - a flag
@@ -206,30 +210,10 @@ class ResourcePrice:
 
     # *************************************************************************
     # *************************************************************************
-
-    def is_equivalent(self, other) -> bool:
-        """Returns True if a given ResourcePrice is equivalent to another."""
-        # resources are equivalent if:
-        # 1) the prices are the same
-        # 2) the volume limits are the same
-
-        # the number of segments has to match
-        if self.number_segments() != other.number_segments():
-            return False  # the number of segments do not match
-        # check the prices
-        if self.prices != other.prices:
-            return False  # prices are different
-        # prices match, check the volumes
-        if self.volumes != other.volumes:
-            return False  # volumes are different
-        return True  # all conditions have been met
-
-    # *************************************************************************
-    # *************************************************************************
     
     def __eq__(self, o) -> bool:
         """Returns True if a given ResourcePrice is equivalent to another."""
-        return self.is_equivalent(o)
+        return self.prices == o.prices and self.volumes == o.volumes
     
     def __hash__(self):
         return hash(
@@ -260,9 +244,7 @@ def are_prices_time_invariant(resource_prices_qpk: dict) -> bool:
     # check if the tariffs per period and assessment are equivalent
     for qp, qpk_list in qpk_qp.items():
         for i in range(len(qpk_list) - 1):
-            if not resource_prices_qpk[qpk_list[0]].is_equivalent(
-                resource_prices_qpk[qpk_list[i + 1]]
-            ):
+            if not resource_prices_qpk[qpk_list[0]] == resource_prices_qpk[qpk_list[i + 1]]:
                 return False
     # all tariffs are equivalent per period and assessment: they are invariant
     return True
diff --git a/tests/test_esipp_prices.py b/tests/test_esipp_prices.py
new file mode 100644
index 0000000000000000000000000000000000000000..31e4c55d7b5638d1a10fe0cf5896545cf1656f72
--- /dev/null
+++ b/tests/test_esipp_prices.py
@@ -0,0 +1,1123 @@
+# imports
+
+# standard
+import math
+
+# local
+# import numpy as np
+# import networkx as nx
+import pyomo.environ as pyo
+
+# import src.topupopt.problems.esipp.utils as utils
+from src.topupopt.data.misc.utils import generate_pseudo_unique_key
+from src.topupopt.problems.esipp.problem import InfrastructurePlanningProblem
+from src.topupopt.problems.esipp.network import Arcs, Network
+from src.topupopt.problems.esipp.resource import ResourcePrice
+# from src.topupopt.problems.esipp.utils import compute_cost_volume_metrics
+from src.topupopt.problems.esipp.utils import statistics
+from src.topupopt.problems.esipp.time import EconomicTimeFrame
+# from src.topupopt.problems.esipp.converter import Converter
+
+# *****************************************************************************
+# *****************************************************************************
+
+class TestESIPPProblem:
+    
+    solver = 'glpk'
+    # solver = 'scip'
+    # solver = 'cbc'
+    
+    def build_solve_ipp(
+        self,
+        solver: str = None,
+        solver_options: dict = None,
+        use_sos_arcs: bool = False,
+        arc_sos_weight_key: str = (InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE),
+        arc_use_real_variables_if_possible: bool = False,
+        use_sos_sense: bool = False,
+        sense_sos_weight_key: int = (
+            InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER
+        ),
+        sense_use_real_variables_if_possible: bool = False,
+        sense_use_arc_interfaces: bool = False,
+        perform_analysis: bool = False,
+        plot_results: bool = False,
+        print_solver_output: bool = False,
+        time_frame: EconomicTimeFrame = None,
+        networks: dict = None,
+        converters: dict = None,
+        static_losses_mode=None,
+        mandatory_arcs: list = None,
+        max_number_parallel_arcs: dict = None,
+        arc_groups_dict: dict = None,
+        init_aux_sets: bool = False,
+        # discount_rates: dict = None,
+        assessment_weights: dict = None,
+        simplify_problem: bool = False,
+    ):
+        if type(solver) == type(None):
+            solver = self.solver
+        
+        if type(assessment_weights) != dict:
+            assessment_weights = {}  # default
+
+        if type(converters) != dict:
+            converters = {}
+            
+        # time weights
+
+        # relative weight of time period
+
+        # one interval twice as long as the average is worth twice
+        # one interval half as long as the average is worth half
+
+        # time_weights = [
+        #     [time_period_duration/average_time_interval_duration
+        #       for time_period_duration in intraperiod_time_interval_duration]
+        #     for p in range(number_periods)]
+
+        time_weights = None  # nothing yet
+
+        normalised_time_interval_duration = None  # nothing yet
+
+        # create problem object
+
+        ipp = InfrastructurePlanningProblem(
+            # discount_rates=discount_rates,
+            time_frame=time_frame,
+            # reporting_periods=time_frame.reporting_periods,
+            # time_intervals=time_frame.time_interval_durations,
+            time_weights=time_weights,
+            normalised_time_interval_duration=normalised_time_interval_duration,
+            assessment_weights=assessment_weights,
+        )
+
+        # add networks and systems
+
+        for netkey, net in networks.items():
+            ipp.add_network(network_key=netkey, network=net)
+
+        # add converters
+
+        for cvtkey, cvt in converters.items():
+            ipp.add_converter(converter_key=cvtkey, converter=cvt)
+
+        # define arcs as mandatory
+
+        if type(mandatory_arcs) == list:
+            for full_arc_key in mandatory_arcs:
+                ipp.make_arc_mandatory(full_arc_key[0], full_arc_key[1:])
+
+        # if make_all_arcs_mandatory:
+
+        #     for network_key in ipp.networks:
+
+        #         for arc_key in ipp.networks[network_key].edges(keys=True):
+
+        #             # preexisting arcs are no good
+
+        #             if ipp.networks[network_key].edges[arc_key][
+        #                     Network.KEY_ARC_TECH].has_been_selected():
+
+        #                 continue
+
+        #             ipp.make_arc_mandatory(network_key, arc_key)
+
+        # set up the use of sos for arc selection
+
+        if use_sos_arcs:
+            for network_key in ipp.networks:
+                for arc_key in ipp.networks[network_key].edges(keys=True):
+                    if (
+                        ipp.networks[network_key]
+                        .edges[arc_key][Network.KEY_ARC_TECH]
+                        .has_been_selected()
+                    ):
+                        continue
+
+                    ipp.use_sos1_for_arc_selection(
+                        network_key,
+                        arc_key,
+                        use_real_variables_if_possible=(
+                            arc_use_real_variables_if_possible
+                        ),
+                        sos1_weight_method=arc_sos_weight_key,
+                    )
+
+        # set up the use of sos for flow sense determination
+
+        if use_sos_sense:
+            for network_key in ipp.networks:
+                for arc_key in ipp.networks[network_key].edges(keys=True):
+                    if not ipp.networks[network_key].edges[arc_key][
+                        Network.KEY_ARC_UND
+                    ]:
+                        continue
+
+                    ipp.use_sos1_for_flow_senses(
+                        network_key,
+                        arc_key,
+                        use_real_variables_if_possible=(
+                            sense_use_real_variables_if_possible
+                        ),
+                        use_interface_variables=sense_use_arc_interfaces,
+                        sos1_weight_method=sense_sos_weight_key,
+                    )
+
+        elif sense_use_arc_interfaces:  # set up the use of arc interfaces w/o sos1
+            for network_key in ipp.networks:
+                for arc_key in ipp.networks[network_key].edges(keys=True):
+                    if (
+                        ipp.networks[network_key]
+                        .edges[arc_key][Network.KEY_ARC_TECH]
+                        .has_been_selected()
+                    ):
+                        continue
+
+                    ipp.use_interface_variables_for_arc_selection(network_key, arc_key)
+
+        # static losses
+
+        if static_losses_mode == ipp.STATIC_LOSS_MODE_ARR:
+            ipp.place_static_losses_arrival_node()
+
+        elif static_losses_mode == ipp.STATIC_LOSS_MODE_DEP:
+            ipp.place_static_losses_departure_node()
+
+        elif static_losses_mode == ipp.STATIC_LOSS_MODE_US:
+            ipp.place_static_losses_upstream()
+
+        elif static_losses_mode == ipp.STATIC_LOSS_MODE_DS:
+            ipp.place_static_losses_downstream()
+
+        else:
+            raise ValueError("Unknown static loss modelling mode.")
+
+        # *********************************************************************
+
+        # groups
+
+        if type(arc_groups_dict) != type(None):
+            for key in arc_groups_dict:
+                ipp.create_arc_group(arc_groups_dict[key])
+
+        # *********************************************************************
+
+        # maximum number of parallel arcs
+
+        for key in max_number_parallel_arcs:
+            ipp.set_maximum_number_parallel_arcs(
+                network_key=key[0],
+                node_a=key[1],
+                node_b=key[2],
+                limit=max_number_parallel_arcs[key],
+            )
+
+        # *********************************************************************
+
+        if simplify_problem:
+            ipp.simplify_peak_total_assessments()
+
+        # *********************************************************************
+        
+        # instantiate (disable the default case v-a-v fixed losses)
+
+        # ipp.instantiate(place_fixed_losses_upstream_if_possible=False)
+
+        ipp.instantiate(initialise_ancillary_sets=init_aux_sets)
+        # ipp.instance.pprint()
+        # optimise
+        ipp.optimise(
+            solver_name=solver,
+            solver_options=solver_options,
+            output_options={},
+            print_solver_output=print_solver_output,
+        )
+        # ipp.instance.pprint()
+        # return the problem object
+        return ipp
+
+        # *********************************************************************
+        # *********************************************************************
+
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_increasing_imp_prices(self):
+        
+        # assessment
+        q = 0
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,)},
+            time_interval_durations={q: (1,)},
+        )
+
+        # 2 nodes: one import, one regular
+        mynet = Network()
+
+        # import node
+        node_IMP = 'I'
+        mynet.add_import_node(
+            node_key=node_IMP,
+            prices={
+                qpk: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
+                for qpk in tf.qpk()
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
+
+        # identify node types
+        mynet.identify_node_types()
+
+        # no sos, regular time intervals
+        ipp = self.build_solve_ipp(
+            solver_options={},
+            perform_analysis=False,
+            plot_results=False,  # True,
+            print_solver_output=False,
+            time_frame=tf,
+            networks={"mynet": mynet},
+            static_losses_mode=True,  # just to reach a line,
+            mandatory_arcs=[],
+            max_number_parallel_arcs={},
+            simplify_problem=False
+        )
+
+        assert not ipp.has_peak_total_assessments()
+        assert ipp.results["Problem"][0]["Number of constraints"] == 10
+        assert ipp.results["Problem"][0]["Number of variables"] == 11
+        assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
+
+        # *********************************************************************
+        # *********************************************************************
+
+        # validation
+
+        # the arc should be installed since it is required for feasibility
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+
+        # the imported flow should be 2.0
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
+            2.0,
+            abs_tol=1e-6,
+        )
+
+        # arc amplitude should be two
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
+            2.0,
+            abs_tol=0.01,
+        )
+
+        # capex should be four
+        assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
+
+        # sdncf should be -3.5
+        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3)
+
+        # the objective function should be -7.5
+        assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3)
+        
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_decreasing_imp_prices(self):
+        
+        # assessment
+        q = 0
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,)},
+            time_interval_durations={q: (1,)},
+        )
+
+        # 2 nodes: one import, one regular
+        mynet = Network()
+
+        # import node
+        node_IMP = 'I'
+        mynet.add_import_node(
+            node_key=node_IMP,
+            prices={
+                qpk: ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, 3.0])
+                for qpk in tf.qpk()
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
+
+        # identify node types
+        mynet.identify_node_types()
+
+        # no sos, regular time intervals
+        ipp = self.build_solve_ipp(
+            solver_options={},
+            perform_analysis=False,
+            plot_results=False,  # True,
+            print_solver_output=False, 
+            time_frame=tf,
+            networks={"mynet": mynet},
+            static_losses_mode=True,  # just to reach a line,
+            mandatory_arcs=[],
+            max_number_parallel_arcs={},
+            simplify_problem=False
+        )
+
+        assert not ipp.has_peak_total_assessments()
+        assert ipp.results["Problem"][0]["Number of constraints"] == 14 # 10 prior to nonconvex block
+        assert ipp.results["Problem"][0]["Number of variables"] == 13 # 11 prior to nonconvex block
+        assert ipp.results["Problem"][0]["Number of nonzeros"] == 28 # 20 prior to nonconvex block
+
+        # *********************************************************************
+        # *********************************************************************
+
+        # validation
+
+        # the arc should be installed since it is required for feasibility
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+
+        # the imported flow should be 2.0
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
+            2.0,
+            abs_tol=1e-6,
+        )
+
+        # arc amplitude should be two
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
+            2.0,
+            abs_tol=0.01,
+        )
+
+        # capex should be four
+        assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
+
+        # sdncf should be -2.5
+        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
+
+        # the objective function should be -6.5
+        assert math.isclose(pyo.value(ipp.instance.obj_f), -6.5, abs_tol=1e-3)
+                
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_decreasing_imp_prices_infinite_capacity(self):
+        
+        # assessment
+        q = 0
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,)},
+            time_interval_durations={q: (1,)},
+        )
+
+        # 2 nodes: one import, one regular
+        mynet = Network()
+
+        # import node
+        node_IMP = 'I'
+        mynet.add_import_node(
+            node_key=node_IMP,
+            prices={
+                qpk: ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
+                for qpk in tf.qpk()
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
+
+        # identify node types
+        mynet.identify_node_types()
+        
+        # trigger the error
+        error_raised = False
+        try:
+            # no sos, regular time intervals
+            self.build_solve_ipp(
+                solver_options={},
+                perform_analysis=False,
+                plot_results=False,  # True,
+                print_solver_output=False,
+                time_frame=tf,
+                networks={"mynet": mynet},
+                static_losses_mode=True,  # just to reach a line,
+                mandatory_arcs=[],
+                max_number_parallel_arcs={},
+                simplify_problem=False,
+            )
+        except Exception:
+            error_raised = True
+        assert error_raised
+
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_decreasing_exp_prices(self):
+        # assessment
+        q = 0
+        # time
+        number_intervals = 1
+        # periods
+        number_periods = 1
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,)},
+            time_interval_durations={q: (1,)},
+        )
+
+        # 2 nodes: one export, one regular
+        mynet = Network()
+
+        # import node
+        node_EXP = generate_pseudo_unique_key(mynet.nodes())
+        mynet.add_export_node(
+            node_key=node_EXP,
+            prices={
+                (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
+                for p in range(number_periods)
+                for k in range(number_intervals)
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
+
+        # identify node types
+        mynet.identify_node_types()
+
+        # no sos, regular time intervals
+        ipp = self.build_solve_ipp(
+            solver_options={},
+            perform_analysis=False,
+            plot_results=False,  # True,
+            print_solver_output=False,
+            time_frame=tf,
+            networks={"mynet": mynet},
+            static_losses_mode=True,  # just to reach a line,
+            mandatory_arcs=[],
+            max_number_parallel_arcs={},
+            simplify_problem=False,
+        )
+
+        assert not ipp.has_peak_total_assessments()
+        assert ipp.results["Problem"][0]["Number of constraints"] == 10
+        assert ipp.results["Problem"][0]["Number of variables"] == 11
+        assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
+
+        # *********************************************************************
+        # *********************************************************************
+
+        # validation
+
+        # the arc should be installed since it is required for feasibility
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+
+        # the exported flow should be 1.0
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
+            1.0,
+            abs_tol=1e-6,
+        )
+
+        # arc amplitude should be one
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
+            1.0,
+            abs_tol=0.01,
+        )
+
+        # capex should be three
+        assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
+
+        # sdncf should be 1.0
+        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3)
+
+        # the objective function should be -2.0
+        assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3)
+        
+    # TODO: trigger errors with infinite capacity segments in non-convex functions
+        
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_increasing_exp_prices(self):
+        # assessment
+        q = 0
+        # time
+        number_intervals = 1
+        # periods
+        number_periods = 1
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,)},
+            time_interval_durations={q: (1,)},
+        )
+
+        # 2 nodes: one export, one regular
+        mynet = Network()
+
+        # import node
+        node_EXP = generate_pseudo_unique_key(mynet.nodes())
+        mynet.add_export_node(
+            node_key=node_EXP,
+            prices={
+                (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.25, 3.0])
+                for p in range(number_periods)
+                for k in range(number_intervals)
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
+
+        # identify node types
+        mynet.identify_node_types()
+
+        # no sos, regular time intervals
+        ipp = self.build_solve_ipp(
+            solver_options={},
+            perform_analysis=False,
+            plot_results=False,  # True,
+            print_solver_output=False,
+            time_frame=tf,
+            networks={"mynet": mynet},
+            static_losses_mode=True,  # just to reach a line,
+            mandatory_arcs=[],
+            max_number_parallel_arcs={},
+            simplify_problem=False,
+        )
+
+        assert not ipp.has_peak_total_assessments()
+        assert ipp.results["Problem"][0]["Number of constraints"] == 14 # 10 before nonconvex block
+        assert ipp.results["Problem"][0]["Number of variables"] == 13 # 11 before nonconvex block
+        assert ipp.results["Problem"][0]["Number of nonzeros"] == 28 # 20 before nonconvex block
+
+        # *********************************************************************
+        # *********************************************************************
+
+        # validation
+
+        # the arc should be installed since it is required for feasibility
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+
+        # the exported flow should be 1.0
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
+            1.0,
+            abs_tol=1e-6,
+        )
+
+        # arc amplitude should be one
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
+            1.0,
+            abs_tol=0.01,
+        )
+
+        # capex should be three
+        assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
+
+        # sdncf should be 0.75
+        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 0.75, abs_tol=1e-3)
+
+        # the objective function should be -2.25
+        assert math.isclose(pyo.value(ipp.instance.obj_f), -2.25, abs_tol=1e-3)
+                
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_increasing_exp_prices_infinite_capacity(self):
+        # assessment
+        q = 0
+        # time
+        number_intervals = 1
+        # periods
+        number_periods = 1
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,)},
+            time_interval_durations={q: (1,)},
+        )
+
+        # 2 nodes: one export, one regular
+        mynet = Network()
+
+        # import node
+        node_EXP = generate_pseudo_unique_key(mynet.nodes())
+        mynet.add_export_node(
+            node_key=node_EXP,
+            prices={
+                (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.25, None])
+                for p in range(number_periods)
+                for k in range(number_intervals)
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
+
+        # identify node types
+        mynet.identify_node_types()
+        
+        # trigger the error
+        error_raised = False
+        try:
+            # no sos, regular time intervals
+            self.build_solve_ipp(
+                solver_options={},
+                perform_analysis=False,
+                plot_results=False,  # True,
+                print_solver_output=False,
+                time_frame=tf,
+                networks={"mynet": mynet},
+                static_losses_mode=True,  # just to reach a line,
+                mandatory_arcs=[],
+                max_number_parallel_arcs={},
+                simplify_problem=False,
+            )
+        except Exception:
+            error_raised = True
+        assert error_raised
+
+    # *************************************************************************
+    # *************************************************************************
+
+    def test_problem_increasing_imp_decreasing_exp_prices(self):
+        # scenario
+        q = 0
+        # time
+        number_intervals = 2
+        # periods
+        number_periods = 1
+
+        tf = EconomicTimeFrame(
+            discount_rate=0.0,
+            reporting_periods={q: (0,)},
+            reporting_period_durations={q: (365 * 24 * 3600,)},
+            time_intervals={q: (0,1)},
+            time_interval_durations={q: (1,1)},
+        )
+
+        # 3 nodes: one import, one export, one regular
+        mynet = Network()
+
+        # import node
+        node_IMP = 'I'
+        mynet.add_import_node(
+            node_key=node_IMP,
+            prices={
+                (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
+                for p in range(number_periods)
+                for k in range(number_intervals)
+            },
+        )
+
+        # export node
+        node_EXP = generate_pseudo_unique_key(mynet.nodes())
+        mynet.add_export_node(
+            node_key=node_EXP,
+            prices={
+                (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
+                for p in range(number_periods)
+                for k in range(number_intervals)
+            },
+        )
+
+        # other nodes
+        node_A = 'A'
+        mynet.add_source_sink_node(
+            node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0}
+        )
+
+        # arc IA
+        arc_tech_IA = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5, (q, 1): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
+
+        # arc AE
+        arc_tech_AE = Arcs(
+            name="any",
+            efficiency={(q, 0): 0.5, (q, 1): 0.5},
+            efficiency_reverse=None,
+            static_loss=None,
+            capacity=[3],
+            minimum_cost=[2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+            validate=False,
+        )
+        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE)
+
+        # identify node types
+        mynet.identify_node_types()
+
+        # no sos, regular time intervals
+        ipp = self.build_solve_ipp(
+            solver_options={},
+            perform_analysis=False,
+            plot_results=False,  # True,
+            print_solver_output=False,
+            time_frame=tf,
+            networks={"mynet": mynet},
+            static_losses_mode=True,  # just to reach a line,
+            mandatory_arcs=[],
+            max_number_parallel_arcs={},
+            simplify_problem=False,
+            # discount_rates={0: (0.0,)},
+        )
+
+        assert not ipp.has_peak_total_assessments()
+        assert ipp.results["Problem"][0]["Number of constraints"] == 23
+        assert ipp.results["Problem"][0]["Number of variables"] == 26
+        assert ipp.results["Problem"][0]["Number of nonzeros"] == 57
+
+        # *********************************************************************
+        # *********************************************************************
+
+        # validation
+
+        # the arc should be installed since it is required for feasibility
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+        # the arc should be installed since it is required for feasibility
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+
+        # interval 0: import only
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
+            2.0,
+            abs_tol=1e-6,
+        )
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
+            0.0,
+            abs_tol=1e-6,
+        )
+        # interval 1: export only
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]),
+            0.0,
+            abs_tol=1e-6,
+        )
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]),
+            1.0,
+            abs_tol=1e-6,
+        )
+
+        # IA amplitude
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
+            2.0,
+            abs_tol=0.01,
+        )
+        # AE amplitude
+        assert math.isclose(
+            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
+            1.0,
+            abs_tol=0.01,
+        )
+
+        # capex should be 7.0: 4+3
+        assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3)
+
+        # sdncf should be -2.5: -3.5+1.0
+        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
+
+        # the objective function should be -9.5: -7.0-2.5
+        assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3)
+
+            
+    # *************************************************************************
+    # *************************************************************************
+        
+    def test_direct_imp_exp_network_higher_exp_prices(self):
+        
+        # time frame
+        q = 0
+        tf = EconomicTimeFrame(
+            discount_rate=3.5/100,
+            reporting_periods={q: (0,1)},
+            reporting_period_durations={q: (365 * 24 * 3600,365 * 24 * 3600)},
+            time_intervals={q: (0,1)},
+            time_interval_durations={q: (1,1)},
+        )    
+        
+        # 2 nodes: one import and one export
+        mynet = Network()
+    
+        # import node
+        imp_node_key = 'thatimpnode'
+        imp_prices = {
+            qpk: ResourcePrice(
+                prices=0.5,
+                volumes=None,
+            )
+            for qpk in tf.qpk()
+            }
+        mynet.add_import_node(
+            node_key=imp_node_key,
+            prices=imp_prices
+        )
+    
+        # export node
+        exp_node_key = 'thatexpnode'
+        exp_prices = {
+            qpk: ResourcePrice(
+                prices=1.5,
+                volumes=None,
+            )
+            for qpk in tf.qpk()
+            }
+        mynet.add_export_node(
+            node_key=exp_node_key,
+            prices=exp_prices,
+        )
+        
+        # add arc without fixed losses from import node to export
+        arc_tech_IE = Arcs(
+            name="IE",
+            # efficiency=[1, 1, 1, 1],
+            efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1},
+            efficiency_reverse=None,
+            static_loss=None,
+            validate=False,
+            capacity=[0.5, 1.0, 2.0],
+            minimum_cost=[5, 5.1, 5.2],
+            specific_capacity_cost=1,
+            capacity_is_instantaneous=False,
+        )
+        mynet.add_directed_arc(
+            node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE
+        )
+    
+        # identify node types
+        mynet.identify_node_types()
+    
+        # no sos, regular time intervals
+        ipp = self.build_solve_ipp(
+            solver_options={},
+            perform_analysis=False,
+            plot_results=False,  # True,
+            print_solver_output=False,
+            networks={"mynet": mynet},
+            time_frame=tf,
+            static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
+            mandatory_arcs=[],
+            max_number_parallel_arcs={}
+        )
+    
+        # export prices are higher: it makes sense to install the arc since the
+        # revenue (@ max. cap.) exceeds the cost of installing the arc
+
+        assert (
+            True
+            in ipp.networks["mynet"]
+            .edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
+            .options_selected
+        )
+
+        # overview
+        (imports_qpk, 
+         exports_qpk, 
+         balance_qpk, 
+         import_costs_qpk, 
+         export_revenue_qpk, 
+         ncf_qpk, 
+         aggregate_static_demand_qpk,
+         aggregate_static_supply_qpk,
+         aggregate_static_balance_qpk) = statistics(ipp)
+
+        # there should be imports (the installed arc carries flow from the import node)
+
+        abs_tol = 1e-6
+        
+        abs_tol = 1e-3
+        imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
+        assert imports_qp > 0.0 - abs_tol
+
+        abs_tol = 1e-3
+        import_costs_qp = sum(import_costs_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
+        assert import_costs_qp > 0.0 - abs_tol
+
+        # there should be exports (export prices exceed import prices, so flow is sold)
+
+        abs_tol = 1e-2
+
+        exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
+        export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
+        assert exports_qp > 0.0 - abs_tol
+        assert export_revenue_qp > 0.0 - abs_tol
+
+        # the revenue should exceed the costs
+
+        abs_tol = 1e-2
+
+        assert (
+            export_revenue_qp > import_costs_qp - abs_tol
+        )
+
+        # the capex should be positive
+
+        abs_tol = 1e-6
+
+        assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol
+        
+    # *************************************************************************
+    # *************************************************************************
+
+# *****************************************************************************
+# *****************************************************************************
\ No newline at end of file
diff --git a/tests/test_esipp_problem.py b/tests/test_esipp_problem.py
index 8cf5126c30f3986ec2034a22686ecc7a7f4f4458..3f69ee09d398608ebd37a34d7d1801926353ecf8 100644
--- a/tests/test_esipp_problem.py
+++ b/tests/test_esipp_problem.py
@@ -597,389 +597,6 @@ class TestESIPPProblem:
 
     # *************************************************************************
     # *************************************************************************
-
-    def test_problem_increasing_imp_prices(self):
-        
-        # assessment
-        q = 0
-
-        tf = EconomicTimeFrame(
-            discount_rate=0.0,
-            reporting_periods={q: (0,)},
-            reporting_period_durations={q: (365 * 24 * 3600,)},
-            time_intervals={q: (0,)},
-            time_interval_durations={q: (1,)},
-        )
-
-        # 2 nodes: one import, one regular
-        mynet = Network()
-
-        # import node
-        node_IMP = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_import_node(
-            node_key=node_IMP,
-            prices={
-                # (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
-                # for p in range(number_periods)
-                # for k in range(number_intervals)
-                qpk: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
-                for qpk in tf.qpk()
-            },
-        )
-
-        # other nodes
-        node_A = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
-
-        # arc IA
-        arc_tech_IA = Arcs(
-            name="any",
-            efficiency={(q, 0): 0.5},
-            efficiency_reverse=None,
-            static_loss=None,
-            capacity=[3],
-            minimum_cost=[2],
-            specific_capacity_cost=1,
-            capacity_is_instantaneous=False,
-            validate=False,
-        )
-        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
-
-        # identify node types
-        mynet.identify_node_types()
-
-        # no sos, regular time intervals
-        ipp = self.build_solve_ipp(
-            solver_options={},
-            perform_analysis=False,
-            plot_results=False,  # True,
-            print_solver_output=False,
-            time_frame=tf,
-            networks={"mynet": mynet},
-            static_losses_mode=True,  # just to reach a line,
-            mandatory_arcs=[],
-            max_number_parallel_arcs={},
-            simplify_problem=False
-        )
-
-        assert not ipp.has_peak_total_assessments()
-        assert ipp.results["Problem"][0]["Number of constraints"] == 10
-        assert ipp.results["Problem"][0]["Number of variables"] == 11
-        assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
-
-        # *********************************************************************
-        # *********************************************************************
-
-        # validation
-
-        # the arc should be installed since it is required for feasibility
-        assert (
-            True
-            in ipp.networks["mynet"]
-            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
-            .options_selected
-        )
-
-        # the flows should be 1.0, 0.0 and 2.0
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
-            2.0,
-            abs_tol=1e-6,
-        )
-
-        # arc amplitude should be two
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
-            2.0,
-            abs_tol=0.01,
-        )
-
-        # capex should be four
-        assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
-
-        # sdncf should be -3.5
-        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3)
-
-        # the objective function should be -7.5
-        assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3)
-
-    # *************************************************************************
-    # *************************************************************************
-
-    def test_problem_decreasing_exp_prices(self):
-        # assessment
-        q = 0
-        # time
-        number_intervals = 1
-        # periods
-        number_periods = 1
-
-        tf = EconomicTimeFrame(
-            discount_rate=0.0,
-            reporting_periods={q: (0,)},
-            reporting_period_durations={q: (365 * 24 * 3600,)},
-            time_intervals={q: (0,)},
-            time_interval_durations={q: (1,)},
-        )
-
-        # 2 nodes: one export, one regular
-        mynet = Network()
-
-        # import node
-        node_EXP = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_export_node(
-            node_key=node_EXP,
-            prices={
-                (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
-                for p in range(number_periods)
-                for k in range(number_intervals)
-            },
-        )
-
-        # other nodes
-        node_A = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
-
-        # arc IA
-        arc_tech_IA = Arcs(
-            name="any",
-            efficiency={(q, 0): 0.5},
-            efficiency_reverse=None,
-            static_loss=None,
-            capacity=[3],
-            minimum_cost=[2],
-            specific_capacity_cost=1,
-            capacity_is_instantaneous=False,
-            validate=False,
-        )
-        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
-
-        # identify node types
-        mynet.identify_node_types()
-
-        # no sos, regular time intervals
-        ipp = self.build_solve_ipp(
-            solver_options={},
-            perform_analysis=False,
-            plot_results=False,  # True,
-            print_solver_output=False,
-            time_frame=tf,
-            networks={"mynet": mynet},
-            static_losses_mode=True,  # just to reach a line,
-            mandatory_arcs=[],
-            max_number_parallel_arcs={},
-            simplify_problem=False,
-        )
-
-        assert not ipp.has_peak_total_assessments()
-        assert ipp.results["Problem"][0]["Number of constraints"] == 10
-        assert ipp.results["Problem"][0]["Number of variables"] == 11
-        assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
-
-        # *********************************************************************
-        # *********************************************************************
-
-        # validation
-
-        # the arc should be installed since it is required for feasibility
-        assert (
-            True
-            in ipp.networks["mynet"]
-            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
-            .options_selected
-        )
-
-        # the flows should be 1.0, 0.0 and 2.0
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
-            1.0,
-            abs_tol=1e-6,
-        )
-
-        # arc amplitude should be two
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
-            1.0,
-            abs_tol=0.01,
-        )
-
-        # capex should be four
-        assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
-
-        # sdncf should be 1.0
-        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3)
-
-        # the objective function should be -7.5
-        assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3)
-
-    # *************************************************************************
-    # *************************************************************************
-
-    def test_problem_increasing_imp_decreasing_exp_prices(self):
-        # scenario
-        q = 0
-        # time
-        number_intervals = 2
-        # periods
-        number_periods = 1
-
-        tf = EconomicTimeFrame(
-            discount_rate=0.0,
-            reporting_periods={q: (0,)},
-            reporting_period_durations={q: (365 * 24 * 3600,)},
-            time_intervals={q: (0,1)},
-            time_interval_durations={q: (1,1)},
-        )
-
-        # 3 nodes: one import, one export, one regular
-        mynet = Network()
-
-        # import node
-        node_IMP = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_import_node(
-            node_key=node_IMP,
-            prices={
-                (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
-                for p in range(number_periods)
-                for k in range(number_intervals)
-            },
-        )
-
-        # export node
-        node_EXP = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_export_node(
-            node_key=node_EXP,
-            prices={
-                (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
-                for p in range(number_periods)
-                for k in range(number_intervals)
-            },
-        )
-
-        # other nodes
-        node_A = generate_pseudo_unique_key(mynet.nodes())
-        mynet.add_source_sink_node(
-            node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0}
-        )
-
-        # arc IA
-        arc_tech_IA = Arcs(
-            name="any",
-            efficiency={(q, 0): 0.5, (q, 1): 0.5},
-            efficiency_reverse=None,
-            static_loss=None,
-            capacity=[3],
-            minimum_cost=[2],
-            specific_capacity_cost=1,
-            capacity_is_instantaneous=False,
-            validate=False,
-        )
-        mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
-
-        # arc AE
-        arc_tech_AE = Arcs(
-            name="any",
-            efficiency={(q, 0): 0.5, (q, 1): 0.5},
-            efficiency_reverse=None,
-            static_loss=None,
-            capacity=[3],
-            minimum_cost=[2],
-            specific_capacity_cost=1,
-            capacity_is_instantaneous=False,
-            validate=False,
-        )
-        mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE)
-
-        # identify node types
-        mynet.identify_node_types()
-
-        # no sos, regular time intervals
-        ipp = self.build_solve_ipp(
-            solver_options={},
-            perform_analysis=False,
-            plot_results=False,  # True,
-            print_solver_output=False,
-            time_frame=tf,
-            networks={"mynet": mynet},
-            static_losses_mode=True,  # just to reach a line,
-            mandatory_arcs=[],
-            max_number_parallel_arcs={},
-            simplify_problem=False,
-            # discount_rates={0: (0.0,)},
-        )
-
-        assert not ipp.has_peak_total_assessments()
-        assert ipp.results["Problem"][0]["Number of constraints"] == 23
-        assert ipp.results["Problem"][0]["Number of variables"] == 26
-        assert ipp.results["Problem"][0]["Number of nonzeros"] == 57
-
-        # *********************************************************************
-        # *********************************************************************
-
-        # validation
-
-        # the arc should be installed since it is required for feasibility
-        assert (
-            True
-            in ipp.networks["mynet"]
-            .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
-            .options_selected
-        )
-        # the arc should be installed since it is required for feasibility
-        assert (
-            True
-            in ipp.networks["mynet"]
-            .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
-            .options_selected
-        )
-
-        # interval 0: import only
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
-            2.0,
-            abs_tol=1e-6,
-        )
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
-            0.0,
-            abs_tol=1e-6,
-        )
-        # interval 1: export only
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]),
-            0.0,
-            abs_tol=1e-6,
-        )
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]),
-            1.0,
-            abs_tol=1e-6,
-        )
-
-        # IA amplitude
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
-            2.0,
-            abs_tol=0.01,
-        )
-        # AE amplitude
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
-            1.0,
-            abs_tol=0.01,
-        )
-
-        # capex should be 7.0: 4+3
-        assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3)
-
-        # sdncf should be -2.5: -3.5+1.0
-        assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
-
-        # the objective function should be -9.5: -7.5-2.5
-        assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3)
-
-    # *************************************************************************
-    # *************************************************************************
     
     def test_problem_two_scenarios(self):
         
@@ -3745,141 +3362,6 @@ class TestESIPPProblem:
         # there should be no capex
         abs_tol = 1e-6
         assert math.isclose(pyo.value(ipp.instance.var_capex), 0.0, abs_tol=abs_tol)
-            
-    # *************************************************************************
-    # *************************************************************************
-        
-    def test_direct_imp_exp_network_higher_exp_prices(self):
-        
-        # time frame
-        q = 0
-        tf = EconomicTimeFrame(
-            discount_rate=3.5/100,
-            reporting_periods={q: (0,1)},
-            reporting_period_durations={q: (365 * 24 * 3600,365 * 24 * 3600)},
-            time_intervals={q: (0,1)},
-            time_interval_durations={q: (1,1)},
-        )    
-        
-        # 4 nodes: one import, one export, two supply/demand nodes
-        mynet = Network()
-    
-        # import node
-        imp_node_key = 'thatimpnode'
-        imp_prices = {
-            qpk: ResourcePrice(
-                prices=0.5,
-                volumes=None,
-            )
-            for qpk in tf.qpk()
-            }
-        mynet.add_import_node(
-            node_key=imp_node_key,
-            prices=imp_prices
-        )
-    
-        # export node
-        exp_node_key = 'thatexpnode'
-        exp_prices = {
-            qpk: ResourcePrice(
-                prices=1.5,
-                volumes=None,
-            )
-            for qpk in tf.qpk()
-            }
-        mynet.add_export_node(
-            node_key=exp_node_key,
-            prices=exp_prices,
-        )
-        
-        # add arc without fixed losses from import node to export
-        arc_tech_IE = Arcs(
-            name="IE",
-            # efficiency=[1, 1, 1, 1],
-            efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1},
-            efficiency_reverse=None,
-            static_loss=None,
-            validate=False,
-            capacity=[0.5, 1.0, 2.0],
-            minimum_cost=[5, 5.1, 5.2],
-            specific_capacity_cost=1,
-            capacity_is_instantaneous=False,
-        )
-        mynet.add_directed_arc(
-            node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE
-        )
-    
-        # identify node types
-        mynet.identify_node_types()
-    
-        # no sos, regular time intervals
-        ipp = self.build_solve_ipp(
-            solver_options={},
-            perform_analysis=False,
-            plot_results=False,  # True,
-            print_solver_output=False,
-            networks={"mynet": mynet},
-            time_frame=tf,
-            static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
-            mandatory_arcs=[],
-            max_number_parallel_arcs={}
-        )
-    
-        # export prices are higher: it makes sense to install the arc since the
-        # revenue (@ max. cap.) exceeds the cost of installing the arc
-
-        assert (
-            True
-            in ipp.networks["mynet"]
-            .edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
-            .options_selected
-        )
-
-        # overview
-        (imports_qpk, 
-         exports_qpk, 
-         balance_qpk, 
-         import_costs_qpk, 
-         export_revenue_qpk, 
-         ncf_qpk, 
-         aggregate_static_demand_qpk,
-         aggregate_static_supply_qpk,
-         aggregate_static_balance_qpk) = statistics(ipp)
-
-        # there should be no imports
-
-        abs_tol = 1e-6
-        
-        abs_tol = 1e-3
-        imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
-        assert imports_qp > 0.0 - abs_tol
-
-        abs_tol = 1e-3
-        import_costs_qp = sum(import_costs_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
-        assert import_costs_qp > 0.0 - abs_tol
-
-        # there should be no exports
-
-        abs_tol = 1e-2
-
-        exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
-        export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
-        assert exports_qp > 0.0 - abs_tol
-        assert export_revenue_qp > 0.0 - abs_tol
-
-        # the revenue should exceed the costs
-
-        abs_tol = 1e-2
-
-        assert (
-            export_revenue_qp > import_costs_qp - abs_tol
-        )
-
-        # the capex should be positive
-
-        abs_tol = 1e-6
-
-        assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol
         
     # *************************************************************************
     # *************************************************************************
diff --git a/tests/test_esipp_resource.py b/tests/test_esipp_resource.py
index 21cd2ba252d98afd58c2c9d63e4ee557721a03b3..0fc4a96bbdfb4984c05adfba2b83bb617251a8dc 100644
--- a/tests/test_esipp_resource.py
+++ b/tests/test_esipp_resource.py
@@ -132,8 +132,8 @@ class TestResourcePrice:
         volumes = None
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=[prices], volumes=[volumes])
-        assert res_p1.is_equivalent(res_p2)
-        assert res_p2.is_equivalent(res_p1)
+        assert res_p1 == res_p2
+        assert res_p2 == res_p1
 
         # *********************************************************************
 
@@ -144,8 +144,8 @@ class TestResourcePrice:
         volumes = None
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes])
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -156,8 +156,8 @@ class TestResourcePrice:
         volumes = None
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert res_p1.is_equivalent(res_p2)
-        assert res_p2.is_equivalent(res_p1)
+        assert res_p1 == res_p2
+        assert res_p2 == res_p1
 
         # *********************************************************************
 
@@ -168,8 +168,8 @@ class TestResourcePrice:
         volumes = None
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
         # *********************************************************************
@@ -183,8 +183,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=[prices], volumes=[volumes])
-        assert res_p1.is_equivalent(res_p2)
-        assert res_p2.is_equivalent(res_p1)
+        assert res_p1 == res_p2
+        assert res_p2 == res_p1
 
         # *********************************************************************
 
@@ -195,8 +195,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes])
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -207,8 +207,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert res_p1.is_equivalent(res_p2)
-        assert res_p2.is_equivalent(res_p1)
+        assert res_p1 == res_p2
+        assert res_p2 == res_p1
 
         # *********************************************************************
 
@@ -219,8 +219,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -231,8 +231,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=[prices], volumes=[volumes + 1])
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -243,8 +243,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices, volumes=volumes + 1)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -255,8 +255,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=[prices], volumes=[None])
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -267,8 +267,8 @@ class TestResourcePrice:
         volumes = 1
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices, volumes=None)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
         # *********************************************************************
@@ -294,8 +294,8 @@ class TestResourcePrice:
         volumes = [1, None]
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert res_p1.is_equivalent(res_p2)
-        assert res_p2.is_equivalent(res_p1)
+        assert res_p1 == res_p2
+        assert res_p2 == res_p1
 
         # two segments, no volume limit, same format
         # prices do not match = False
@@ -306,8 +306,8 @@ class TestResourcePrice:
         prices = [2, 3]
         volumes = [1, None]
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -320,8 +320,8 @@ class TestResourcePrice:
         volumes = [1, 3]
         res_p1 = ResourcePrice(prices=prices, volumes=volumes)
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert res_p1.is_equivalent(res_p2)
-        assert res_p2.is_equivalent(res_p1)
+        assert res_p1 == res_p2
+        assert res_p2 == res_p1
 
         # two segments, volume limit, same format: False
         # prices do not match = False
@@ -332,8 +332,8 @@ class TestResourcePrice:
         prices = [1, 4]
         volumes = [1, 4]
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
 
@@ -348,8 +348,8 @@ class TestResourcePrice:
         prices = [1, 3]
         volumes = [1, 5]
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # single segment, volume limit, same format
         # volumes do not match = False
@@ -360,8 +360,8 @@ class TestResourcePrice:
         prices = [1, 3]
         volumes = [1, None]
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
         # *********************************************************************
@@ -374,8 +374,8 @@ class TestResourcePrice:
         prices = [1, 3, 5]
         volumes = [1, 4, None]
         res_p2 = ResourcePrice(prices=prices, volumes=volumes)
-        assert not res_p1.is_equivalent(res_p2)
-        assert not res_p2.is_equivalent(res_p1)
+        assert not res_p1 == res_p2
+        assert not res_p2 == res_p1
 
         # *********************************************************************
         # *********************************************************************