Compare revisions: pmag/topupopt

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (15)
Showing with 5818 additions and 2785 deletions
......@@ -5,6 +5,8 @@ import numpy as np
from geopandas import GeoDataFrame
from ...misc.utils import discrete_sinusoid_matching_integral
from ...misc.utils import create_profile_using_time_weighted_state
from ...misc.utils import generate_manual_state_correlated_profile
from ...misc.utils import generate_state_correlated_profile, generate_profile
from .bbr import label_bbr_entrance_id, label_bbr_housing_area
# *****************************************************************************
......@@ -60,12 +62,15 @@ def heat_demand_dict_by_building_entrance(
number_intervals: int,
time_interval_durations: list,
bdg_specific_demand: dict,
bdg_min_max_ratio: dict,
bdg_demand_phase_shift: dict = None,
key_osm_entr_id: str = label_osm_entrance_id,
key_bbr_entr_id: str = label_bbr_entrance_id,
avg_state: list = None,
state_correlates_with_output: bool = False,
deviation_gain: float = 1.0,
solver: str = 'glpk',
**kwargs
) -> dict:
# initialise dict for each building entrance
......@@ -84,11 +89,8 @@ def heat_demand_dict_by_building_entrance(
# for each building
for building_index in building_indexes:
# get relevant data
area = gdf_buildings.loc[building_index][label_bbr_housing_area]
# generate the profile
if type(avg_state) == type(None):
# ignore states
heat_demand_profiles.append(
......@@ -96,7 +98,7 @@ def heat_demand_dict_by_building_entrance(
discrete_sinusoid_matching_integral(
bdg_specific_demand[building_index] * area,
time_interval_durations=time_interval_durations,
min_max_ratio=bdg_min_max_ratio[building_index],
phase_shift_radians=(
bdg_demand_phase_shift[building_index]
),
......@@ -104,21 +106,49 @@ def heat_demand_dict_by_building_entrance(
)
)
elif type(deviation_gain) == type(None):
# states matter but the gain must be determined
heat_demand_profiles.append(
np.array(
generate_state_correlated_profile(
integration_result=(
bdg_specific_demand[building_index] * area
),
states=avg_state,
time_interval_durations=time_interval_durations,
states_correlate_profile=state_correlates_with_output,
min_max_ratio=bdg_min_max_ratio[building_index],
solver=solver
)[0] # keep the profile; the call also returns the gain and offset
)
)
else:
# states matter and the gain is predefined
heat_demand_profiles.append(
np.array(
generate_manual_state_correlated_profile(
integration_result=(
bdg_specific_demand[building_index] * area
),
states=avg_state,
time_interval_durations=time_interval_durations,
deviation_gain=deviation_gain
)
)
)
# add the profiles, time step by time step
if len(heat_demand_profiles) == 0:
......@@ -132,10 +162,74 @@ def heat_demand_dict_by_building_entrance(
# return
return demand_dict
# *****************************************************************************
# *****************************************************************************
# TODO: allow reusing the gain
def heat_demand_profiles(
gdf_osm: GeoDataFrame,
gdf_buildings: GeoDataFrame,
time_interval_durations: list,
assessments: list,
annual_heat_demand: dict,
air_temperature: dict = None,
reuse_deviation_gain: bool = True,
**kwargs
) -> dict:
# calculate the total area
total_area = total_heating_area(gdf_osm, gdf_buildings)
# initialise data dict
heat_demand_dict = {}
# for each building entrance
for osm_index in gdf_osm.index:
# initialise dict for each building entrance
bdg_entr_dict = {}
# find the indexes for each building leading to the current consumption point
building_indexes = gdf_buildings[
gdf_buildings[label_bbr_entrance_id] == gdf_osm.loc[osm_index][label_osm_entrance_id]
].index
for q in assessments:
# define the specific heat demand
specific_demand = annual_heat_demand[q]/total_area
#
# initialise dict for each building consumption point
bdg_profile_list = []
# for each building
for building_index in building_indexes:
# get relevant data
area = gdf_buildings.loc[building_index][label_bbr_housing_area]
# handle states
if type(air_temperature) != type(None):
kwargs['states'] = air_temperature[q]
# append the profile for each building to the list
_profile = generate_profile(
integration_result=specific_demand*area,
time_interval_durations=time_interval_durations,
**kwargs
# min_max_ratio,
# states,
# states_correlate_profile,
# solver,
# deviation_gain
)
bdg_profile_list.append(np.array(_profile))
# aggregate profiles for the same building entrance
bdg_entr_profile = (
[]
if len(bdg_profile_list) == 0 else
sum(profile for profile in bdg_profile_list)
)
# store the demand profile
bdg_entr_dict[q] = bdg_entr_profile
# add to the main dict
heat_demand_dict[osm_index] = bdg_entr_dict
return heat_demand_dict, total_area
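A hedged usage sketch for this entry point; the GeoDataFrames, assessment key, demand figure and temperature series below are hypothetical, and the trailing keyword arguments are forwarded to generate_profile:

# hypothetical inputs: gdf_osm/gdf_buildings prepared beforehand, a single
# assessment 'q0' with a 1 GWh annual demand and hourly air temperatures
demand_dict, total_area = heat_demand_profiles(
    gdf_osm=gdf_osm,
    gdf_buildings=gdf_buildings,
    time_interval_durations=[3600] * 8760,
    assessments=['q0'],
    annual_heat_demand={'q0': 1e6},  # kWh
    air_temperature={'q0': hourly_temperatures},  # forwarded as 'states'
    states_correlate_profile=False,  # colder weather, higher demand
    min_max_ratio=0.2,
    solver='glpk',
)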
# *****************************************************************************
# *****************************************************************************
def total_heating_area(
gdf_osm: GeoDataFrame,
......@@ -156,6 +250,5 @@ def total_heating_area(
area += gdf_buildings.loc[building_index][label_bbr_housing_area]
return area
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -1090,7 +1090,7 @@ def is_path_straight(
def find_simplifiable_paths(
network: nx.MultiDiGraph,
protected_nodes: list,
ignore_self_loops: bool = False,
consider_reversed_edges: bool = False,
include_both_directions: bool = False,
......@@ -1106,7 +1106,7 @@ def find_simplifiable_paths(
----------
network : nx.MultiDiGraph
The object describing the graph.
protected_nodes : list
A list of keys for nodes that cannot be in any straight path.
ignore_self_loops : bool, optional
If True, paths including self-loops can still be straight. If False,
......@@ -1139,7 +1139,7 @@ def find_simplifiable_paths(
node_key
for node_key in network.nodes()
# the node cannot be among those excluded
if node_key not in protected_nodes
# the node has to be linked to two other nodes other than itself
if len(set(neighbours(network, node_key, ignore_self_loops=True))) == 2
# exclude nodes with self-loops if desired:
......
......@@ -28,7 +28,7 @@ from .calculate import update_street_count, edge_lengths
# *****************************************************************************
def remove_self_loops(network: nx.MultiDiGraph) -> list:
"""
Removes self-loops from a directed graph defined in a MultiDiGraph object.
......@@ -39,11 +39,11 @@ def remove_self_loops(network: nx.MultiDiGraph):
Returns
-------
list
The keys to the nodes whose self-loops were removed.
"""
selflooping_nodes = list(gis_iden.find_self_loops(network))
for node in selflooping_nodes:
while network.has_edge(u=node, v=node):
......@@ -276,7 +276,9 @@ def transform_roundabouts_into_crossroads(
def remove_dead_ends(
network: nx.MultiDiGraph,
protected_nodes: list = None,
max_iterations: int = 1
) -> list:
"""
Removes dead ends (non-cyclical branches) from a directed graph.
......@@ -288,7 +290,7 @@ def remove_dead_ends(
----------
network : nx.MultiDiGraph
The object describing the directed graph.
protected_nodes : list, optional
A list of keys for the nodes that are not to be considered for removal.
The default is None, which means all nodes are under consideration.
max_iterations : int, optional
......@@ -301,8 +303,8 @@ def remove_dead_ends(
"""
if type(protected_nodes) == type(None):
protected_nodes = []
# while true
nodes_removed = []
......@@ -313,7 +315,7 @@ def remove_dead_ends(
target_nodes = [
node_key
for node_key in network.nodes()
if node_key not in protected_nodes
# if it has at most one neighbour other than itself
if len(set(gis_iden.neighbours(network, node_key, ignore_self_loops=True)))
<= 1
......@@ -505,20 +507,30 @@ def replace_path(network: nx.MultiDiGraph, path: list) -> tuple:
def remove_longer_parallel_edges(
network: nx.MultiDiGraph,
distance_key: str = osm.KEY_OSMNX_LENGTH,
ignore_edge_directions: bool = True,
protected_edges: list = None
) -> list:
"""
Removes longer parallel edges from the network.
Parallel edges refer to multiple edges connecting the same nodes. If there
are parallel edges between any given pair of nodes, only the shortest one
will be retained. If desired, edge directions can be taken into account
when selecting parallel edges. By default, they are ignored.
Parameters
----------
network : nx.MultiDiGraph
The object describing the graph.
distance_key : str, optional
The key used to obtain distances. The default is osm.KEY_OSMNX_LENGTH.
ignore_edge_directions : bool, optional
If True, edge directions are ignored. The default is True.
protected_edges : list, optional
A list of edges that should be retained. The default is None, which
means all edges are eligible.
Returns
-------
......@@ -526,12 +538,9 @@ def remove_longer_parallel_edges(
A list of the edges removed.
"""
if type(protected_edges) == type(None):
# default: empty list
protected_edges = []
edges_removed = []
for node_one in network.nodes():
for node_two in network.nodes():
......@@ -547,28 +556,31 @@ def remove_longer_parallel_edges(
list_edges = gis_iden.get_edges_from_a_to_b(
network, node_start=node_one, node_end=node_two
)
# if none exist, skip
if len(list_edges) == 0:
continue
# otherwise, find out which is the shortest one
# otherwise, sort them by distance (shorter distances first)
# note: protected edges are considered in the sorting too
sorted_edges = sorted(
(network.edges[edge_key][distance_key], edge_key)
for edge_key in list_edges
)
# edges to be removed (drop protected edges here)
edges_for_removal = tuple(
edge_tuple[1]
for edge_tuple in sorted_edges[1:]
if edge_tuple[1] not in protected_edges
)
# remove all but the shortest edge
network.remove_edges_from(edges_for_removal)
# update the list of edges removed
edges_removed.extend(edges_for_removal)
# return statement
return edges_removed
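A minimal behavioural sketch on a toy graph, assuming the OSMnx 'length' attribute as the distance key:

from networkx import MultiDiGraph

g = MultiDiGraph()
g.add_edge('a', 'b', length=100.0)  # shorter edge, kept
g.add_edge('a', 'b', length=250.0)  # longer parallel edge, removed
removed = remove_longer_parallel_edges(g, distance_key='length')
# expected: [('a', 'b', 1)], i.e. the key of the 250-unit edge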
# *****************************************************************************
# *****************************************************************************
def merge_points_into_linestring(
line: LineString,
points: tuple or list,
......
......@@ -11,8 +11,9 @@ from networkx import MultiDiGraph, MultiGraph
from pandas import MultiIndex, Series
from numpy import float64, int64
from geopandas import GeoDataFrame, read_file
from shapely.geometry import Point, LineString
import contextily as cx
from shapely import intersects
# local, internal
......@@ -740,10 +741,13 @@ def get_directed(
def simplify_network(
network: MultiDiGraph,
protected_nodes: list = None,
protected_edges: list = None,
dead_end_probing_depth: int = 5,
ignore_edge_directions: bool = True,
update_street_count_per_node: bool = True,
transform_roundabouts: bool = False,
max_number_iterations: int = 5,
**roundabout_conditions
):
"""
......@@ -754,14 +758,21 @@ def simplify_network(
network : MultiDiGraph
The object describing the network.
protected_nodes : list
The keys for the nodes that must be preserved.
protected_edges : list
The keys for the edges that must be preserved.
dead_end_probing_depth : int, optional
The maximum number of nodes a dead end can have to be detectable. The
default is 5.
ignore_edge_directions : bool, optional
If True, direction is ignored in the search for parallel edges and
simplifiable paths. The default is True.
update_street_count_per_node : bool, optional
If True, updates the street count on each node. The default is True.
transform_roundabouts : bool, optional
If True, roundabouts are to be transformed. The default is False.
max_number_iterations : int, optional
The maximum number of iterations. The default is 5.
**roundabout_conditions : keyword and value pairs
The conditions used to define which roundabouts are simplified.
......@@ -770,27 +781,60 @@ def simplify_network(
None.
"""
if type(protected_nodes) == type(None):
protected_nodes = []
if type(protected_edges) == type(None):
protected_edges = []
else:
# if there are protected edges, then the nodes involved in those edges
# must also be preserved, otherwise they can be removed too
protected_nodes.extend(
set(nn for nnn in protected_edges for nn in nnn[0:-1])
)
iteration_counter = 0
while iteration_counter < max_number_iterations:
# remove self loops (can create straight paths and dead ends)
looping_node_keys = gis_mod.remove_self_loops(network)
# remove longer parallel edges (can create dead ends and straight paths)
edge_keys = gis_mod.remove_longer_parallel_edges(
network,
ignore_edge_directions=ignore_edge_directions,
protected_edges=protected_edges,
)
# remove dead ends (can create straight paths)
node_keys = gis_mod.remove_dead_ends(
network,
protected_nodes=protected_nodes,
max_iterations=dead_end_probing_depth
)
# join segments (can create self-loops and parallel edges)
paths = gis_iden.find_simplifiable_paths(
network,
protected_nodes=protected_nodes,
consider_reversed_edges=ignore_edge_directions)
for path in paths:
gis_mod.replace_path(network, path)
# update iteration counter
iteration_counter += 1
# check if it makes sense to break out of the loop
if (len(looping_node_keys) == 0 and
len(edge_keys) == 0 and
len(paths) == 0 and
len(node_keys) == 0):
# no self-loops
# no edges were removed
# no paths were simplified
# no nodes were removed
break
# transform roundabouts into crossroads (can create straight paths)
if transform_roundabouts:
list_roundabout_nodes = gis_iden.find_roundabouts(network, **roundabout_conditions)
gis_mod.transform_roundabouts_into_crossroads(network, list_roundabout_nodes)
# update street count
if update_street_count_per_node:
gis_calc.update_street_count(network)
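A hedged usage sketch of the new signature; the network object and the node and edge keys below are hypothetical:

# network: a MultiDiGraph prepared beforehand (e.g., via OSMnx)
simplify_network(
    network,
    protected_nodes=[1001, 1002],  # nodes that must survive
    protected_edges=[(1001, 2005, 0)],  # edge key as a (u, v, k) tuple
    dead_end_probing_depth=5,
    ignore_edge_directions=True,
    transform_roundabouts=False,
    max_number_iterations=5,
)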
......@@ -1186,6 +1230,90 @@ def convert_edge_path(
# return statement
return node_path
# *****************************************************************************
# *****************************************************************************
def create_edge_geometry(
network: MultiDiGraph,
edge_key,
x_key = osm.KEY_OSMNX_X,
y_key = osm.KEY_OSMNX_Y) -> LineString:
"Returns a newly-created geometry for a given edge."
return LineString(
[(network.nodes[edge_key[0]][x_key],
network.nodes[edge_key[0]][y_key]),
(network.nodes[edge_key[1]][x_key],
network.nodes[edge_key[1]][y_key])]
)
# *****************************************************************************
# *****************************************************************************
def find_overlapping_edges(
network: MultiDiGraph,
excluded_edges: list = None
) -> list:
"""
Returns a list of key pairs for edges whose geometries overlap.
Parameters
----------
network : MultiDiGraph
The object describing the network.
excluded_edges : list, optional
A list of edges that should not be considered. The default is None, in
which case all edges in the network object will be considered.
Returns
-------
list
A list containing key pairs for overlapping edges.
"""
# check if there are excluded edges
if type(excluded_edges) == type(None):
excluded_edges = list()
# initialise the list of edges to check
edges = list(
edge_key
for edge_key in network.edges(keys=True)
if edge_key not in excluded_edges
)
visited_edges = []
out = []
# for each edge
for edge_key in edges:
# remove the current edge so it is not considered again
visited_edges.append(edge_key)
# for each other edge
for other_edge_key in edges:
# skip edges having nodes in common
# this will also skip identical edges
if edge_key[0] in other_edge_key[0:2] or edge_key[1] in other_edge_key[0:2]:
# has nodes in common, skip
continue
# skip edges that have already been considered in the first loop
if other_edge_key in visited_edges:
# this edge has already been tested against all other edges
continue
# first edge
if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]:
first_geo = network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY]
else:
first_geo = create_edge_geometry(network, edge_key)
# second edge
if osm.KEY_OSMNX_GEOMETRY in network.edges[other_edge_key]:
second_geo = network.edges[other_edge_key][osm.KEY_OSMNX_GEOMETRY]
else:
second_geo = create_edge_geometry(network, other_edge_key)
# check if they intersect
if intersects(first_geo, second_geo):
# they do, add tuple of the edges to the output
out.append((edge_key, other_edge_key))
# return tuples of overlapping edges
return out
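A small self-contained check, assuming osm.KEY_OSMNX_X and osm.KEY_OSMNX_Y map to the usual OSMnx 'x' and 'y' node attributes:

from networkx import MultiDiGraph

g = MultiDiGraph()
g.add_node(0, x=0.0, y=0.0)
g.add_node(1, x=1.0, y=1.0)
g.add_node(2, x=0.0, y=1.0)
g.add_node(3, x=1.0, y=0.0)
g.add_edge(0, 1)  # one diagonal of the unit square
g.add_edge(2, 3)  # the crossing diagonal
# the diagonals intersect and share no nodes, so the pair is reported:
# expected output: [((0, 1, 0), (2, 3, 0))]
print(find_overlapping_edges(g))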
# *****************************************************************************
# *****************************************************************************
......@@ -2,14 +2,11 @@
# *****************************************************************************
# standard
import uuid
import math
from statistics import mean
# local, external
import pyomo.environ as pyo
# *****************************************************************************
# *****************************************************************************
......@@ -40,7 +37,7 @@ def generate_pseudo_unique_key(key_list: tuple, max_iterations: int = 10) -> str
def discrete_sinusoid_matching_integral(
integration_result: float,
time_interval_durations: list,
min_max_ratio: float,
phase_shift_radians: float = None,
) -> list:
"""
......@@ -57,7 +54,7 @@ def discrete_sinusoid_matching_integral(
where:
a = b*(1-min_max_ratio)/(1+min_max_ratio)
b = integration_result/integration_period
......@@ -71,7 +68,7 @@ def discrete_sinusoid_matching_integral(
The result of integrating the sinusoidal function for one period.
time_interval_durations : list
The time interval durations for each sample.
min_max_ratio : float
The ratio between the minimum and maximum values of the function.
phase_shift_radians : float, optional
The phase shift for the sinusoidal function. The default is None, which
......@@ -90,7 +87,7 @@ def discrete_sinusoid_matching_integral(
b = integration_result / integration_period
a = b * (1 - min_max_ratio) / (1 + min_max_ratio)
alpha = 2 * math.pi / integration_period
......@@ -127,7 +124,7 @@ def synch_profile(profile: list, reference_profile: list, synch: bool = True) ->
By default, the profiles are synched: the highest sample in one is placed
in the same position as the highest sample in the other; the second highest
sample is placed in the same position as the second highest sample in the
other profile; and so on. Alternatively, the profiles can be synched in
reverse: the highest sample in one profile is placed in the same position
as the lowest sample in the other; and so on and so forth.
......@@ -188,10 +185,10 @@ def synch_profile(profile: list, reference_profile: list, synch: bool = True) ->
def create_profile_using_time_weighted_state(
integration_result: float,
states: list,
time_interval_durations: list,
min_max_ratio: float,
states_correlate_profile: bool = True,
) -> list:
"""
Returns a profile that approximates a sinusoidal function in discrete time.
......@@ -210,7 +207,7 @@ def create_profile_using_time_weighted_state(
where:
a = b*(1-min_max_ratio)/(1+min_max_ratio)
b = integration_result/integration_period
......@@ -222,13 +219,13 @@ def create_profile_using_time_weighted_state(
----------
integration_result : float
The result of integrating the sinusoidal function for one period.
states : list
The average state during each time interval.
time_interval_durations : list
The time interval durations for each sample.
min_max_ratio : float
The ratio between the minimum and maximum values of the function.
states_correlate_profile : bool, optional
If True, the peak should happen when the state is at its highest point.
If False, the peak should happen when the state is at its lowest point.
The default is True.
......@@ -246,26 +243,26 @@ def create_profile_using_time_weighted_state(
"""
if len(states) != len(time_interval_durations):
raise ValueError("The inputs are inconsistent.")
period = sum(time_interval_durations)
avg_time_interval_duration = mean(time_interval_durations)
states_weighted = [
(
x_k * delta_k / avg_time_interval_duration
if states_correlate_profile
else -x_k * delta_k / avg_time_interval_duration
)
for delta_k, x_k in zip(time_interval_durations, states)
]
# find the peak
_sorted = sorted(
((state, index) for index, state in enumerate(states_weighted)), reverse=True
)
# create new list for time durations starting with that of the peak
......@@ -280,7 +277,7 @@ def create_profile_using_time_weighted_state(
new_profile = discrete_sinusoid_matching_integral(
integration_result=integration_result,
time_interval_durations=swapped_time_durations,
min_max_ratio=min_max_ratio,
phase_shift_radians=(
math.pi / 2
- 0.5 * (time_interval_durations[_sorted[0][1]] / period) * 2 * math.pi
......@@ -291,6 +288,257 @@ def create_profile_using_time_weighted_state(
n = len(time_interval_durations)
return [*new_profile[n - _sorted[0][1] :], *new_profile[0 : n - _sorted[0][1]]]
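A quick sketch with hypothetical numbers, placing the sinusoid's peak on the warmest interval:

profile = create_profile_using_time_weighted_state(
    integration_result=240.0,
    states=[10.0, 14.0, 18.0, 14.0],  # warmest in the third interval
    time_interval_durations=[6.0, 6.0, 6.0, 6.0],
    min_max_ratio=0.5,
    states_correlate_profile=True,
)
# sum(profile) equals 240 and the largest sample is the third one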
# *****************************************************************************
# *****************************************************************************
def generate_manual_state_correlated_profile(
integration_result: float,
states: list,
time_interval_durations: list,
deviation_gain: float
) -> list:
"""
Returns a profile matching a given integral and varying according to a
sequence of time intervals and the respective mean states.
The profile for interval i is defined as follows:
P[i] = (dt[i]/dt_mean)*( (x[i]-x_mean)*gain + offset)
where:
dt[i] is the time interval duration for interval i
dt_mean is the mean time interval duration
x[i] is the state for interval i
x_mean is the mean state for the entire profile
The offset is defined as the integration result divided by the number of
time intervals, whereas the gain is user-defined and real-valued.
Parameters
----------
integration_result : float
The result of integrating the sinusoidal function for one period.
states : list
The average state during each time interval.
time_interval_durations : list
The time interval durations for each sample.
deviation_gain : float
The gain applied to each interval's deviation from the mean state.
Raises
------
ValueError
This error is raised if the list inputs do not have the same size.
Returns
-------
list
A profile matching the aforementioned characteristics.
"""
if len(states) != len(time_interval_durations):
raise ValueError("The inputs are inconsistent.")
dt_total = sum(time_interval_durations)
dt_mean = mean(time_interval_durations)
# x_mean = mean(states)
x_mean = sum(
deltat_k*x_k
for deltat_k, x_k in zip(time_interval_durations, states)
)/dt_total
beta = integration_result/len(states)
return [
((x_k - x_mean) * deviation_gain + beta) * deltat_k / dt_mean
for deltat_k, x_k in zip(time_interval_durations, states)
]
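A worked example with hypothetical, regular intervals that can be checked by hand against the formula above:

profile = generate_manual_state_correlated_profile(
    integration_result=240.0,
    states=[10.0, 14.0, 18.0, 14.0],
    time_interval_durations=[6.0, 6.0, 6.0, 6.0],
    deviation_gain=-2.0,  # negative gain: higher state, lower sample
)
# x_mean = 14 and beta = 60, so the samples are
# (x_k - 14)*(-2) + 60 = [68.0, 60.0, 52.0, 60.0], which sum to 240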
# *****************************************************************************
# *****************************************************************************
def generate_state_correlated_profile(
integration_result: float,
states: list,
time_interval_durations: list,
states_correlate_profile: bool,
min_max_ratio: float,
solver: str
) -> tuple:
"""
Returns a profile observing a number of conditions.
The profile must correlate with a set of states averaged over certain time
intervals, whose durations may be irregular. Integration of the profile
over all time intervals must also match a certain value. Finally, the minimum
and maximum values must be related by a factor between 0 and 1.
This method relies on linear programming. An LP solver must be used.
The profile for interval i is defined as follows:
P[i] = (dt[i]/dt_mean)*( (x[i]-x_mean)*gain + offset)
where:
dt[i] is the time interval duration for interval i
dt_mean is the mean time interval duration
x[i] is the state for interval i
x_mean is the mean state for the entire profile
The offset is defined as the integration result divided by the number of
time intervals, whereas the gain is determined via optimisation.
Parameters
----------
integration_result : float
The result of integrating the sinusoidal function for one period.
states : list
The average state during each time interval.
time_interval_durations : list
The duration of each time interval.
states_correlate_profile : bool
If True, higher state values must lead to higher profile values.
If False, lower state values must lead to higher profile values.
min_max_ratio : float
The ratio between the minimum and the maximum profile values.
solver : str
The name of the LP solver to use, according to Pyomo conventions.
Raises
------
ValueError
This error is raised if the list inputs do not have the same size.
Returns
-------
tuple
A tuple containing the profile, the gain and the offset.
"""
# *************************************************************************
# *************************************************************************
# TODO: find alternative solution, as this is most likely overkill
# internal model
def model(states_correlate_profile: bool) -> pyo.AbstractModel:
# abstract model
model = pyo.AbstractModel()
# sets
model.I = pyo.Set()
# decision variables
model.P_i = pyo.Var(model.I, domain=pyo.NonNegativeReals)
model.P_max = pyo.Var(domain=pyo.NonNegativeReals)
model.P_min = pyo.Var(domain=pyo.NonNegativeReals)
if states_correlate_profile:
model.alpha = pyo.Var(domain=pyo.PositiveReals)
else:
model.alpha = pyo.Var(domain=pyo.NegativeReals)
# parameters
model.param_R = pyo.Param()
model.param_VI = pyo.Param()
model.param_X_i = pyo.Param(model.I)
model.param_Y_i = pyo.Param(model.I)
def obj_f(m):
if states_correlate_profile:
return m.alpha # if positive
else:
return -m.alpha # if negative
# model.OBJ = pyo.Objective(rule=obj_f)
model.OBJ = pyo.Objective(rule=obj_f, sense=pyo.maximize)
# integral
def constr_integral_rule(m):
return sum(m.P_i[i] for i in m.I) == m.param_VI
model.constr_integral = pyo.Constraint(rule=constr_integral_rule)
# profile equations
def constr_profile_equations_rule(m,i):
return m.P_i[i] - m.param_X_i[i]*m.alpha == m.param_Y_i[i]
model.constr_profile_equations = pyo.Constraint(model.I, rule=constr_profile_equations_rule)
# upper bound
def constr_max_upper_bound_rule(m,i):
return m.P_i[i] <= m.P_max
model.constr_max_upper_bound = pyo.Constraint(model.I, rule=constr_max_upper_bound_rule)
# lower bound
def constr_max_lower_bound_rule(m,i):
return m.P_i[i] >= m.P_min
model.constr_max_lower_bound = pyo.Constraint(model.I, rule=constr_max_lower_bound_rule)
# ratio
def constr_min_max_rule(m):
return m.P_min == m.P_max*m.param_R
model.constr_min_max = pyo.Constraint(rule=constr_min_max_rule)
return model
number_time_steps = len(time_interval_durations)
if len(states) != number_time_steps:
raise ValueError("The inputs are inconsistent.")
# *************************************************************************
# *************************************************************************
dt_total = sum(time_interval_durations)
dt_mean = mean(time_interval_durations)
x_mean = sum(
deltat_k*x_k
for deltat_k, x_k in zip(time_interval_durations, states)
)/dt_total
beta = integration_result/number_time_steps
f = [dt_k/dt_mean for dt_k in time_interval_durations]
set_I = tuple(i for i in range(number_time_steps))
if len(set(states)) == 1:
# alpha = 0, return trivial profile
return (
[fi*beta for fi in f],
0,
beta
)
# create a dictionary with the data (using pyomo conventions)
data_dict = {
None: {
# sets
"I": {None: set_I},
# parameters
"param_VI": {None: integration_result},
"param_R": {None: min_max_ratio},
"param_X_i": {i:f[i]*(states[i]-x_mean) for i in set_I},
"param_Y_i": {i:f[i]*beta for i in set_I},
}
}
# *************************************************************************
# *************************************************************************
opt = pyo.SolverFactory(solver)
fit_model = model(states_correlate_profile=states_correlate_profile)
problem = fit_model.create_instance(data=data_dict)
opt.solve(problem, tee=False)
# return profile
return (
[pyo.value(problem.P_i[i]) for i in problem.I],
pyo.value(problem.alpha),
beta
)
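A hedged usage sketch; it assumes an LP solver reachable by Pyomo, here GLPK under the name 'glpk':

profile, gain, offset = generate_state_correlated_profile(
    integration_result=240.0,
    states=[10.0, 14.0, 18.0, 14.0],
    time_interval_durations=[6.0, 6.0, 6.0, 6.0],
    states_correlate_profile=True,
    min_max_ratio=0.5,  # the smallest sample is half the largest
    solver='glpk',
)
# the samples sum to 240 and min(profile) == 0.5*max(profile)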
# *****************************************************************************
# *****************************************************************************
......@@ -300,7 +548,7 @@ def max_min_sinusoidal_profile(
integration_result: float or int,
period: float or int,
time_interval_duration: float or int,
min_max_ratio: float,
) -> tuple:
"""
Returns the maximum and minimum amount for a given time interval, according
......@@ -317,7 +565,7 @@ def max_min_sinusoidal_profile(
where:
a = b*(1-min_max_ratio)/(1+min_max_ratio)
b = integration_result/integration_period
......@@ -331,7 +579,7 @@ def max_min_sinusoidal_profile(
The result of integrating the sinusoidal function for one period.
period : float or int
The period of the sinusoidal function.
time_interval_duration : float or int
The duration of the time interval.
min_max_ratio : float
The ratio between the minimum and maximum values of the function.
......@@ -345,7 +593,7 @@ def max_min_sinusoidal_profile(
"""
b = integration_result / period
a = b * (1 - min_max_ratio) / (1 + min_max_ratio)
alpha = 2 * math.pi / period
amplitude = a * (2 / alpha) * math.sin(alpha * time_interval_duration / 2)
......@@ -354,6 +602,87 @@ def max_min_sinusoidal_profile(
b * time_interval_duration - amplitude,
)
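A quick numeric sketch (hypothetical values): a daily sinusoid integrating to 24 over a 24-unit period, sampled over one unit:

hi, lo = max_min_sinusoidal_profile(
    integration_result=24.0,
    period=24.0,
    time_interval_duration=1.0,
    min_max_ratio=0.5,
)
# b = 1 and a = 1/3, so hi is roughly 1.332 and lo roughly 0.668,
# and hi + lo == 2*b*time_interval_duration == 2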
# *****************************************************************************
# *****************************************************************************
def generate_profile(
integration_result: float,
time_interval_durations: list,
**kwargs
) -> list:
"""
Returns a profile according to a variety of methods.
Parameters
----------
integration_result : float
The value which must be obtained by adding up all samples.
time_interval_durations : list
A list with the durations of each time interval.
**kwargs :
A sequence of key and value pairs for use in subsequent methods.
Returns
-------
list
The desired profile.
"""
# generate the profile
if 'states' not in kwargs:
# min_max_ratio is necessary
# phase_shift_radians is optional
# states play no role
return discrete_sinusoid_matching_integral(
integration_result=integration_result,
time_interval_durations=time_interval_durations,
**kwargs
)
elif 'deviation_gain' not in kwargs:
# states matter but the gain must be determined
if 'solver' in kwargs:
# - states_correlate_profile is necessary
# - min_max_ratio is necessary
# - solver is necessary
return generate_state_correlated_profile(
integration_result=integration_result,
time_interval_durations=time_interval_durations,
**kwargs
)[0]
else:
# - states_correlate_profile is necessary
# - min_max_ratio is necessary
# - solver is not necessary
return create_profile_using_time_weighted_state(
integration_result=integration_result,
time_interval_durations=time_interval_durations,
**kwargs)
else:
# states matter and the gain is predefined
# states are necessary
# deviation gain is necessary
return generate_manual_state_correlated_profile(
integration_result=integration_result,
time_interval_durations=time_interval_durations,
**kwargs
)
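The dispatch can be exercised directly; a sketch with hypothetical numbers covering the three branches:

# no states: sinusoid matching the integral
p1 = generate_profile(
    integration_result=100.0,
    time_interval_durations=[6.0, 6.0, 6.0, 6.0],
    min_max_ratio=0.2,
)
# states plus a solver: the gain is determined via LP
p2 = generate_profile(
    integration_result=100.0,
    time_interval_durations=[6.0, 6.0, 6.0, 6.0],
    states=[2.0, 4.0, 8.0, 4.0],
    states_correlate_profile=True,
    min_max_ratio=0.2,
    solver='glpk',
)
# states plus a predefined gain
p3 = generate_profile(
    integration_result=100.0,
    time_interval_durations=[6.0, 6.0, 6.0, 6.0],
    states=[2.0, 4.0, 8.0, 4.0],
    deviation_gain=1.5,
)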
# *****************************************************************************
# *****************************************************************************
# imports
import pyomo.environ as pyo
# *****************************************************************************
# *****************************************************************************
def add_converters(
model: pyo.AbstractModel,
enable_default_values: bool = True,
enable_validation: bool = True,
enable_initialisation: bool = True,
):
# *************************************************************************
# *************************************************************************
# systems
# set of all systems
model.set_I = pyo.Set()
# set of optional systems
model.set_I_new = pyo.Set(within=model.set_I)
# *************************************************************************
# inputs
# set of inputs (indexed by system)
model.set_M = pyo.Set(model.set_I)
# set of inputs modelled using non-negative real variables
model.set_M_nnr = pyo.Set(model.set_I, within=model.set_M)
# set of inputs modelled using binary variables
model.set_M_bin = pyo.Set(model.set_I, within=model.set_M)
# set of amplitude-constrained inputs
model.set_M_dim = pyo.Set(model.set_I_new, within=model.set_M)
# set of fixed-amplitude inputs
model.set_M_fix = pyo.Set(model.set_I, within=model.set_M)
# set of externality-inducing inputs
model.set_M_ext = pyo.Set(model.set_I, within=model.set_M)
# *************************************************************************
# outputs
# set of outputs (indexed by system)
model.set_R = pyo.Set(model.set_I)
# set of outputs with fixed bounds
model.set_R_fix = pyo.Set(model.set_I, within=model.set_R)
# set of positive amplitude-constrained outputs
model.set_R_dim_pos = pyo.Set(model.set_I, within=model.set_R)
# set of negative amplitude-constrained outputs
model.set_R_dim_neg = pyo.Set(model.set_I, within=model.set_R)
# set of amplitude-limited outputs with matching pos. and neg. amplitudes
model.set_R_dim_eq = pyo.Set(model.set_I, within=model.set_R)
# set of outputs (indexed by system) inducing externalities
model.set_R_ext = pyo.Set(model.set_I)
# *************************************************************************
# states
# set of states
model.set_N = pyo.Set(model.set_I)
# set of states with fixed bounds
model.set_N_fix = pyo.Set(model.set_I, within=model.set_N)
# set of positive amplitude-constrained states
model.set_N_dim_pos = pyo.Set(model.set_I, within=model.set_N)
# set of negative amplitude-constrained states
model.set_N_dim_neg = pyo.Set(model.set_I, within=model.set_N)
# set of amplitude-limited states with matching pos. and neg. amplitudes
model.set_N_dim_eq = pyo.Set(model.set_I, within=model.set_N)
# set of states (indexed by system) inducing externalities
model.set_N_ext = pyo.Set(model.set_I, within=model.set_N)
# set of positive state variation-penalised states
model.set_N_pos_var = pyo.Set(model.set_I, within=model.set_N)
# set of negative state variation-penalised states
model.set_N_neg_var = pyo.Set(model.set_I, within=model.set_N)
# set of upper reference violation-penalised states
model.set_N_ref_u = pyo.Set(model.set_I, within=model.set_N)
# set of lower reference violation-penalised states
model.set_N_ref_d = pyo.Set(model.set_I, within=model.set_N)
# *************************************************************************
# *************************************************************************
# sparse index sets
# *************************************************************************
# *************************************************************************
# inputs
# set of IM tuples
def init_set_IM(m):
return ((i, m_i) for i in m.set_I for m_i in m.set_M[i])
model.set_IM = pyo.Set(dimen=2, initialize=init_set_IM)
# set of IM tuples for systems with binary signals
def init_set_IM_bin(m):
return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_bin[i])
model.set_IM_bin = pyo.Set(dimen=2, initialize=init_set_IM_bin, within=model.set_IM)
# set of IM tuples for tech. with dimensionable reference mode levels
def init_set_IM_dim(m):
return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_dim[i])
model.set_IM_dim = pyo.Set(dimen=2, initialize=init_set_IM_dim, within=model.set_IM)
# set of IM tuples for fixed amplitude inputs
def init_set_IM_fix(m):
return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_fix[i])
model.set_IM_fix = pyo.Set(dimen=2, initialize=init_set_IM_fix, within=model.set_IM)
# set of IM tuples for technologies whose modes can induce externalities
def init_set_IM_ext(m):
return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_ext[i])
model.set_IM_ext = pyo.Set(dimen=2, initialize=init_set_IM_ext, within=model.set_IM)
# *************************************************************************
# states
# set of IN tuples
def init_set_IN(m):
return (
(i, n_i) for i in m.set_I for n_i in m.set_N[i] # IN tuple
) # for each state
model.set_IN = pyo.Set(dimen=2, initialize=init_set_IN)
# set of IN tuples for states with fixed bounds
def init_set_IN_fix(m):
return ((i, n_i) for i in m.set_I for n_i in m.set_N_fix[i])
model.set_IN_fix = pyo.Set(dimen=2, initialize=init_set_IN_fix)
# set of IN tuples for converters with amplitude-constrained states
def init_set_IN_dim_eq(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_eq[i])
model.set_IN_dim_eq = pyo.Set(
dimen=2, initialize=init_set_IN_dim_eq, within=model.set_IN
)
# set of IN tuples for converters with pos. amplitude-constrained states
def init_set_IN_dim_pos(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_pos[i])
model.set_IN_dim_pos = pyo.Set(
dimen=2, initialize=init_set_IN_dim_pos, within=model.set_IN
)
# set of IN tuples for converters with neg. amplitude-constrained states
def init_set_IN_dim_neg(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_neg[i])
model.set_IN_dim_neg = pyo.Set(
dimen=2, initialize=init_set_IN_dim_neg, within=model.set_IN
)
# set of IN tuples for converters with externality-inducing states
def init_set_IN_ext(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ext[i])
model.set_IN_ext = pyo.Set(dimen=2, initialize=init_set_IN_ext, within=model.set_IN)
# set of IN tuples for positive variation-penalised states
def init_set_IN_pos_var(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_pos_var[i])
model.set_IN_pos_var = pyo.Set(
dimen=2, initialize=init_set_IN_pos_var, within=model.set_IN
)
# set of IN tuples for negative variation-penalised states
def init_set_IN_neg_var(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_neg_var[i])
model.set_IN_neg_var = pyo.Set(
dimen=2, initialize=init_set_IN_neg_var, within=model.set_IN
)
# set of IN tuples for upper reference violation penalised states
def init_set_IN_ref_u(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ref_u[i])
model.set_IN_ref_u = pyo.Set(
dimen=2, initialize=init_set_IN_ref_u, within=model.set_IN
)
# set of IN tuples for lower reference violation penalised states
def init_set_IN_ref_d(m):
return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ref_d[i])
model.set_IN_ref_d = pyo.Set(
dimen=2, initialize=init_set_IN_ref_d, within=model.set_IN
)
# *************************************************************************
# outputs
# set of IR tuples
def init_set_IR(m):
return ((i, r_i) for i in m.set_I for r_i in m.set_R[i])
model.set_IR = pyo.Set(dimen=2, initialize=init_set_IR)
# set of IR tuples for outputs with fixed bounds
def init_set_IR_fix(m):
return ((i, r_i) for i in m.set_I for r_i in m.set_R_fix[i])
model.set_IR_fix = pyo.Set(dimen=2, initialize=init_set_IR_fix)
# set of IR tuples for converters with matching pos. and neg. out. amp. limits
def init_set_IR_dim_eq(m):
return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_eq[i])
model.set_IR_dim_eq = pyo.Set(dimen=2, initialize=init_set_IR_dim_eq)
# set of IR tuples for converters with amplitude-penalised outputs
def init_set_IR_dim_neg(m):
return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_neg[i])
model.set_IR_dim_neg = pyo.Set(dimen=2, initialize=init_set_IR_dim_neg)
# set of IR tuples for converters with amplitude-penalised outputs
def init_set_IR_dim(m):
return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim[i])
model.set_IR_dim = pyo.Set(dimen=2, initialize=init_set_IR_dim)
# set of IR tuples for converters with pos. amplitude-constrained outputs
def init_set_IR_dim_pos(m):
return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_pos[i])
model.set_IR_dim_pos = pyo.Set(dimen=2, initialize=init_set_IR_dim_pos)
# set of IR tuples for converters with externality-inducing outputs
def init_set_IR_ext(m):
return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_ext[i])
model.set_IR_ext = pyo.Set(dimen=2, initialize=init_set_IR_ext)
# *************************************************************************
# combined inputs/states/outputs
# TODO: narrow down these sets if possible
# set of INN tuples
def init_set_INN(m):
return ((i, n1, n2) for (i, n1) in m.set_IN for n2 in m.set_N[i])
model.set_INN = pyo.Set(dimen=3, initialize=init_set_INN)
# set of INM tuples
def init_set_INM(m):
return ((i, n_i, m_i) for (i, n_i) in m.set_IN for m_i in m.set_M[i])
model.set_INM = pyo.Set(dimen=3, initialize=init_set_INM)
# set of IRM tuples
def init_set_IRM(m):
return (
(i, r_i, m_i) for (i, r_i) in m.set_IR for m_i in m.set_M[i]
) # can be further constrained
model.set_IRM = pyo.Set(dimen=3, initialize=init_set_IRM)
# set of IRN tuples
def init_set_IRN(m):
return (
(i, r_i, n_i) for (i, r_i) in m.set_IR for n_i in m.set_N[i]
) # can be further constrained
model.set_IRN = pyo.Set(dimen=3, initialize=init_set_IRN)
# *************************************************************************
# *************************************************************************
# parameters
# converters
# externality cost per input unit
model.param_c_ext_u_imqk = pyo.Param(
model.set_IM_ext, model.set_QK, within=pyo.NonNegativeReals, default=0
)
# externality cost per output unit
model.param_c_ext_y_irqk = pyo.Param(
model.set_IR_ext, model.set_QK, within=pyo.NonNegativeReals, default=0
)
# externality cost per state unit
model.param_c_ext_x_inqk = pyo.Param(
model.set_IN_ext, model.set_QK, within=pyo.NonNegativeReals, default=0
)
# unit cost of positive state variations
model.param_c_pos_var_in = pyo.Param(
model.set_IN_pos_var, within=pyo.NonNegativeReals, default=0
)
# unit cost of negative state variations
model.param_c_neg_var_in = pyo.Param(
model.set_IN_neg_var, within=pyo.NonNegativeReals, default=0
)
# unit cost of upper state reference violations
model.param_c_ref_u_inqk = pyo.Param(
model.set_IN_ref_u, model.set_QK, within=pyo.NonNegativeReals, default=0
)
# unit cost of lower state reference violations
model.param_c_ref_d_inqk = pyo.Param(
model.set_IN_ref_d, model.set_QK, within=pyo.NonNegativeReals, default=0
)
# minimum converter cost
model.param_c_cvt_min_i = pyo.Param(
model.set_I_new, within=pyo.NonNegativeReals, default=0
)
# unit (positive) input amplitude cost
model.param_c_cvt_u_im = pyo.Param(
model.set_IM_dim, within=pyo.NonNegativeReals, default=0
)
# unit output amplitude cost
model.param_c_cvt_y_ir = pyo.Param(
model.set_IR_dim, within=pyo.NonNegativeReals, default=0
)
# unit positive state amplitude cost
model.param_c_cvt_x_pos_in = pyo.Param(
model.set_IN_dim_pos, within=pyo.NonNegativeReals, default=0
)
# unit negative state amplitude cost
model.param_c_cvt_x_neg_in = pyo.Param(
model.set_IN_dim_neg, within=pyo.NonNegativeReals, default=0
)
# unit positive output amplitude cost
model.param_c_cvt_y_pos_ir = pyo.Param(
model.set_IR_dim_pos, within=pyo.NonNegativeReals, default=0
)
# unit negative output amplitude cost
model.param_c_cvt_y_neg_ir = pyo.Param(
model.set_IR_dim_neg, within=pyo.NonNegativeReals, default=0
)
# *************************************************************************
# effect of system inputs on specific network and node pairs
model.param_a_nw_glimqk = pyo.Param(
model.set_GL_not_exp_imp,
model.set_IM,
model.set_QK,
default=0, # default: no effect
within=pyo.Reals,
)
# effect of system outputs on specific network and node pairs
model.param_a_nw_glirqk = pyo.Param(
model.set_GL_not_exp_imp,
model.set_IR,
model.set_QK,
default=0, # default: no effect
within=pyo.Reals,
)
# *************************************************************************
# inputs
# upper bounds for (non-binary, non-dimensionable) inputs
model.param_u_ub_imqk = pyo.Param(
model.set_IM_fix, model.set_QK, within=pyo.PositiveReals
)
# maximum input limits
model.param_u_amp_max_im = pyo.Param(
model.set_IM_dim, within=pyo.PositiveReals, default=1
)
# time interval-dependent adjustment coefficients for input limits
model.param_f_amp_u_imqk = pyo.Param(
model.set_IM_dim, model.set_QK, within=pyo.PositiveReals, default=1
)
# *************************************************************************
# states
# initial conditions
model.param_x_inq0 = pyo.Param(model.set_IN, model.set_Q, within=pyo.Reals)
# fixed upper bounds for state variables
model.param_x_ub_irqk = pyo.Param(model.set_IN_fix, model.set_QK, within=pyo.Reals)
# fixed lower bounds for state variables
model.param_x_lb_irqk = pyo.Param(model.set_IN_fix, model.set_QK, within=pyo.Reals)
# maximum positive amplitude for states
model.param_x_amp_pos_max_in = pyo.Param(
model.set_IN_dim_pos, within=pyo.PositiveReals
)
# maximum negative amplitude for states
model.param_x_amp_neg_max_in = pyo.Param(
model.set_IN_dim_neg, within=pyo.PositiveReals
)
# adjustment of positive state amplitude limits
model.param_f_amp_pos_x_inqk = pyo.Param(
model.set_IN_dim_pos, model.set_QK, within=pyo.PositiveReals, default=1
)
# adjustment of negative state amplitude limits
model.param_f_amp_neg_x_inqk = pyo.Param(
model.set_IN_dim_neg, model.set_QK, within=pyo.PositiveReals, default=1
)
# state equations: coefficients from C matrix
model.param_a_eq_x_innqk = pyo.Param(
model.set_INN, model.set_QK, default=0, within=pyo.Reals # default: no effect
)
# state equations: coefficients from D matrix
model.param_b_eq_x_inmqk = pyo.Param(
model.set_INM, model.set_QK, default=0, within=pyo.Reals # default: no effect
)
# state equations: constant term
model.param_e_eq_x_inqk = pyo.Param(
model.set_IN, model.set_QK, default=0, within=pyo.Reals # default: no effect
)
# *************************************************************************
# outputs
# fixed upper bounds for output variables
model.param_y_ub_irqk = pyo.Param(model.set_IR_fix, model.set_QK, within=pyo.Reals)
# fixed lower bounds for output variables
model.param_y_lb_irqk = pyo.Param(model.set_IR_fix, model.set_QK, within=pyo.Reals)
# adjustment of positive output amplitude limits
model.param_f_amp_y_pos_irqk = pyo.Param(
model.set_IR_dim_pos, model.set_QK, within=pyo.PositiveReals, default=1
)
# adjustment of negative output amplitude limits
model.param_f_amp_y_neg_irqk = pyo.Param(
model.set_IR_dim_neg, model.set_QK, within=pyo.PositiveReals, default=1
)
# maximum positive amplitude limit for outputs
model.param_y_amp_pos_max_ir = pyo.Param(
model.set_IR_dim_pos, within=pyo.PositiveReals
)
# maximum negative amplitude limit for outputs
model.param_y_amp_neg_max_ir = pyo.Param(
model.set_IR_dim_neg, within=pyo.PositiveReals
)
# output equation coefficients from C matrix
model.param_c_eq_y_irnqk = pyo.Param(
model.set_IRN, model.set_QK, default=0, within=pyo.Reals # default: no effect
)
# output equation coefficients from D matrix
model.param_d_eq_y_irmqk = pyo.Param(
model.set_IRM, model.set_QK, default=0, within=pyo.Reals # default: no effect
)
# output equation constant
model.param_e_eq_y_irqk = pyo.Param(
model.set_IR, model.set_QK, default=0, within=pyo.Reals # default: no effect
)
# *************************************************************************
# *************************************************************************
# *************************************************************************
# *************************************************************************
# variables
# *************************************************************************
# *************************************************************************
# capex for installing individual converters
model.var_capex_cvt_i = pyo.Var(model.set_I_new, within=pyo.NonNegativeReals)
# *************************************************************************
# converters
# decision to install converter i
model.var_cvt_inv_i = pyo.Var(model.set_I_new, within=pyo.Binary)
# inputs
# input variables
def bounds_var_u_imqk(m, i, m_i, q, k):
if (i, m_i) in m.set_IM_fix:
# predefined limit
return (0, m.param_u_ub_imqk[(i, m_i, q, k)])
else:
# dynamic limit (set elsewhere)
return (0, None)
def domain_var_u_imqk(m, i, m_i, q, k):
try:
if m_i in m.set_M_bin[i]:
return pyo.Binary # binary: {0,1}
else:
return pyo.NonNegativeReals # nonnegative real: [0,inf]
except KeyError:
return pyo.NonNegativeReals # nonnegative real: [0,inf]
model.var_u_imqk = pyo.Var(
model.set_IM,
model.set_QK,
domain=domain_var_u_imqk,
# within=pyo.NonNegativeReals,
bounds=bounds_var_u_imqk,
)
# input amplitude variables (only one per sign is needed, as vars. are nnr)
model.var_u_amp_im = pyo.Var(model.set_IM_dim, within=pyo.NonNegativeReals)
# *************************************************************************
# outputs
# output variables
def bounds_var_y_irqk(m, i, r, q, k):
if (i, r) in m.set_IR_fix:
# predefined limit
return (m.param_y_lb_irqk[(i, r, q, k)], m.param_y_ub_irqk[(i, r, q, k)])
else:
# do not enforce any limits
return (None, None)
# def domain_var_y_irqk(m, i, r, k):
# try:
# if m_i in m.set_M_bin[i]:
# return pyo.Binary # binary: {0,1}
# else:
# return pyo.NonNegativeReals # nonnegative real: [0,inf]
# except KeyError:
# return pyo.NonNegativeReals # nonnegative real: [0,inf]
model.var_y_irqk = pyo.Var(
model.set_IR, model.set_QK, bounds=bounds_var_y_irqk, within=pyo.Reals
)
# positive output amplitudes
model.var_y_amp_pos_ir = pyo.Var(model.set_IR_dim_pos, within=pyo.Reals)
# negative output amplitudes
model.var_y_amp_neg_ir = pyo.Var(model.set_IR_dim_neg, within=pyo.Reals)
# *************************************************************************
# states
# state variables
model.var_x_inqk = pyo.Var(model.set_IN, model.set_QK, within=pyo.Reals)
# positive amplitude variables
model.var_x_amp_pos_in = pyo.Var(model.set_IN_dim_pos, within=pyo.NonNegativeReals)
# negative amplitude variables
model.var_x_amp_neg_in = pyo.Var(model.set_IN_dim_neg, within=pyo.NonNegativeReals)
# positive state variation
model.var_delta_x_pos_var_in = pyo.Var(
model.set_IN_pos_var, within=pyo.NonNegativeReals
)
# negative state variation
model.var_delta_x_neg_var_in = pyo.Var(
model.set_IN_neg_var, within=pyo.NonNegativeReals
)
# positive reference state violation
model.var_delta_x_ref_u_inqk = pyo.Var(
model.set_IN_ref_u, model.set_QK, within=pyo.NonNegativeReals
)
# negative reference state violation
model.var_delta_x_ref_d_inqk = pyo.Var(
model.set_IN_ref_d, model.set_QK, within=pyo.NonNegativeReals
)
# *************************************************************************
# *************************************************************************
# objective function
# capex for converters
def rule_capex_converter(m, i):
return (
m.var_cvt_inv_i[i] * m.param_c_cvt_min_i[i]
+ sum(
m.var_u_amp_im[(i, m_i)] * m.param_c_cvt_u_im[(i, m_i)]
for m_i in m.set_M_dim[i]
)
+ sum(
m.var_x_amp_pos_in[(i, n)] * m.param_c_cvt_x_pos_in[(i, n)]
for n in m.set_N_dim_pos[i]
)
+ sum(
m.var_x_amp_neg_in[(i, n)] * m.param_c_cvt_x_neg_in[(i, n)]
for n in m.set_N_dim_neg[i]
)
+ sum(
m.var_y_amp_pos_ir[(i, r)] * m.param_c_cvt_y_pos_ir[(i, r)]
for r in m.set_R_dim_pos[i]
)
+ sum(
m.var_y_amp_neg_ir[(i, r)] * m.param_c_cvt_y_neg_ir[(i, r)]
for r in m.set_R_dim_neg[i]
)
<= m.var_capex_cvt_i[i]
)
model.constr_capex_system = pyo.Constraint(
model.set_I_new, rule=rule_capex_converter
)
# *************************************************************************
# *************************************************************************
# converters
# *************************************************************************
# *************************************************************************
# input signal limits for dimensionable inputs
def rule_constr_u_limit_dim(m, i, m_i, q, k):
return (
m.var_u_imqk[(i, m_i, q, k)]
<= m.var_u_amp_im[(i, m_i)] * m.param_f_amp_u_imqk[(i, m_i, q, k)]
)
model.constr_u_limit_dim = pyo.Constraint(
model.set_IM_dim, model.set_QK, rule=rule_constr_u_limit_dim
)
# nominal input amplitude limit for dimensionable inputs
def rule_constr_u_amp_ub(m, i, m_i):
return (
m.var_u_amp_im[(i, m_i)]
<= m.var_cvt_inv_i[i] * m.param_u_amp_max_im[(i, m_i)]
)
model.constr_u_amp_ub = pyo.Constraint(model.set_IM_dim, rule=rule_constr_u_amp_ub)
# fixed upper limits
def rule_constr_u_fix_limits(m, i, m_i, q, k):
# if we need to know the lim input signal (e.g., for the obj. func.)
if i in m.set_I_new:
# new converter
return (
m.var_u_imqk[(i, m_i, q, k)]
<= m.param_u_ub_imqk[(i, m_i, q, k)] * m.var_cvt_inv_i[i]
)
else:
# pre-existing
return m.var_u_imqk[(i, m_i, q, k)] <= m.param_u_ub_imqk[(i, m_i, q, k)]
model.constr_u_fix_limits = pyo.Constraint(
model.set_IM_fix, model.set_QK, rule=rule_constr_u_fix_limits
)
# input limits for binary inputs
    def rule_constr_u_bin_limits(m, i, m_i, q, k):
        if i in m.set_I_new:
            # new converter: binary inputs can only be used if it is installed
            return m.var_u_imqk[(i, m_i, q, k)] <= m.var_cvt_inv_i[i]
        else:
            # pre-existing converter: no limit is needed
            return pyo.Constraint.Skip
model.constr_u_bin_limits = pyo.Constraint(
model.set_IM_bin, model.set_QK, rule=rule_constr_u_bin_limits
)
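    # reading aid (comments only, not model code): in the three input
    # constraints above, var_cvt_inv_i acts as an installation gate for new
    # converters. For example, with param_u_ub_imqk = 5 and var_cvt_inv_i = 0,
    # the fixed limit becomes u <= 5 * 0 = 0, i.e. an unselected converter
    # cannot admit any input signal.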
# *************************************************************************
# outputs
# output equations
def rule_constr_output_equations(m, i, r, q, k):
return (
            m.var_y_irqk[(i, r, q, k)]
== sum(
m.param_c_eq_y_irnqk[(i, r, n_i, q, k)] * m.var_x_inqk[(i, n_i, q, k)]
for n_i in m.set_N[i]
)
+ sum(
m.param_d_eq_y_irmqk[(i, r, m_i, q, k)] * m.var_u_imqk[(i, m_i, q, k)]
for m_i in m.set_M[i]
)
+ m.param_e_eq_y_irqk[(i, r, q, k)]
)
model.constr_output_equations = pyo.Constraint(
model.set_IR, model.set_QK, rule=rule_constr_output_equations
)
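    # reading aid (comments only): the rule above is the output equation of a
    # discrete-time state-space model, y[k] = C*x[k] + D*u[k] + e, with
    # param_c_eq_y_irnqk in the role of C, param_d_eq_y_irmqk in the role of D
    # and param_e_eq_y_irqk as the affine term e, evaluated per converter i,
    # output r and assessment/step pair (q, k).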
# positive amplitude limit for output variables
def rule_constr_y_vars_have_pos_amp_limits(m, i, r, q, k):
return m.var_y_irqk[(i, r, q, k)] <= (
m.var_y_amp_pos_ir[(i, r)] * m.param_f_amp_y_pos_irqk[(i, r, q, k)]
)
model.constr_y_vars_have_pos_amp_limits = pyo.Constraint(
model.set_IR_dim_pos, model.set_QK, rule=rule_constr_y_vars_have_pos_amp_limits
)
# negative amplitude limit for output variables
def rule_constr_y_vars_have_neg_amp_limits(m, i, r, q, k):
return m.var_y_irqk[(i, r, q, k)] >= (
-m.var_y_amp_neg_ir[(i, r)] * m.param_f_amp_y_neg_irqk[(i, r, q, k)]
)
model.constr_y_vars_have_neg_amp_limits = pyo.Constraint(
model.set_IR_dim_neg, model.set_QK, rule=rule_constr_y_vars_have_neg_amp_limits
)
# positive amplitude limit must be zero unless the system is installed
def rule_constr_y_amp_pos_zero_if_cvt_not_selected(m, i, r):
return m.var_y_amp_pos_ir[(i, r)] <= (
m.var_cvt_inv_i[i] * m.param_y_amp_pos_ir[(i, r)]
)
    model.constr_y_amp_pos_zero_if_cvt_not_selected = pyo.Constraint(
model.set_IR_dim_pos, rule=rule_constr_y_amp_pos_zero_if_cvt_not_selected
)
# negative amplitude limit must be zero unless the system is installed
def rule_constr_y_amp_neg_zero_if_cvt_not_selected(m, i, r):
return m.var_y_amp_neg_ir[(i, r)] <= (
m.var_cvt_inv_i[i] * m.param_y_amp_neg_ir[(i, r)]
)
model.constr_y_amp_neg_zero_if_cvt_not_selected = pyo.Constraint(
model.set_IR_dim_neg, rule=rule_constr_y_amp_neg_zero_if_cvt_not_selected
)
# the positive and negative amplitudes must match
def rule_constr_y_amp_pos_neg_match(m, i, r):
return m.var_y_amp_pos_ir[(i, r)] == m.var_y_amp_neg_ir[(i, r)]
model.constr_y_amp_pos_neg_match = pyo.Constraint(
model.set_IR_dim_eq, rule=rule_constr_y_amp_pos_neg_match
)
# *************************************************************************
# states
def rule_constr_state_equations(m, i, n, q, k):
return (
m.var_x_inqk[(i, n, q, k)]
== sum(
m.param_a_eq_x_innqk[(i, n, n_star, q, k)]
* (
m.var_x_inqk[(i, n_star, q, k - 1)]
if k != 0
else m.param_x_inq0[(i, n, q)]
)
for n_star in m.set_N[i]
)
+ sum(
m.param_b_eq_x_inmqk[(i, n, m_i, q, k)] * m.var_u_imqk[(i, m_i, q, k)]
for m_i in m.set_M[i]
)
+ m.param_e_eq_x_inqk[(i, n, q, k)]
)
model.constr_state_equations = pyo.Constraint(
model.set_IN, model.set_QK, rule=rule_constr_state_equations
)
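    # reading aid (a minimal sketch, kept as comments): the rule above is the
    # state equation x[k] = A*x[k-1] + B*u[k] + e, started from the initial
    # condition param_x_inq0 at k = 0. For a single state and input, with
    # scalar a, b and e as hypothetical stand-ins for the indexed parameters,
    # the recursion it encodes is simply:
    # x = [x0]
    # for k in range(K):
    #     x.append(a * x[-1] + b * u[k] + e)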
# positive amplitude limit for state variables
def rule_constr_x_vars_have_pos_amp_limits(m, i, n, q, k):
return m.var_x_inqk[(i, n, q, k)] <= (
m.var_x_amp_pos_in[(i, n)] * m.param_f_amp_x_pos_inqk[(i, n, q, k)]
)
model.constr_x_vars_have_pos_amp_limits = pyo.Constraint(
model.set_IN_dim_pos, model.set_QK, rule=rule_constr_x_vars_have_pos_amp_limits
)
# negative amplitude limit for state variables
def rule_constr_x_vars_have_neg_amp_limits(m, i, n, q, k):
return m.var_x_inqk[(i, n, q, k)] >= (
            -m.var_x_amp_neg_in[(i, n)] * m.param_f_amp_x_neg_inqk[(i, n, q, k)]
)
model.constr_x_vars_have_neg_amp_limits = pyo.Constraint(
model.set_IN_dim_neg, model.set_QK, rule=rule_constr_x_vars_have_neg_amp_limits
)
# positive amplitude limit must be zero unless the system is installed
def rule_constr_x_amp_pos_zero_if_cvt_not_selected(m, i, n):
return m.var_x_amp_pos_in[(i, n)] <= (
m.var_cvt_inv_i[i] * m.param_x_amp_pos_in[(i, n)]
)
model.constr_x_amp_pos_zero_if_cvt_not_selected = pyo.Constraint(
model.set_IN_dim_pos, rule=rule_constr_x_amp_pos_zero_if_cvt_not_selected
)
# negative amplitude limit must be zero unless the system is installed
def rule_constr_x_amp_neg_zero_if_cvt_not_selected(m, i, n):
return m.var_x_amp_neg_in[(i, n)] <= (
m.var_cvt_inv_i[i] * m.param_x_amp_neg_in[(i, n)]
)
model.constr_x_amp_neg_zero_if_cvt_not_selected = pyo.Constraint(
model.set_IN_dim_neg, rule=rule_constr_x_amp_neg_zero_if_cvt_not_selected
)
# the positive and negative amplitudes must match
def rule_constr_x_amp_pos_neg_match(m, i, n):
return m.var_x_amp_pos_in[(i, n)] == m.var_x_amp_neg_in[(i, n)]
model.constr_x_amp_pos_neg_match = pyo.Constraint(
model.set_IN_dim_eq, rule=rule_constr_x_amp_pos_neg_match
)
# *************************************************************************
# *************************************************************************
return model
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
# imports
import pyomo.environ as pyo
from math import isfinite, inf
# *****************************************************************************
# *****************************************************************************
def add_network_restrictions(
model: pyo.AbstractModel,
enable_default_values: bool = True,
enable_validation: bool = True,
enable_initialisation: bool = True,
):
# *************************************************************************
# *************************************************************************
model.set_L_max_in_g = pyo.Set(
model.set_G, within=model.set_L
) # should inherently exclude import nodes
model.set_L_max_out_g = pyo.Set(
model.set_G, within=model.set_L
) # should inherently exclude export nodes
# maximum number of arcs per node pair
model.param_max_number_parallel_arcs = pyo.Param(
model.set_GLL,
# within=pyo.PositiveIntegers,
within=pyo.PositiveReals,
default=inf,
)
def init_set_GLL_arc_max(m):
return (
(g, l1, l2)
for (g, l1, l2) in m.param_max_number_parallel_arcs
if isfinite(m.param_max_number_parallel_arcs[(g, l1, l2)])
)
model.set_GLL_arc_max = pyo.Set(
dimen=3, within=model.set_GLL, initialize=init_set_GLL_arc_max
)
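    # sketch (comments only, hypothetical data): since the parameter defaults
    # to inf, only explicitly supplied finite limits survive the isfinite()
    # filter above, e.g. instantiating an AbstractModel with
    # data = {None: {"param_max_number_parallel_arcs": {("g0", "l1", "l2"): 2}}}
    # would make set_GLL_arc_max contain only ("g0", "l1", "l2"), so the
    # constraint below is built exclusively for node pairs with finite limits.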
# *************************************************************************
# *************************************************************************
# limit number of directed arcs per direction
def rule_constr_limited_parallel_arcs_per_direction(m, g, l1, l2):
# cases:
# 1) the number of options is lower than or equal to the limit (skip)
# 2) the number of preexisting and new mandatory arcs exceeds
# the limit (infeasible: pyo.Constraint.Infeasible)
# 3) all other cases (constraint)
# number of preexisting arcs going from l1 to l2
number_arcs_pre_nom = (
len(m.set_J_pre[(g, l1, l2)]) if (g, l1, l2) in m.set_J_pre else 0
)
number_arcs_pre_rev = (
sum(1 for j in m.set_J_pre[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_pre
else 0
)
# number of mandatory arcs going from l1 to l2
number_arcs_mdt_nom = (
len(m.set_J_mdt[(g, l1, l2)]) if (g, l1, l2) in m.set_J_mdt else 0
)
number_arcs_mdt_rev = (
sum(1 for j in m.set_J_mdt[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_mdt
else 0
)
# number of optional arcs going from l1 to l2
number_arcs_opt_nom = (
sum(
1
for j in m.set_J[(g, l1, l2)]
if j not in m.set_J_pre[(g, l1, l2)]
if j not in m.set_J_mdt[(g, l1, l2)]
)
if (g, l1, l2) in m.set_J
else 0
)
number_arcs_opt_rev = (
sum(
1
for j in m.set_J[(g, l2, l1)]
if j not in m.set_J_pre[(g, l2, l1)]
if j not in m.set_J_mdt[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
)
if (g, l2, l1) in m.set_J
else 0
)
# build the constraints
if (
number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of unavoidable arcs already exceeds the limit
return pyo.Constraint.Infeasible
elif (
number_arcs_opt_nom
+ number_arcs_opt_rev
+ number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of potential arcs exceeds the limit: cannot be skipped
            return (
                # preexisting arcs
                number_arcs_pre_nom + number_arcs_pre_rev +
                # mandatory arcs
                number_arcs_mdt_nom + number_arcs_mdt_rev +
                # arcs within an (optional) group that uses interfaces
                sum(
                    (
                        (sum(
                            1
                            for j in m.set_J_col[(g, l1, l2)]
                            if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
                        )
                        if (g, l1, l2) in m.set_J_col
                        else 0)
                        + (sum(
                            1
                            for j in m.set_J_col[(g, l2, l1)]
                            if j in m.set_J_und[(g, l2, l1)]
                            if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
                        )
                        if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
                        else 0)
                    )
                    * m.var_xi_arc_inv_t[t]
                    for t in m.set_T_int
                )
                +
                # arcs within an (optional) group that does not use interfaces
                sum(
                    (
                        (sum(
                            1
                            for j in m.set_J_col[(g, l1, l2)]
                            if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
                        )
                        if (g, l1, l2) in m.set_J_col
                        else 0)
                        + (sum(
                            1
                            for j in m.set_J_col[(g, l2, l1)]
                            if j in m.set_J_und[(g, l2, l1)]
                            if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
                        )
                        if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
                        else 0)
                    )
                    * sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t])
                    for t in m.set_T  # new
                    if t not in m.set_T_mdt  # optional
                    if t not in m.set_T_int  # not interfaced
                )
                +
                # optional individual arcs using interfaces, nominal direction
                (sum(
                    m.var_xi_arc_inv_gllj[(g, l1, l2, j)]
                    for j in m.set_J_int[(g, l1, l2)]  # interfaced
                    if j not in m.set_J_col[(g, l1, l2)]  # individual
                )
                if (g, l1, l2) in m.set_J_int
                else 0) +
                # optional individual arcs using interfaces, reverse direction
                (sum(
                    m.var_xi_arc_inv_gllj[(g, l2, l1, j)]
                    for j in m.set_J_int[(g, l2, l1)]  # interfaced
                    if j in m.set_J_und[(g, l2, l1)]  # undirected
                    if j not in m.set_J_col[(g, l2, l1)]  # individual
                )
                if ((g, l2, l1) in m.set_J_int and (g, l2, l1) in m.set_J_und)
                else 0) +
                # optional individual arcs not using interfaces, nominal dir.
                (sum(
                    sum(
                        m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)]
                        for h in m.set_H_gllj[(g, l1, l2, j)]
                    )
                    for j in m.set_J[(g, l1, l2)]
                    if j not in m.set_J_pre[(g, l1, l2)]  # not preexisting
                    if j not in m.set_J_mdt[(g, l1, l2)]  # not mandatory
                    if j not in m.set_J_int[(g, l1, l2)]  # not interfaced
                    if j not in m.set_J_col[(g, l1, l2)]  # individual
                )
                if (g, l1, l2) in m.set_J
                else 0) +
                # optional individual arcs not using interfaces, reverse dir.
                (sum(
                    sum(
                        m.var_delta_arc_inv_glljh[(g, l2, l1, j, h)]
                        for h in m.set_H_gllj[(g, l2, l1, j)]
                    )
                    for j in m.set_J[(g, l2, l1)]
                    if j in m.set_J_und[(g, l2, l1)]  # undirected
                    if j not in m.set_J_pre[(g, l2, l1)]  # not preexisting
                    if j not in m.set_J_mdt[(g, l2, l1)]  # not mandatory
                    if j not in m.set_J_int[(g, l2, l1)]  # not interfaced
                    if j not in m.set_J_col[(g, l2, l1)]  # individual
                )
                if (g, l2, l1) in m.set_J
                else 0)
                <= m.param_max_number_parallel_arcs[(g, l1, l2)]
            )
else: # the number of options is lower than or equal to the limit: skip
return pyo.Constraint.Skip
model.constr_limited_parallel_arcs_per_direction = pyo.Constraint(
model.set_GLL_arc_max, rule=rule_constr_limited_parallel_arcs_per_direction
)
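    # note on the parenthesised conditional terms above (an illustrative
    # sketch, not model code): Python's conditional expression binds more
    # loosely than +, so "a if cond else 0 + b" parses as
    # "a if cond else (0 + b)" and silently drops b whenever cond is true:
    # a, b = 5, 7
    # (a if True else 0 + b)    # evaluates to 5: b is lost
    # ((a if True else 0) + b)  # evaluates to 12: the intended total
    # hence every conditional term in the sum above is wrapped in parentheses.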
# *************************************************************************
# *************************************************************************
    # a node can have at most one incoming arc, unless it also has outgoing arcs
def rule_constr_max_incoming_directed_arcs(m, g, l):
# check if the node is not among those subject to a limited number of incoming arcs
if l not in m.set_L_max_in_g[g]:
# it is not, skip this constraint
return pyo.Constraint.Skip
# max number of directed incoming arcs
n_max_dir_in = sum(
sum(
1
for j in m.set_J[(g, l_line, l)]
if j not in m.set_J_und[(g, l_line, l)]
) # directed
for l_line in m.set_L[g] # for every node
if l_line != l # cannot be the same node
# if l_line not in m.set_L_imp[g] # why?
if (g, l_line, l) in m.set_J
)
# check the maximum number of incoming arcs
if n_max_dir_in <= 1:
            # at most one incoming arc is possible: the constraint is redundant
return pyo.Constraint.Skip
else: # more than one incoming arc is possible
# number of (new) incoming directed arcs in a group
b_max_in_gl = 0
# the big m
M_gl = n_max_dir_in - 1 # has to be positive since n_max_dir_in > 1
            # TODO: add parentheses to avoid operator precedence issues
temp_constr = (
sum(
# *********************************************************
# interfaced groups
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_xi_arc_inv_t[t] # in t
for t in m.set_T_int
)
+
# *********************************************************
# optional non-interfaced groups
sum(
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_delta_arc_inv_th[(t, h)]
for h in m.set_H_t[t]
)
for t in m.set_T
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# *********************************************************
# interfaced arcs
(sum(
m.var_xi_arc_inv_gllj[(g, l_circ, l, j_circ)]
for j_circ in m.set_J[(g, l_circ, l)]
if j_circ not in m.set_J_und[(g, l_circ, l)] # directed
if j_circ in m.set_J_int[(g, l_circ, l)] # interfaced
if j_circ not in m.set_J_col[(g, l_circ, l)] # individual
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# optional non-interfaced arcs
(sum(
sum(
m.var_delta_arc_inv_glljh[(g, l_circ, l, j_dot, h_dot)]
for h_dot in m.set_H_gllj[(g, l_circ, l, j_dot)]
)
for j_dot in m.set_J[(g, l_circ, l)]
if j_dot not in m.set_J_und[(g, l_circ, l)] # directed
if j_dot not in m.set_J_int[(g, l_circ, l)] # not interfaced
if j_dot not in m.set_J_col[(g, l_circ, l)] # individual
if j_dot not in m.set_J_pre[(g, l_circ, l)] # new
if j_dot not in m.set_J_mdt[(g, l_circ, l)] # optional
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# preexisting directed arcs
(sum(
1
for j_pre_dir in m.set_J_pre[(g, l_circ, l)] # preexisting
if j_pre_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_pre
else 0) +
# *********************************************************
# mandatory directed arcs
(sum(
1
for j_mdt_dir in m.set_J_mdt[(g, l_circ, l)]
if j_mdt_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_mdt
else 0)
# *********************************************************
for l_circ in m.set_L[g]
if l_circ not in m.set_L_exp[g]
if l_circ != l
)
<= 1 # +
# M_gl*sum(
# # *********************************************************
# # outgoing arcs in interfaced groups, nominal direction
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # outgoing arcs in interfaced groups, reverse direction
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # TODO: outgoing arcs in non-interfaced optional groups, nominal
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # TODO: outgoing arcs in non-interfaced optional groups, reverse
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # interfaced individual outgoing arcs, nominal direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_int[(g,l,l_diamond)] # interfaced
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# ) if (g,l,l_diamond) in m.set_J_int else 0
# +
# # *********************************************************
# # interfaced individual undirected arcs, reverse direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j in m.set_J_int[(g,l_diamond,l)] # interfaced
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # outgoing non-interfaced individual optional arcs
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)]
# for h in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J[(g,l,l_diamond)]
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# if j not in m.set_J_mdt[(g,l,l_diamond)] # optional
# if j not in m.set_J_int[(g,l,l_diamond)] # interfaced
# ) if (g,l,l_diamond) in m.set_J else 0
# +
# # *********************************************************
# # individual non-interfaced undirected arcs, reverse dir.
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l_diamond,l,j,h)]
# for h in m.set_H_gllj[(g,l_diamond,l,j)])
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# if j not in m.set_J_mdt[(g,l_diamond,l)] # optional
# if j not in m.set_J_int[(g,l_diamond,l)] # interfaced
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # preselected outgonig arcs, nominal direction
# len(m.set_J_pre[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_pre else 0
# +
# # *********************************************************
# # mandatory outgoing arcs, nominal direction
# len(m.set_J_mdt[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_mdt else 0
# +
# # *********************************************************
# # undirected preselected arcs, reverse direction
# sum(1
# for j in m.set_J_pre[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_pre else 0
# +
# # *********************************************************
# # undirected mandatory arcs, reverse direction
# sum(1
# for j in m.set_J_mdt[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_mdt else 0
# # *********************************************************
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )
)
if type(temp_constr) == bool:
# trivial outcome
return pyo.Constraint.Feasible if temp_constr else pyo.Constraint.Infeasible
else:
# constraint is relevant
return temp_constr
model.constr_max_incoming_directed_arcs = pyo.Constraint(
model.set_GL, rule=rule_constr_max_incoming_directed_arcs
)
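    # note (a hedged reading of the commented-out big-M term above): the
    # intended relaxed form appears to be
    #   (number of selected incoming directed arcs)
    #       <= 1 + M_gl * (number of selected outgoing arcs)
    # so that nodes with at least one outgoing arc escape the limit;
    # M_gl = n_max_dir_in - 1 would then be the tightest valid constant,
    # since no more than n_max_dir_in directed arcs can reach the node anyway.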
# *************************************************************************
# *************************************************************************
def rule_constr_max_outgoing_directed_arcs(m, g, l):
# check if the node is not among those subject to a limited number of outgoing arcs
if l not in m.set_L_max_out_g[g]:
# it is not, skip this constraint
return pyo.Constraint.Skip
# max number of directed outgoing arcs
n_max_dir_out = sum(
sum(
1
for j in m.set_J[(g, l, l_line)]
if j not in m.set_J_und[(g, l, l_line)]
) # directed
for l_line in m.set_L[g]
if l_line != l
# if l_line not in m.set_L_exp[g] # cannot be an export: why?
if (g, l, l_line) in m.set_J
)
        # check the maximum number of outgoing arcs
if n_max_dir_out <= 1:
            # at most one outgoing arc is possible: the constraint is redundant
# TODO: consider this condition when defining the set
return pyo.Constraint.Skip
else: # more than one outgoing arc is possible
            # number of (new) outgoing directed arcs in a group
b_max_out_gl = 0
# the big m
M_gl = n_max_dir_out - 1 # has to be positive since n_max_dir_out > 1
            # TODO: add parentheses to avoid operator precedence issues
temp_constr = (
sum(
# *********************************************************
# interfaced groups
sum(
sum(
1
for j in m.set_J_col[(g, l, l_circ)] # part of group
if j not in m.set_J_und[(g, l, l_circ)] # directed
if (g, l, l_circ, j) in m.set_GLLJ_col_t[t]
)
* m.var_xi_arc_inv_t[t] # in t
for t in m.set_T_int
)
+
# *********************************************************
# optional non-interfaced groups
sum(
sum(
sum(
1
for j in m.set_J_col[(g, l, l_circ)] # part of group
if j not in m.set_J_und[(g, l, l_circ)] # directed
if (g, l, l_circ, j) in m.set_GLLJ_col_t[t]
)
* m.var_delta_arc_inv_th[(t, h)]
for h in m.set_H_t[t]
)
for t in m.set_T
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# *********************************************************
# interfaced arcs
(sum(
m.var_xi_arc_inv_gllj[(g, l, l_circ, j_circ)]
for j_circ in m.set_J[(g, l, l_circ)]
if j_circ not in m.set_J_und[(g, l, l_circ)] # directed
if j_circ in m.set_J_int[(g, l, l_circ)] # interfaced
if j_circ not in m.set_J_col[(g, l, l_circ)] # individual
)
if (g, l, l_circ) in m.set_J
else 0) +
# *********************************************************
# optional non-interfaced arcs
(sum(
sum(
m.var_delta_arc_inv_glljh[(g, l, l_circ, j_dot, h_dot)]
for h_dot in m.set_H_gllj[(g, l, l_circ, j_dot)]
)
for j_dot in m.set_J[(g, l, l_circ)]
if j_dot not in m.set_J_und[(g, l, l_circ)] # directed
if j_dot not in m.set_J_int[(g, l, l_circ)] # not interfaced
if j_dot not in m.set_J_col[(g, l, l_circ)] # individual
if j_dot not in m.set_J_pre[(g, l, l_circ)] # new
if j_dot not in m.set_J_mdt[(g, l, l_circ)] # optional
)
if (g, l, l_circ) in m.set_J
else 0) +
# *********************************************************
# preexisting directed arcs
(sum(
1
for j_pre_dir in m.set_J_pre[(g, l, l_circ)] # preexisting
if j_pre_dir not in m.set_J_und[(g, l, l_circ)] # directed
)
if (g, l, l_circ) in m.set_J_pre
else 0) +
# *********************************************************
# mandatory directed arcs
(sum(
1
for j_mdt_dir in m.set_J_mdt[(g, l, l_circ)]
if j_mdt_dir not in m.set_J_und[(g, l, l_circ)] # directed
)
if (g, l, l_circ) in m.set_J_mdt
else 0)
# *********************************************************
for l_circ in m.set_L[g]
if l_circ not in m.set_L_imp[g]
if l_circ != l
)
<= 1 # +
            # TODO: the commented-out block below was copy-pasted from the
            # incoming-arc rule and must be completely revised
# M_gl*sum(
# # *********************************************************
# # outgoing arcs in interfaced groups, nominal direction
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # outgoing arcs in interfaced groups, reverse direction
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # TODO: outgoing arcs in non-interfaced optional groups, nominal
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # TODO: outgoing arcs in non-interfaced optional groups, reverse
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # interfaced individual outgoing arcs, nominal direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_int[(g,l,l_diamond)] # interfaced
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# ) if (g,l,l_diamond) in m.set_J_int else 0
# +
# # *********************************************************
# # interfaced individual undirected arcs, reverse direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j in m.set_J_int[(g,l_diamond,l)] # interfaced
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # outgoing non-interfaced individual optional arcs
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)]
# for h in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J[(g,l,l_diamond)]
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# if j not in m.set_J_mdt[(g,l,l_diamond)] # optional
# if j not in m.set_J_int[(g,l,l_diamond)] # interfaced
# ) if (g,l,l_diamond) in m.set_J else 0
# +
# # *********************************************************
# # individual non-interfaced undirected arcs, reverse dir.
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l_diamond,l,j,h)]
# for h in m.set_H_gllj[(g,l_diamond,l,j)])
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# if j not in m.set_J_mdt[(g,l_diamond,l)] # optional
# if j not in m.set_J_int[(g,l_diamond,l)] # interfaced
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # preselected outgonig arcs, nominal direction
# len(m.set_J_pre[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_pre else 0
# +
# # *********************************************************
# # mandatory outgoing arcs, nominal direction
# len(m.set_J_mdt[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_mdt else 0
# +
# # *********************************************************
# # undirected preselected arcs, reverse direction
# sum(1
# for j in m.set_J_pre[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_pre else 0
# +
# # *********************************************************
# # undirected mandatory arcs, reverse direction
# sum(1
# for j in m.set_J_mdt[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_mdt else 0
# # *********************************************************
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )
)
if type(temp_constr) == bool:
# trivial outcome
return pyo.Constraint.Feasible if temp_constr else pyo.Constraint.Infeasible
else:
# constraint is relevant
return temp_constr
model.constr_max_outgoing_directed_arcs = pyo.Constraint(
model.set_GL, rule=rule_constr_max_outgoing_directed_arcs
)
# *************************************************************************
# *************************************************************************
return model
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
# imports
import pyomo.environ as pyo
# *****************************************************************************
# *****************************************************************************
def add_prices_block(
model: pyo.AbstractModel,
**kwargs
):
# *************************************************************************
# *************************************************************************
# model.node_price_block = pyo.Block(model.set_QPK)
price_other(model, **kwargs)
# price_block_other(model, **kwargs)
# *****************************************************************************
# *****************************************************************************
# TODO: try to implement it as a block
def price_block_other(
model: pyo.AbstractModel,
enable_default_values: bool = True,
enable_validation: bool = True,
enable_initialisation: bool = True
):
model.set_GLQPK = model.set_GL_exp_imp*model.set_QPK
def rule_node_prices(b, g, l, q, p, k):
        # transacted flow (imported or exported, depending on the node type)
        def bounds_var_trans_flow_s(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
b.var_trans_flow_s = pyo.Var(
b.set_GLQPKS, within=pyo.NonNegativeReals, bounds=bounds_var_trans_flow_s
)
# imported flow cost
def rule_constr_imp_flow_cost(m, g, l, q, p, k):
return (
sum(
m.var_if_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_ifc_glqpk[(g, l, q, p, k)]
)
model.constr_imp_flow_cost = pyo.Constraint(
model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
)
# imported flows
def rule_constr_imp_flows(m, g, l, q, p, k):
return sum(
m.var_v_glljqk[(g, l, l_star, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_imp[g]
for j in m.set_J[(g, l, l_star)] # only directed arcs
) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_imp_flows = pyo.Constraint(
model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
)
# if (g,l) in b.parent_block().set_GL_imp:
# # import node
# pass
# elif (g,l) in b.parent_block().set_GL_exp:
# # export node
# pass
# otherwise: do nothing
model.node_price_block = pyo.Block(model.set_GLQPK, rule=rule_node_prices)
# set of price segments
model.node_price_block.set_S = pyo.Set()
    # set of GLQPKS tuples
def init_set_GLQPKS(m):
return (
(g, l, q, p, k, s)
# for (g,l) in m.set_GL_exp_imp
# for (q,k) in m.set_QK
for (g, l, q, p, k) in m.node_price_block.set_S
for s in m.node_price_block.set_S[(g, l, q, p, k)]
)
model.node_price_block.set_GLQPKS = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
)
def init_set_GLQPKS_exp(m):
return (
glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
)
model.node_price_block.set_GLQPKS_exp = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
)
def init_set_GLQPKS_imp(m):
return (
glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
)
model.node_price_block.set_GLQPKS_imp = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
)
# *************************************************************************
# *************************************************************************
# parameters
# resource prices
model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
    # maximum resource volume for each price segment
model.param_v_max_glqpks = pyo.Param(
model.set_GLQPKS,
within=pyo.NonNegativeReals
)
# *************************************************************************
# *************************************************************************
# variables
# *************************************************************************
# *************************************************************************
# exported flow
# TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
model.var_ef_glqpks = pyo.Var(
model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
)
# *************************************************************************
# *************************************************************************
# exported flow revenue
def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
return (
sum(
m.var_ef_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_efr_glqpk[(g, l, q, p, k)]
)
model.constr_exp_flow_revenue = pyo.Constraint(
model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
)
# exported flows
def rule_constr_exp_flows(m, g, l, q, p, k):
return sum(
m.var_v_glljqk[(g, l_star, l, j, q, k)]
* m.param_eta_glljqk[(g, l_star, l, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_exp[g]
for j in m.set_J[(g, l_star, l)] # only directed arcs
) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_exp_flows = pyo.Constraint(
model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
)
# *************************************************************************
# *************************************************************************
# # non-convex price functions
# if not convex_price_function:
# # delta variables
# model.var_active_segment_glqpks = pyo.Var(
# model.set_GLQPKS, within=pyo.Binary
# )
# # segments must be empty if the respective delta variable is zero
# def rule_constr_empty_segment_if_delta_zero_imp(m, g, l, q, p, k, s):
# return (
# m.var_if_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*
# m.var_active_segment_glqpks[(g,l,q,p,k,s)]
# )
# model.constr_empty_segment_if_delta_zero_imp = pyo.Constraint(
# model.set_GLQPKS_imp, rule=rule_constr_empty_segment_if_delta_zero_imp
# )
# # segments must be empty if the respective delta variable is zero
# def rule_constr_empty_segment_if_delta_zero_exp(m, g, l, q, p, k, s):
# return (
# m.var_ef_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*
# m.var_active_segment_glqpks[(g,l,q,p,k,s)]
# )
# model.constr_empty_segment_if_delta_zero_exp = pyo.Constraint(
# model.set_GLQPKS_exp, rule=rule_constr_empty_segment_if_delta_zero_exp
# )
# # if delta var is one, previous ones must be one too
# def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
# if s == len(m.set_S)-1:
# return pyo.Constraint.Skip
# return (
# m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# )
# model.constr_delta_summing_logic = pyo.Constraint(
# model.set_GLQPKS, rule=rule_constr_delta_summing_logic
# )
# # if delta var is zero, subsequent ones must also be zero
# def rule_constr_delta_next_zeros(m, g, l, q, p, k, s):
# if s == len(m.set_S)-1:
# return pyo.Constraint.Skip
# return (
# 1-m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# )
# model.constr_delta_next_zeros = pyo.Constraint(
# model.set_GLQPKS, rule=rule_constr_delta_next_zeros
# )
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
# def price_other2(
# model: pyo.AbstractModel,
# convex_price_function: bool = False,
# enable_default_values: bool = True,
# enable_validation: bool = True,
# enable_initialisation: bool = True
# ):
# # set of price segments
# model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
# # set of GLQKS tuples
# def init_set_GLQPKS(m):
# return (
# (g, l, q, p, k, s)
# # for (g,l) in m.set_GL_exp_imp
# # for (q,k) in m.set_QK
# for (g, l, q, p, k) in m.set_S
# for s in m.set_S[(g, l, q, p, k)]
# )
# model.set_GLQPKS = pyo.Set(
# dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
# )
# def init_set_GLQPKS_exp(m):
# return (
# glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
# )
# model.set_GLQPKS_exp = pyo.Set(
# dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
# )
# def init_set_GLQPKS_imp(m):
# return (
# glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
# )
# model.set_GLQPKS_imp = pyo.Set(
# dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
# )
# # *************************************************************************
# # *************************************************************************
# # parameters
# # resource prices
# model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# # maximum resource volumes for each prices
# model.param_v_max_glqpks = pyo.Param(
# model.set_GLQPKS,
# within=pyo.NonNegativeReals
# )
# # *************************************************************************
# # *************************************************************************
# # variables
# # *************************************************************************
# # *************************************************************************
# # exported flow
# # TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
# def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
# if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# # predefined finite capacity
# return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
# else:
# # infinite capacity
# return (0, None)
# model.var_ef_glqpks = pyo.Var(
# model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
# )
# # imported flow
# def bounds_var_if_glqpks(m, g, l, q, p, k, s):
# if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# # predefined finite capacity
# return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
# else:
# # infinite capacity
# return (0, None)
# model.var_if_glqpks = pyo.Var(
# model.set_GLQPKS_imp, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks
# )
# # *************************************************************************
# # *************************************************************************
# # exported flow revenue
# def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
# return (
# sum(
# m.var_ef_glqpks[(g, l, q, p, k, s)]
# * m.param_p_glqpks[(g, l, q, p, k, s)]
# for s in m.set_S[(g, l, q, p, k)]
# )
# == m.var_efr_glqpk[(g, l, q, p, k)]
# )
# model.constr_exp_flow_revenue = pyo.Constraint(
# model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
# )
# # imported flow cost
# def rule_constr_imp_flow_cost(m, g, l, q, p, k):
# return (
# sum(
# m.var_if_glqpks[(g, l, q, p, k, s)]
# * m.param_p_glqpks[(g, l, q, p, k, s)]
# for s in m.set_S[(g, l, q, p, k)]
# )
# == m.var_ifc_glqpk[(g, l, q, p, k)]
# )
# model.constr_imp_flow_cost = pyo.Constraint(
# model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
# )
# # exported flows
# def rule_constr_exp_flows(m, g, l, q, p, k):
# return sum(
# m.var_v_glljqk[(g, l_star, l, j, q, k)]
# * m.param_eta_glljqk[(g, l_star, l, j, q, k)]
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# for j in m.set_J[(g, l_star, l)] # only directed arcs
# ) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
# model.constr_exp_flows = pyo.Constraint(
# model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
# )
# # imported flows
# def rule_constr_imp_flows(m, g, l, q, p, k):
# return sum(
# m.var_v_glljqk[(g, l, l_star, j, q, k)]
# for l_star in m.set_L[g]
# if l_star not in m.set_L_imp[g]
# for j in m.set_J[(g, l, l_star)] # only directed arcs
# ) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
# model.constr_imp_flows = pyo.Constraint(
# model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
# )
# # *************************************************************************
# # *************************************************************************
# # non-convex price functions
# if not convex_price_function:
# # delta variables
# model.var_active_segment_glqpks = pyo.Var(
# model.set_GLQPKS, within=pyo.Binary
# )
# # segments must be empty if the respective delta variable is zero
# def rule_constr_empty_segment_if_delta_zero_imp(m, g, l, q, p, k, s):
# return (
# m.var_if_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*
# m.var_active_segment_glqpks[(g,l,q,p,k,s)]
# )
# model.constr_empty_segment_if_delta_zero_imp = pyo.Constraint(
# model.set_GLQPKS_imp, rule=rule_constr_empty_segment_if_delta_zero_imp
# )
# # segments must be empty if the respective delta variable is zero
# def rule_constr_empty_segment_if_delta_zero_exp(m, g, l, q, p, k, s):
# return (
# m.var_ef_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*
# m.var_active_segment_glqpks[(g,l,q,p,k,s)]
# )
# model.constr_empty_segment_if_delta_zero_exp = pyo.Constraint(
# model.set_GLQPKS_exp, rule=rule_constr_empty_segment_if_delta_zero_exp
# )
# # if delta var is one, previous ones must be one too
# # if delta var is zero, the next ones must also be zero
# def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
# if s == len(m.set_S[(g,l,q,p,k)])-1:
# # last segment, skip
# return pyo.Constraint.Skip
# return (
# m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# )
# model.constr_delta_summing_logic = pyo.Constraint(
# model.set_GLQPKS, rule=rule_constr_delta_summing_logic
# )
# # if a segment is not completely used, the next ones must remain empty
# def rule_constr_fill_up_segment_before_next(m, g, l, q, p, k, s):
# if s == len(m.set_S[(g,l,q,p,k)])-1:
# # last segment, skip
# return pyo.Constraint.Skip
# if (g,l) in m.set_GL_imp:
# return (
# m.var_if_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]*
# m.param_v_max_glqpks[(g,l,q,p,k,s)]
# )
# else:
# return (
# m.var_ef_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]*
# m.param_v_max_glqpks[(g,l,q,p,k,s)]
# )
# # return (
# # m.var_if_glqpks[(g,l,q,p,k,s)]/m.param_v_max_glqpks[(g,l,q,p,k,s)] >=
# # m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# # )
# # return (
# # m.param_v_max_glqpks[(g,l,q,p,k,s)]-m.var_if_glqpks[(g,l,q,p,k,s)] <=
# # m.param_v_max_glqpks[(g,l,q,p,k,s)]*(1- m.var_active_segment_glqpks[(g,l,q,p,k,s+1)])
# # )
# model.constr_fill_up_segment_before_next = pyo.Constraint(
# model.set_GLQPKS, rule=rule_constr_fill_up_segment_before_next
# )
# *****************************************************************************
# *****************************************************************************
def price_other(
model: pyo.AbstractModel,
convex_price_function: bool = True,
enable_default_values: bool = True,
enable_validation: bool = True,
enable_initialisation: bool = True
):
# auxiliary set for pyomo
model.set_GLQPK = model.set_GL_exp_imp*model.set_QPK
# set of price segments
model.set_S = pyo.Set(model.set_GLQPK)
    # set of GLQPKS tuples
def init_set_GLQPKS(m):
return (
(g, l, q, p, k, s)
# for (g,l) in m.set_GL_exp_imp
# for (q,k) in m.set_QK
for (g, l, q, p, k) in m.set_S
for s in m.set_S[(g, l, q, p, k)]
)
model.set_GLQPKS = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
)
# *************************************************************************
# *************************************************************************
# parameters
# resource prices
model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# price function convexity
model.param_price_function_is_convex = pyo.Param(
model.set_GLQPK,
within=pyo.Boolean
)
    # maximum resource volume for each price segment
model.param_v_max_glqpks = pyo.Param(
model.set_GLQPKS,
within=pyo.NonNegativeReals
)
# *************************************************************************
# *************************************************************************
# variables
# *************************************************************************
# *************************************************************************
# import and export flows
def bounds_var_trans_flows_glqpks(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
model.var_trans_flows_glqpks = pyo.Var(
model.set_GLQPKS, within=pyo.NonNegativeReals, bounds=bounds_var_trans_flows_glqpks
)
# *************************************************************************
# *************************************************************************
# import flow costs and export flow revenues
def rule_constr_trans_monetary_flows(m, g, l, q, p, k):
if (g,l) in m.set_GL_imp:
return (
sum(
m.var_trans_flows_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_ifc_glqpk[(g, l, q, p, k)]
)
else:
return (
sum(
m.var_trans_flows_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_efr_glqpk[(g, l, q, p, k)]
)
model.constr_trans_monetary_flows = pyo.Constraint(
model.set_GLQPK, rule=rule_constr_trans_monetary_flows
)
# imported and exported flows
def rule_constr_trans_flows(m, g, l, q, p, k):
if (g,l) in m.set_GL_imp:
return sum(
m.var_v_glljqk[(g, l, l_star, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_imp[g]
for j in m.set_J[(g, l, l_star)] # only directed arcs
) == sum(m.var_trans_flows_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
else:
return sum(
m.var_v_glljqk[(g, l_star, l, j, q, k)]
* m.param_eta_glljqk[(g, l_star, l, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_exp[g]
for j in m.set_J[(g, l_star, l)] # only directed arcs
) == sum(m.var_trans_flows_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_trans_flows = pyo.Constraint(
model.set_GLQPK, rule=rule_constr_trans_flows
)
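    # worked example (comments only, hypothetical numbers): an import node
    # with two price segments, s=0 with v_max = 10 at p = 1.0 and s=1 with
    # v_max = 10 at p = 2.0, importing 15 flow units yields
    #   var_trans_flows_glqpks[..., 0] = 10, var_trans_flows_glqpks[..., 1] = 5
    # and an import cost of 10*1.0 + 5*2.0 = 20.0 via the monetary flow
    # constraint above. With convex (non-decreasing) prices, cost
    # minimisation fills the cheap segment first on its own; otherwise the
    # binary segment logic below must enforce the fill order explicitly.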
# *************************************************************************
# *************************************************************************
# non-convex price functions
# delta variables
model.var_active_segment_glqpks = pyo.Var(
model.set_GLQPKS, within=pyo.Binary
)
# segments must be empty if the respective delta variable is zero
def rule_constr_empty_segment_if_delta_zero(m, g, l, q, p, k, s):
if len(m.set_S[(g,l,q,p,k)]) == 1 or m.param_price_function_is_convex[(g,l,q,p,k)]:
            # single segment or convex price function: skip
return pyo.Constraint.Skip
return (
m.var_trans_flows_glqpks[(g,l,q,p,k,s)] <=
m.param_v_max_glqpks[(g,l,q,p,k,s)]*
m.var_active_segment_glqpks[(g,l,q,p,k,s)]
)
model.constr_empty_segment_if_delta_zero = pyo.Constraint(
model.set_GLQPKS, rule=rule_constr_empty_segment_if_delta_zero
)
# if delta var is one, previous ones must be one too
# if delta var is zero, the next ones must also be zero
def rule_constr_delta_summing_logic(m, g, l, q, p, k, s):
if s == len(m.set_S[(g,l,q,p,k)])-1 or m.param_price_function_is_convex[(g,l,q,p,k)]:
            # last segment or convex price function: skip
return pyo.Constraint.Skip
return (
m.var_active_segment_glqpks[(g,l,q,p,k,s)] >=
m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
)
model.constr_delta_summing_logic = pyo.Constraint(
model.set_GLQPKS, rule=rule_constr_delta_summing_logic
)
# if a segment is not completely used, the next ones must remain empty
def rule_constr_fill_up_segment_before_next(m, g, l, q, p, k, s):
if s == len(m.set_S[(g,l,q,p,k)])-1 or m.param_price_function_is_convex[(g,l,q,p,k)]:
            # last segment or convex price function: skip
return pyo.Constraint.Skip
return (
m.var_trans_flows_glqpks[(g,l,q,p,k,s)] >=
m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]*
m.param_v_max_glqpks[(g,l,q,p,k,s)]
)
# return (
# m.var_if_glqpks[(g,l,q,p,k,s)]/m.param_v_max_glqpks[(g,l,q,p,k,s)] >=
# m.var_active_segment_glqpks[(g,l,q,p,k,s+1)]
# )
# return (
# m.param_v_max_glqpks[(g,l,q,p,k,s)]-m.var_if_glqpks[(g,l,q,p,k,s)] <=
# m.param_v_max_glqpks[(g,l,q,p,k,s)]*(1- m.var_active_segment_glqpks[(g,l,q,p,k,s+1)])
# )
model.constr_fill_up_segment_before_next = pyo.Constraint(
model.set_GLQPKS, rule=rule_constr_fill_up_segment_before_next
)
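    # sketch of the delta logic above (comments only): with three segments,
    # the admissible activation patterns are (0,0,0), (1,0,0), (1,1,0) and
    # (1,1,1); constr_delta_summing_logic rules out gaps such as (0,1,0),
    # constr_empty_segment_if_delta_zero keeps inactive segments at zero
    # flow, and constr_fill_up_segment_before_next forces segment s to reach
    # its maximum volume (param_v_max_glqpks) before segment s+1 may open.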
# *****************************************************************************
# *****************************************************************************
def price_block_lambda(model: pyo.AbstractModel, **kwargs):
raise NotImplementedError
# *****************************************************************************
# *****************************************************************************
def price_block_delta(model: pyo.AbstractModel, **kwargs):
raise NotImplementedError
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -2,7 +2,8 @@
import pyomo.environ as pyo
from math import isfinite, inf
from .blocks.networks import add_network_restrictions
from .blocks.prices import add_prices_block
# *****************************************************************************
# *****************************************************************************
......@@ -22,7 +23,7 @@ def create_model(
# create model object
model = pyo.AbstractModel(name)
# *************************************************************************
# *************************************************************************
......@@ -84,14 +85,7 @@ def create_model(
# set of exporting nodes on each network
model.set_L_exp = pyo.Set(model.set_G, within=model.set_L)
# set of nodes on network g incompatible with having more than one incoming
# arc unless there are outgoing arcs too
model.set_L_max_in_g = pyo.Set(
model.set_G, within=model.set_L
) # should inherently exclude import nodes
# *************************************************************************
# *************************************************************************
......@@ -395,45 +389,6 @@ def create_model(
# *************************************************************************
# set of price segments
model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK)
# set of GLQKS tuples
def init_set_GLQPKS(m):
return (
(g, l, q, p, k, s)
# for (g,l) in m.set_GL_exp_imp
# for (q,k) in m.set_QK
for (g, l, q, p, k) in m.set_S
for s in m.set_S[(g, l, q, p, k)]
)
model.set_GLQPKS = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None)
)
def init_set_GLQPKS_exp(m):
return (
glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]]
)
model.set_GLQPKS_exp = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None)
)
def init_set_GLQPKS_imp(m):
return (
glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]]
)
model.set_GLQPKS_imp = pyo.Set(
dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None)
)
# *************************************************************************
# all arcs
# set of GLLJ tuples for all arcs (undirected arcs appear twice)
......@@ -1445,14 +1400,6 @@ def create_model(
model.set_QPK, within=pyo.PositiveReals, default=1
)
# resource prices
model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# maximum resource volumes for each prices
model.param_v_max_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
# converters
# externality cost per input unit
......@@ -1617,26 +1564,6 @@ def create_model(
model.set_GL_not_exp_imp, model.set_QK, within=pyo.Reals, default=0
)
# maximum number of arcs per node pair
model.param_max_number_parallel_arcs = pyo.Param(
model.set_GLL,
# within=pyo.PositiveIntegers,
within=pyo.PositiveReals,
default=inf,
)
def init_set_GLL_arc_max(m):
return (
(g, l1, l2)
for (g, l1, l2) in m.param_max_number_parallel_arcs
if isfinite(m.param_max_number_parallel_arcs[(g, l1, l2)])
)
model.set_GLL_arc_max = pyo.Set(
dimen=3, within=model.set_GLL, initialize=init_set_GLL_arc_max
)
# effect of system inputs on specific network and node pairs
model.param_a_nw_glimqk = pyo.Param(
......@@ -1835,36 +1762,6 @@ def create_model(
model.set_GL_imp, model.set_QPK, within=pyo.NonNegativeReals
)
# exported flow
# TODO: validate the bounds by ensuring inf. cap. only exists in last segm.
def bounds_var_ef_glqpks(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
model.var_ef_glqpks = pyo.Var(
model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks
)
# imported flow
def bounds_var_if_glqpks(m, g, l, q, p, k, s):
if (g, l, q, p, k, s) in m.param_v_max_glqpks:
# predefined finite capacity
return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)])
else:
# infinite capacity
return (0, None)
model.var_if_glqpks = pyo.Var(
model.set_GLQPKS_imp, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks
)
# *************************************************************************
# arcs
......@@ -2127,67 +2024,6 @@ def create_model(
model.constr_sdncf_q = pyo.Constraint(model.set_Q, rule=rule_sdncf_q)
# exported flow revenue
def rule_constr_exp_flow_revenue(m, g, l, q, p, k):
return (
sum(
m.var_ef_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_efr_glqpk[(g, l, q, p, k)]
)
model.constr_exp_flow_revenue = pyo.Constraint(
model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue
)
# imported flow cost
def rule_constr_imp_flow_cost(m, g, l, q, p, k):
return (
sum(
m.var_if_glqpks[(g, l, q, p, k, s)]
* m.param_p_glqpks[(g, l, q, p, k, s)]
for s in m.set_S[(g, l, q, p, k)]
)
== m.var_ifc_glqpk[(g, l, q, p, k)]
)
model.constr_imp_flow_cost = pyo.Constraint(
model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost
)
# exported flows
def rule_constr_exp_flows(m, g, l, q, p, k):
return sum(
m.var_v_glljqk[(g, l_star, l, j, q, k)]
* m.param_eta_glljqk[(g, l_star, l, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_exp[g]
for j in m.set_J[(g, l_star, l)] # only directed arcs
) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_exp_flows = pyo.Constraint(
model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows
)
# imported flows
def rule_constr_imp_flows(m, g, l, q, p, k):
return sum(
m.var_v_glljqk[(g, l, l_star, j, q, k)]
for l_star in m.set_L[g]
if l_star not in m.set_L_imp[g]
for j in m.set_J[(g, l, l_star)] # only directed arcs
) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)])
model.constr_imp_flows = pyo.Constraint(
model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows
)
# *************************************************************************
# sum of discounted externalities
......@@ -2325,6 +2161,9 @@ def create_model(
model.constr_capex_system = pyo.Constraint(
model.set_I_new, rule=rule_capex_converter
)
# prices
add_prices_block(model)
# *************************************************************************
# *************************************************************************
......@@ -2475,577 +2314,9 @@ def create_model(
)
# *************************************************************************
# limit number of directed arcs per direction
def rule_constr_limited_parallel_arcs_per_direction(m, g, l1, l2):
# cases:
# 1) the number of options is lower than or equal to the limit (skip)
# 2) the number of preexisting and new mandatory arcs exceeds
# the limit (infeasible: pyo.Constraint.Infeasible)
# 3) all other cases (constraint)
# number of preexisting arcs going from l1 to l2
number_arcs_pre_nom = (
len(m.set_J_pre[(g, l1, l2)]) if (g, l1, l2) in m.set_J_pre else 0
)
number_arcs_pre_rev = (
sum(1 for j in m.set_J_pre[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_pre
else 0
)
# number of mandatory arcs going from l1 to l2
number_arcs_mdt_nom = (
len(m.set_J_mdt[(g, l1, l2)]) if (g, l1, l2) in m.set_J_mdt else 0
)
number_arcs_mdt_rev = (
sum(1 for j in m.set_J_mdt[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)])
if (g, l2, l1) in m.set_J_mdt
else 0
)
# number of optional arcs going from l1 to l2
number_arcs_opt_nom = (
sum(
1
for j in m.set_J[(g, l1, l2)]
if j not in m.set_J_pre[(g, l1, l2)]
if j not in m.set_J_mdt[(g, l1, l2)]
)
if (g, l1, l2) in m.set_J
else 0
)
number_arcs_opt_rev = (
sum(
1
for j in m.set_J[(g, l2, l1)]
if j not in m.set_J_pre[(g, l2, l1)]
if j not in m.set_J_mdt[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
)
if (g, l2, l1) in m.set_J
else 0
)
# build the constraints
if (
number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of unavoidable arcs already exceeds the limit
return pyo.Constraint.Infeasible
elif (
number_arcs_opt_nom
+ number_arcs_opt_rev
+ number_arcs_mdt_nom
+ number_arcs_mdt_rev
+ number_arcs_pre_nom
+ number_arcs_pre_rev
> m.param_max_number_parallel_arcs[(g, l1, l2)]
):
# the number of potential arcs exceeds the limit: cannot be skipped
return (
# preexisting arcs
number_arcs_pre_nom + number_arcs_pre_rev +
# mandatory arcs
number_arcs_mdt_nom + number_arcs_mdt_rev +
# arcs within an (optional) group that uses interfaces
sum(
(
sum(
1
for j in m.set_J_col[(g, l1, l2)]
if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
)
if (g, l1, l2) in m.set_J_col
else 0
+ sum(
1
for j in m.set_J_col[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
)
if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
else 0
)
* m.var_xi_arc_inv_t[t]
for t in m.set_T_int
)
+
# arcs within an (optional) group that does not use interfaces
sum(
(
sum(
1
for j in m.set_J_col[(g, l1, l2)]
if (g, l1, l2, j) in m.set_GLLJ_col_t[t]
)
if (g, l1, l2) in m.set_J_col
else 0
+ sum(
1
for j in m.set_J_col[(g, l2, l1)]
if j in m.set_J_und[(g, l2, l1)]
if (g, l2, l1, j) in m.set_GLLJ_col_t[t]
)
if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und)
else 0
)
* sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t])
for t in m.set_T # new
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# optional individual arcs using interfaces, nominal direction
            (sum(
                m.var_xi_arc_inv_gllj[(g, l1, l2, j)]
                for j in m.set_J_int[(g, l1, l2)]  # interfaced
                if j not in m.set_J_col[(g, l1, l2)]  # individual
            )
            if (g, l1, l2) in m.set_J_int
            else 0) +
            # optional individual arcs using interfaces, reverse direction
            (sum(
                m.var_xi_arc_inv_gllj[(g, l2, l1, j)]
                for j in m.set_J_int[(g, l2, l1)]  # interfaced
                if j in m.set_J_und[(g, l2, l1)]  # undirected
                if j not in m.set_J_col[(g, l2, l1)]  # individual
            )
            if ((g, l2, l1) in m.set_J_int and (g, l2, l1) in m.set_J_und)
            else 0) +
            # optional individual arcs not using interfaces, nominal dir.
            (sum(
                sum(
                    m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)]
                    for h in m.set_H_gllj[(g, l1, l2, j)]
                )
                for j in m.set_J[(g, l1, l2)]
                if j not in m.set_J_pre[(g, l1, l2)]  # not preexisting
                if j not in m.set_J_mdt[(g, l1, l2)]  # not mandatory
                if j not in m.set_J_int[(g, l1, l2)]  # not interfaced
                if j not in m.set_J_col[(g, l1, l2)]  # individual
            )
            if (g, l1, l2) in m.set_J
            else 0) +
            # optional individual arcs not using interfaces, reverse dir.
            (sum(
                sum(
                    m.var_delta_arc_inv_glljh[(g, l2, l1, j, h)]
                    for h in m.set_H_gllj[(g, l2, l1, j)]
                )
                for j in m.set_J_opt[(g, l2, l1)]
                if j in m.set_J_und[(g, l2, l1)]
                if j not in m.set_J_pre[(g, l2, l1)]  # not preexisting
                if j not in m.set_J_mdt[(g, l2, l1)]  # not mandatory
                if j not in m.set_J_int[(g, l2, l1)]  # not interfaced
                if j not in m.set_J_col[(g, l2, l1)]  # individual
            )
            if (g, l2, l1) in m.set_J
            else 0) <= m.param_max_number_parallel_arcs[(g, l1, l2)]
)
else: # the number of options is lower than or equal to the limit: skip
return pyo.Constraint.Skip
model.constr_limited_parallel_arcs_per_direction = pyo.Constraint(
model.set_GLL_arc_max, rule=rule_constr_limited_parallel_arcs_per_direction
)
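    # NOTE: the parentheses around each conditional term above are essential:
    # in Python, `x if c else 0 + y` parses as `x if c else (0 + y)`, so y is
    # silently dropped whenever c is True. A hypothetical illustration:
    # a, b = 5, 7
    # assert (a if True else 0 + b) == 5       # b is dropped
    # assert ((a if True else 0) + b) == 12    # the intended sum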
# *************************************************************************
    # there can be one incoming arc at most, if there are no outgoing arcs
def rule_constr_max_incoming_directed_arcs(m, g, l):
# check if the node is not among those subject to a limited number of incoming arcs
if l not in m.set_L_max_in_g[g]:
# it is not, skip this constraint
return pyo.Constraint.Skip
# max number of directed incoming arcs
n_max_dir_in = sum(
sum(
1
for j in m.set_J[(g, l_line, l)]
if j not in m.set_J_und[(g, l_line, l)]
) # directed
for l_line in m.set_L[g]
if l_line != l
if l_line not in m.set_L_imp[g]
if (g, l_line, l) in m.set_J
)
# check the maximum number of incoming arcs
if n_max_dir_in <= 1:
# there can only be one incoming arc at most: redundant constraint
return pyo.Constraint.Skip
else: # more than one incoming arc is possible
# *****************************************************************
# number of (new) incoming directed arcs in a group
# *****************************************************************
b_max_in_gl = 0
# the big m
M_gl = n_max_dir_in - 1 # has to be positive since n_max_dir_in > 1
            # TODO: add parentheses to avoid unintended results
temp_constr = (
sum(
# *********************************************************
# interfaced groups
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_xi_arc_inv_t[t] # in t
for t in m.set_T_int
)
+
# *********************************************************
# optional non-interfaced groups
sum(
sum(
sum(
1
for j in m.set_J_col[(g, l_circ, l)] # part of group
if j not in m.set_J_und[(g, l_circ, l)] # directed
if (g, l_circ, l, j) in m.set_GLLJ_col_t[t]
)
* m.var_delta_arc_inv_th[(t, h)]
for h in m.set_H_t[t]
)
for t in m.set_T
if t not in m.set_T_mdt # optional
if t not in m.set_T_int # not interfaced
)
+
# *********************************************************
# interfaced arcs
(sum(
m.var_xi_arc_inv_gllj[(g, l_circ, l, j_circ)]
for j_circ in m.set_J[(g, l_circ, l)]
if j_circ not in m.set_J_und[(g, l_circ, l)] # directed
if j_circ in m.set_J_int[(g, l_circ, l)] # interfaced
if j_circ not in m.set_J_col[(g, l_circ, l)] # individual
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# optional non-interfaced arcs
(sum(
sum(
m.var_delta_arc_inv_glljh[(g, l_circ, l, j_dot, h_dot)]
for h_dot in m.set_H_gllj[(g, l_circ, l, j_dot)]
)
for j_dot in m.set_J[(g, l_circ, l)]
if j_dot not in m.set_J_und[(g, l_circ, l)] # directed
if j_dot not in m.set_J_int[(g, l_circ, l)] # not interfaced
if j_dot not in m.set_J_col[(g, l_circ, l)] # individual
if j_dot not in m.set_J_mdt[(g, l_circ, l)] # optional
)
if (g, l_circ, l) in m.set_J
else 0) +
# *********************************************************
# preexisting directed arcs
(sum(
1
for j_pre_dir in m.set_J_pre[(g, l_circ, l)] # preexisting
if j_pre_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_pre
else 0) +
# *********************************************************
# mandatory directed arcs
(sum(
1
for j_mdt_dir in m.set_J_mdt[(g, l_circ, l)]
if j_mdt_dir not in m.set_J_und[(g, l_circ, l)] # directed
)
if (g, l_circ, l) in m.set_J_mdt
else 0)
# *********************************************************
for l_circ in m.set_L[g]
if l_circ not in m.set_L_exp[g]
if l_circ != l
)
<= 1 # +
# M_gl*sum(
# # *********************************************************
# # outgoing arcs in interfaced groups, nominal direction
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # outgoing arcs in interfaced groups, reverse direction
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*m.var_xi_arc_inv_t[t]
# for t in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # TODO: outgoing arcs in non-interfaced optional groups, nominal
# sum(sum(1
# for j in m.set_J_col[(g,l,l_diamond)]
# #if j in m.set_J_int[(g,l,l_diamond)]
# if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l,l_diamond) in m.set_J_col else 0
# +
# # TODO: outgoing arcs in non-interfaced optional groups, reverse
# sum(sum(1
# for j in m.set_J_col[(g,l_diamond,l)]
# #if j in m.set_J_int[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t]
# )*sum(
# m.var_delta_arc_inv_th[(t,h)]
# for h in m.set_H_t[t]
# )
# for t in m.set_T
# if t not in m.set_T_mdt
# if t not in m.set_T_int
# ) if (g,l_diamond,l) in m.set_J_col else 0
# +
# # *********************************************************
# # interfaced individual outgoing arcs, nominal direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_int[(g,l,l_diamond)] # interfaced
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# ) if (g,l,l_diamond) in m.set_J_int else 0
# +
# # *********************************************************
# # interfaced individual undirected arcs, reverse direction
# sum(m.var_xi_arc_inv_gllj[(g,l,l_diamond,j)]
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j in m.set_J_int[(g,l_diamond,l)] # interfaced
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
# # outgoing non-interfaced individual optional arcs
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)]
# for h in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J[(g,l,l_diamond)]
# if j not in m.set_J_col[(g,l,l_diamond)] # individual
# if j not in m.set_J_mdt[(g,l,l_diamond)] # optional
# if j not in m.set_J_int[(g,l,l_diamond)] # interfaced
# ) if (g,l,l_diamond) in m.set_J else 0
# +
# # *********************************************************
# # individual non-interfaced undirected arcs, reverse dir.
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l_diamond,l,j,h)]
# for h in m.set_H_gllj[(g,l_diamond,l,j)])
# for j in m.set_J_und[(g,l_diamond,l)] # undirected
# if j not in m.set_J_col[(g,l_diamond,l)] # individual
# if j not in m.set_J_mdt[(g,l_diamond,l)] # optional
# if j not in m.set_J_int[(g,l_diamond,l)] # interfaced
# ) if (g,l_diamond,l) in m.set_J_und else 0
# +
# # *********************************************************
            # # preselected outgoing arcs, nominal direction
# len(m.set_J_pre[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_pre else 0
# +
# # *********************************************************
# # mandatory outgoing arcs, nominal direction
# len(m.set_J_mdt[(g,l,l_diamond)]
# ) if (g,l,l_diamond) in m.set_J_mdt else 0
# +
# # *********************************************************
# # undirected preselected arcs, reverse direction
# sum(1
# for j in m.set_J_pre[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_pre else 0
# +
# # *********************************************************
# # undirected mandatory arcs, reverse direction
# sum(1
# for j in m.set_J_mdt[(g,l_diamond,l)]
# if j in m.set_J_und[(g,l_diamond,l)]
# ) if (g,l_diamond,l) in m.set_J_mdt else 0
# # *********************************************************
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )
)
if type(temp_constr) == bool:
# trivial outcome
return pyo.Constraint.Feasible if temp_constr else pyo.Constraint.Infeasible
else:
# constraint is relevant
return temp_constr
model.constr_max_incoming_directed_arcs = pyo.Constraint(
model.set_GL_not_exp_imp, rule=rule_constr_max_incoming_directed_arcs
)
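    # NOTE: illustrative big-M reasoning for the constraint above, with
    # hypothetical numbers: with n_max_dir_in = 3 potential incoming directed
    # arcs, M_gl = 2; re-enabling the commented-out outgoing-arc term would
    # allow up to 1 + M_gl = 3 incoming arcs at nodes with outgoing arcs,
    # i.e. the incoming-arc limit would be fully relaxed for such nodes.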
# *************************************************************************
# def rule_constr_max_outgoing_directed_arcs(m, g, l):
# pass
# model.constr_max_outgoing_directed_arcs = pyo.Constraint(
# model.set_GL_not_exp_imp,
# rule=rule_constr_max_outgoing_directed_arcs
# )
# # *************************************************************************
    # # there can be one outgoing arc at most, if there are no incoming arcs
# def rule_constr_max_outgoing_arcs(m,g,l):
# # the number of predefined incoming arcs
# n_in_pre = sum(
# len(m.set_J_pre[(g,l_star,l)]) # = n_in_pre
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# if l_star != l
# )
# # if there is at least one predefined incoming arc, skip constraint
# if n_in_pre >= 1:
# return pyo.Constraint.Skip
# # the number of non-predefined incoming arcs
# n_in_opt = sum(
# len(m.set_J_new[(g,l_star,l)]) # = n_in_pre
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# if l_star != l
# )
# n_in_max = n_in_pre + n_in_opt
# # the number of predefined outgoing arcs
# n_out_pre = sum(
# len(m.set_J_pre[(g,l,l_line)])
# for l_line in m.set_L[g]
# if l_line not in m.set_L_imp[g]
# if l_line != l
# )
# # the constraint is infeasible if the maximum number of incoming arcs
# # is zero and the number of predefined outgoing arcs is bigger than 1
# if n_in_max == 0 and n_out_pre >= 2:
# return pyo.Constraint.Infeasible
# # DONE: it is also infeasible if the maximum number of incoming arcs is
# # zero and the number of predefined outgoing arcs is one and the poten-
# # tial outgoing arcs include mandatory arcs (i.e. sum(...)=1 )
# n_out_fcd = sum(
# len(m.set_J_mdt[(g,l,l_line)])
# for l_line in m.set_L[g]
# if l_line not in m.set_L_imp[g]
# if l_line != l
# )
# if n_in_max == 0 and n_out_pre == 1 and n_out_fcd >= 1:
# return pyo.Constraint.Infeasible
# # the number of non-predefined outgoing arcs
# n_out_opt = sum(
# len(m.set_J_new[(g,l,l_line)])
# for l_line in m.set_L[g]
# if l_line not in m.set_L_imp[g]
# if l_line != l
# )
# n_out_max = n_out_pre + n_out_opt
# if n_out_max <= 1:
# # there can only be one outgoing arc at most: redundant constraint
# return pyo.Constraint.Skip
# else: # more than one outgoing arc is possible
# M_gl = n_out_max - 1
# return (
# sum(
# sum(
# sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,s)]
# for s in m.set_H_gllj[(g,l,l_diamond,j)])
# for j in m.set_J_new[(g,l,l_diamond)]
# )
# #+len(m.set_J_pre[(g,l,l_diamond)]) # = n_out_pre
# for l_diamond in m.set_L[g]
# if l_diamond not in m.set_L_imp[g]
# if l_diamond != l
# )+n_out_pre
# <= 1 + M_gl*
# sum(
# sum(
# sum(m.var_delta_arc_inv_glljh[
# (g,l_star,l,j_star,s_star)]
# for s_star in m.set_H_gllj[(g,l_star,l,j_star)])
# for j_star in m.set_J_new[(g,l_star,l)]
# )
# #+len(m.set_J_pre[(g,l_star,l)]) # = n_in_pre
# for l_star in m.set_L[g]
# if l_star not in m.set_L_exp[g]
# if l_star != l
# )+n_in_pre
# )
# model.constr_max_outgoing_arcs = pyo.Constraint(
# model.set_GL_not_exp_imp,
# rule=rule_constr_max_outgoing_arcs)
add_network_restrictions(model)
# *************************************************************************
# *************************************************************************
......
......@@ -612,21 +612,62 @@ class Network(nx.MultiDiGraph):
KEY_ARC_TECH_CAPACITY_INSTANTANEOUS,
KEY_ARC_TECH_STATIC_LOSS,
)
NET_TYPE_HYBRID = 0
NET_TYPE_TREE = 1
NET_TYPE_REV_TREE = 2
NET_TYPES = (
NET_TYPE_HYBRID,
NET_TYPE_TREE,
NET_TYPE_REV_TREE
)
    def __init__(self, network_type = NET_TYPE_HYBRID, **kwargs):
        # run base class init routine
        nx.MultiDiGraph.__init__(self, **kwargs)
# identify node types
self.identify_node_types()
        # declare variables for the nodes with directed arc limitations
        self.network_type = network_type
        self.nodes_w_in_dir_arc_limitations = dict()
        self.nodes_w_out_dir_arc_limitations = dict()
# *************************************************************************
# *************************************************************************
def _set_up_node(self, node_key, max_number_in_arcs: int = None, max_number_out_arcs: int = None):
if self.should_be_tree_network():
# nodes have to be part of a tree: one incoming arc per node at most
self.nodes_w_in_dir_arc_limitations[node_key] = 1
elif self.should_be_reverse_tree_network():
# nodes have to be part of a reverse tree: one outgoing arc per node at most
self.nodes_w_out_dir_arc_limitations[node_key] = 1
else:
# nodes have no peculiar restrictions or they are defined 1 by 1
if type(max_number_in_arcs) != type(None):
self.nodes_w_in_dir_arc_limitations[node_key] = max_number_in_arcs
if type(max_number_out_arcs) != type(None):
self.nodes_w_out_dir_arc_limitations[node_key] = max_number_out_arcs
# *************************************************************************
# *************************************************************************
def should_be_tree_network(self) -> bool:
return self.network_type == self.NET_TYPE_TREE
# *************************************************************************
# *************************************************************************
def should_be_reverse_tree_network(self) -> bool:
return self.network_type == self.NET_TYPE_REV_TREE
# *************************************************************************
# *************************************************************************
......@@ -661,23 +702,25 @@ class Network(nx.MultiDiGraph):
# add a new supply/demand node
    def add_source_sink_node(self, node_key, base_flow: dict, **kwargs):
node_dict = {
self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_SOURCE_SINK,
self.KEY_NODE_BASE_FLOW: base_flow,
}
self.add_node(node_key, **node_dict)
self._set_up_node(node_key, **kwargs)
# *************************************************************************
# *************************************************************************
# add a new waypoint node
    def add_waypoint_node(self, node_key, **kwargs):
node_dict = {self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_WAY}
self.add_node(node_key, **node_dict)
self._set_up_node(node_key, **kwargs)
# *************************************************************************
# *************************************************************************
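    # NOTE: a usage sketch under this diff's assumptions (hypothetical keys):
    # net = Network(network_type=Network.NET_TYPE_TREE)
    # net.add_waypoint_node('w0')
    # net.add_source_sink_node('d0', base_flow={(0, 0): 1.0})
    # # tree networks limit every node to one incoming directed arc at most
    # assert net.nodes_w_in_dir_arc_limitations['w0'] == 1
    # assert net.nodes_w_in_dir_arc_limitations['d0'] == 1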
......@@ -1256,6 +1299,31 @@ class Network(nx.MultiDiGraph):
return nx.is_tree(network_view)
# *************************************************************************
# *************************************************************************
def has_selected_antiparallel_arcs(self) -> bool:
"Returns True if any two nodes have selected arcs in both directions."
return len(self.find_selected_antiparallel_arcs()) != 0
# *************************************************************************
# *************************************************************************
    def find_selected_antiparallel_arcs(self) -> list:
        """Returns the node pairs with selected arcs in both directions."""
# check the existence of forward and reverse arcs in the same segment
arcs = [ # get the arcs selected
arc_key[0:2]
for arc_key in self.edges(keys=True)
if True in self.edges[arc_key][Network.KEY_ARC_TECH].options_selected
]
arcs = [ # get the selected arcs that exist both ways
arc_key
for arc_key in arcs
if (arc_key[1], arc_key[0]) in arcs
]
return arcs
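    # NOTE: a sketch of the expected output (hypothetical node keys): if arcs
    # ('A', 'B') and ('B', 'A') are both selected, the method returns both
    # orderings, e.g. [('A', 'B'), ('B', 'A')], and
    # has_selected_antiparallel_arcs() then evaluates to True.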
# *****************************************************************************
# *****************************************************************************
......@@ -63,6 +63,15 @@ class InfrastructurePlanningProblem(EnergySystem):
STATIC_LOSS_MODE_US,
STATIC_LOSS_MODE_DS,
)
NODE_PRICE_LAMBDA = 1
NODE_PRICE_DELTA = 2
NODE_PRICE_OTHER = 3
NODE_PRICES = (
NODE_PRICE_LAMBDA,
NODE_PRICE_DELTA,
NODE_PRICE_OTHER
)
# *************************************************************************
# *************************************************************************
......@@ -80,6 +89,7 @@ class InfrastructurePlanningProblem(EnergySystem):
converters: dict = None,
prepare_model: bool = True,
validate_inputs: bool = True,
node_price_model = NODE_PRICE_DELTA
): # TODO: switch to False when everything is more mature
# *********************************************************************
......@@ -1830,22 +1840,14 @@ class InfrastructurePlanningProblem(EnergySystem):
}
        set_L_max_in_g = {
            g: tuple(self.networks[g].nodes_w_in_dir_arc_limitations.keys())
            for g in self.networks.keys()
        }
        set_L_max_out_g = {
            g: tuple(self.networks[g].nodes_w_out_dir_arc_limitations.keys())
            for g in self.networks.keys()
        }
set_GL = tuple((g, l) for g in set_G for l in set_L[g])
......@@ -1897,7 +1899,7 @@ class InfrastructurePlanningProblem(EnergySystem):
for (g, l) in set_GL_exp_imp
for (q, p, k) in set_QPK
}
        # set of GLQPKS tuples
set_GLQPKS = tuple(
(*glqpk, s) for glqpk, s_tuple in set_S.items() for s in s_tuple
......@@ -2547,6 +2549,17 @@ class InfrastructurePlanningProblem(EnergySystem):
for s in set_S[(g, l, q, p, k)]
}
# price function convexity
param_price_function_is_convex = {
(g, l, q, p, k): (
self.networks[g].nodes[l][Network.KEY_NODE_PRICES][(q, p, k)].price_monotonically_increasing_with_volume()
if l in set_L_imp[g] else
self.networks[g].nodes[l][Network.KEY_NODE_PRICES][(q, p, k)].price_monotonically_decreasing_with_volume()
)
for (g, l, q, p, k) in set_S
}
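        # NOTE: intuition, with hypothetical figures: an import tariff whose
        # unit prices do not decrease with volume, e.g. prices [0.10, 0.15,
        # 0.25] over successive volume segments, yields a convex cost
        # function, so price_monotonically_increasing_with_volume() should
        # return True; export tariffs are the mirror case, where the price
        # function is convex when prices do not increase with volume.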
# maximum resource volume per segment (infinity is the default)
param_v_max_glqpks = {
......@@ -3317,7 +3330,7 @@ class InfrastructurePlanningProblem(EnergySystem):
"set_L_imp": set_L_imp,
"set_L_exp": set_L_exp,
"set_L_max_in_g": set_L_max_in_g,
        "set_L_max_out_g": set_L_max_out_g,
"set_GL": set_GL,
"set_GL_exp": set_GL_exp,
"set_GL_imp": set_GL_imp,
......@@ -3449,6 +3462,7 @@ class InfrastructurePlanningProblem(EnergySystem):
"param_c_df_qp": param_c_df_qp,
"param_c_time_qpk": param_c_time_qpk,
"param_p_glqpks": param_p_glqpks,
"param_price_function_is_convex": param_price_function_is_convex,
"param_v_max_glqpks": param_v_max_glqpks,
# *****************************************************************
# converters
......
......@@ -12,7 +12,11 @@ from numbers import Real
class ResourcePrice:
"""A class for piece-wise linear resource prices in network problems."""
    def __init__(
        self,
        prices: list or int,
        volumes: list = None
    ):
# how do we keep the size of the object as small as possible
# if the tariff is time-invariant, how can information be stored?
# - a flag
......@@ -206,30 +210,10 @@ class ResourcePrice:
# *************************************************************************
# *************************************************************************
def is_equivalent(self, other) -> bool:
"""Returns True if a given ResourcePrice is equivalent to another."""
# resources are equivalent if:
# 1) the prices are the same
# 2) the volume limits are the same
# the number of segments has to match
if self.number_segments() != other.number_segments():
return False # the number of segments do not match
# check the prices
if self.prices != other.prices:
return False # prices are different
# prices match, check the volumes
if self.volumes != other.volumes:
return False # volumes are different
return True # all conditions have been met
# *************************************************************************
# *************************************************************************
    def __eq__(self, o) -> bool:
        """Returns True if a given ResourcePrice is equivalent to another."""
        return hash(self) == hash(o)
def __hash__(self):
return hash(
......@@ -260,9 +244,7 @@ def are_prices_time_invariant(resource_prices_qpk: dict) -> bool:
# check if the tariffs per period and assessment are equivalent
for qp, qpk_list in qpk_qp.items():
for i in range(len(qpk_list) - 1):
            if resource_prices_qpk[qpk_list[0]] != resource_prices_qpk[qpk_list[i + 1]]:
return False
# all tariffs are equivalent per period and assessment: they are invariant
return True
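# NOTE: a minimal sketch of the hash-based equality above, assuming the hash
# covers both the prices and the volumes (hypothetical figures):
# p1 = ResourcePrice(prices=[0.1, 0.2], volumes=[100, None])
# p2 = ResourcePrice(prices=[0.1, 0.2], volumes=[100, None])
# assert p1 == p2  # equal because hash(p1) == hash(p2)
# # caveat: hash equality can collide in principle; it is only a safe
# # equivalence test if the hash is injective over (prices, volumes)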
......
......@@ -134,8 +134,13 @@ class TimeFrame:
def consecutive_qpk(self, qpk_keyed_dict: dict) -> bool:
"Returns True if all (q,p,k) tuple keys are valid and consecutive."
# all k intervals have to be consecutive for each (q,p) pair
set_qp = set(qpk[0:2] for qpk in qpk_keyed_dict.keys())
for qp in set_qp:
for k in range(self.number_time_intervals(qp[0])):
if (*qp,k) not in qpk_keyed_dict:
return False
return True
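    # NOTE: illustrative keys (hypothetical): with three time intervals in
    # assessment 'q', {('q', 'p', 0): 1.0, ('q', 'p', 1): 2.0,
    # ('q', 'p', 2): 3.0} is consecutive; omitting ('q', 'p', 1) would make
    # this method return False.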
# *************************************************************************
# *************************************************************************
......@@ -240,6 +245,29 @@ class TimeFrame:
# *************************************************************************
# *************************************************************************
def assessments_overlap(self) -> bool:
"Returns True if any period is covered by more than one assessment."
# if there is only one assessment, return False
if self.number_assessments() == 1:
return False
else:
# if there is more than one assessment, check whether two or more
# cover the same period
            qs = tuple(self.assessments)
            # compare every pair of assessments, not just consecutive ones
            for i, q1 in enumerate(qs):
                for q2 in qs[i + 1:]:
                    # for each period in one assessment (q1)
                    for p in self.reporting_periods[q1]:
                        # check if it is covered by the other assessment (q2)
                        if p in self.reporting_periods[q2]:
                            # p is covered by at least two assessments (q1 and q2)
                            return True
            # if no period is covered by more than one assessment, return False
            return False
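    # NOTE: illustrative case (hypothetical): with reporting_periods
    # {'q1': (0, 1), 'q2': (1, 2)}, period 1 is covered by both assessments,
    # so this method returns True; {'q1': (0,), 'q2': (1,)} returns False.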
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
......@@ -279,7 +307,7 @@ class EconomicTimeFrame(TimeFrame):
# dict: 1 value per p and q
self._discount_rates = dict(discount_rates_q)
else:
            raise TypeError('Unrecognised inputs.')
# TODO: validate the discount rate object
......
......@@ -6,7 +6,6 @@
# import uuid
# local, external
import networkx as nx
import pyomo.environ as pyo
from matplotlib import pyplot as plt
......@@ -14,398 +13,198 @@ from matplotlib import pyplot as plt
# local, internal
from .problem import InfrastructurePlanningProblem
from .network import Network
# *****************************************************************************
# *****************************************************************************
def pre_statistics(ipp: InfrastructurePlanningProblem,
                   node_keys = None):
    "Returns preliminary problem statistics."
    if type(node_keys) == type(None):
        # pick all
        node_keys = tuple(
            (g, node_key)
            for g, net in ipp.networks.items()
            for node_key in net.nodes()
            if Network.KEY_NODE_BASE_FLOW in net.nodes[node_key]
        )
    # aggregate static (end use) demand
    aggregate_static_demand_qk = {
        qk: sum(
            ipp.networks[g].nodes[node_key][Network.KEY_NODE_BASE_FLOW][qk]
            for g, node_key in node_keys
            if ipp.networks[g].nodes[node_key][Network.KEY_NODE_BASE_FLOW][qk] >= 0
        )
        for qk in ipp.time_frame.qk()
    }
    # aggregate static (primary) supply
    aggregate_static_supply_qk = {
        qk: - sum(
            ipp.networks[g].nodes[node_key][Network.KEY_NODE_BASE_FLOW][qk]
            for g, node_key in node_keys
            if ipp.networks[g].nodes[node_key][Network.KEY_NODE_BASE_FLOW][qk] < 0
        )
        for qk in ipp.time_frame.qk()
    }
    # static nodal balance
    aggregate_static_balance_qk = {
        qk: aggregate_static_demand_qk[qk]-aggregate_static_supply_qk[qk]
        for qk in ipp.time_frame.qk()
    }
    return (
        aggregate_static_demand_qk,
        aggregate_static_supply_qk,
        aggregate_static_balance_qk
    )

# *****************************************************************************
# *****************************************************************************

def statistics(ipp: InfrastructurePlanningProblem,
               import_node_keys: tuple = None,
               export_node_keys: tuple = None,
               other_node_keys: tuple = None):
    "Returns flow statistics using the optimisation results."
    if type(import_node_keys) == type(None):
        # pick all import nodes
        import_node_keys = tuple(
            (g, l)
            for g in ipp.networks
            for l in ipp.networks[g].import_nodes
        )
    if type(export_node_keys) == type(None):
        # pick all export nodes
        export_node_keys = tuple(
            (g, l)
            for g in ipp.networks
            for l in ipp.networks[g].export_nodes
        )
    if type(other_node_keys) == type(None):
        # pick all
        other_node_keys = tuple(
            (g, node_key)
            for g, net in ipp.networks.items()
            for node_key in net.nodes()
            if Network.KEY_NODE_BASE_FLOW in net.nodes[node_key]
        )
    # imports
    imports_qpk = {
        qpk: pyo.value(
            sum(
                ipp.instance.var_trans_flows_glqpks[(g, l_imp, *qpk, s)]
                for g, l_imp in import_node_keys
                for s in ipp.instance.set_S[(g, l_imp, *qpk)]
            )
            * ipp.instance.param_c_time_qpk[qpk]
        )
        for qpk in ipp.time_frame.qpk()
    }
    # exports
    exports_qpk = {
        qpk: pyo.value(
            sum(
                ipp.instance.var_trans_flows_glqpks[(g, l_exp, *qpk, s)]
                for g, l_exp in export_node_keys
                for s in ipp.instance.set_S[(g, l_exp, *qpk)]
            )
            * ipp.instance.param_c_time_qpk[qpk]
        )
        for qpk in ipp.time_frame.qpk()
    }
    # balance
    balance_qpk = {
        qpk: imports_qpk[qpk]-exports_qpk[qpk]
        for qpk in ipp.time_frame.qpk()
    }
    # import costs
    import_costs_qpk = {
        qpk: pyo.value(
            sum(
                ipp.instance.var_ifc_glqpk[(g, l_imp, *qpk)]
                for g, l_imp in import_node_keys
            )
            * ipp.instance.param_c_time_qpk[qpk]
        )
        for qpk in ipp.time_frame.qpk()
    }
    # export revenue
    export_revenue_qpk = {
        qpk: pyo.value(
            sum(
                ipp.instance.var_efr_glqpk[(g, l_exp, *qpk)]
                for g, l_exp in export_node_keys
            )
            * ipp.instance.param_c_time_qpk[qpk]
        )
        for qpk in ipp.time_frame.qpk()
    }
    # net cash flow
    ncf_qpk = {
        qpk: import_costs_qpk[qpk]-export_revenue_qpk[qpk]
        for qpk in ipp.time_frame.qpk()
    }
    # aggregate static (end use) demand
    aggregate_static_demand_qpk = {
        qpk: pyo.value(
            sum(
                ipp.instance.param_v_base_glqk[(g, l, qpk[0], qpk[2])]
                for g, l in other_node_keys
                if ipp.instance.param_v_base_glqk[(g, l, qpk[0], qpk[2])] >= 0
            )
            * ipp.instance.param_c_time_qpk[qpk]
        )
        for qpk in ipp.time_frame.qpk()
    }
    # aggregate static (primary) supply
    aggregate_static_supply_qpk = {
        qpk: -pyo.value(
            sum(
                ipp.instance.param_v_base_glqk[(g, l, qpk[0], qpk[2])]
                for g, l in other_node_keys
                if ipp.instance.param_v_base_glqk[(g, l, qpk[0], qpk[2])] < 0
            )
            * ipp.instance.param_c_time_qpk[qpk]
        )
        for qpk in ipp.time_frame.qpk()
    }
    # static nodal balance
    aggregate_static_balance_qpk = {
        qpk: aggregate_static_demand_qpk[qpk]-aggregate_static_supply_qpk[qpk]
        for qpk in ipp.time_frame.qpk()
    }
    return (
        imports_qpk,
        exports_qpk,
        balance_qpk,
        import_costs_qpk,
        export_revenue_qpk,
        ncf_qpk,
        aggregate_static_demand_qpk,
        aggregate_static_supply_qpk,
        aggregate_static_balance_qpk
    )
# *****************************************************************************
# *****************************************************************************
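# NOTE: a hedged usage sketch, assuming `ipp` is a solved
# InfrastructurePlanningProblem whose Pyomo instance has been built:
# (imports_qpk, exports_qpk, balance_qpk, import_costs_qpk,
#  export_revenue_qpk, ncf_qpk, demand_qpk, supply_qpk,
#  static_balance_qpk) = statistics(ipp)
# for qpk in imports_qpk:
#     print(qpk, imports_qpk[qpk], import_costs_qpk[qpk])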
......@@ -421,97 +220,6 @@ def unused_node_key(network: nx.MultiDiGraph):
# it doesn't, return it
return i
# *****************************************************************************
# *****************************************************************************
# TODO: document
def compute_gross_network_flows(problem: InfrastructurePlanningProblem) -> dict:
gross_supply_g = {}
gross_demand_g = {}
gross_supply_gq = {}
gross_demand_gq = {}
gross_supply_gqk = {}
gross_demand_gqk = {}
for g, net in problem.networks.items():
end_use_node_keys = tuple(
node_key
for node_key in net.nodes()
if Network.KEY_NODE_BASE_FLOW in net.nodes[node_key]
if len(net.nodes[node_key][Network.KEY_NODE_BASE_FLOW]) != 0
)
# flow: q, k
gross_demand_qk = {
(g, q, k): sum(
net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)]
for node_key in end_use_node_keys
if net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)] >= 0
)
for q in problem.time_frame.assessments
for k in problem.time_frame.time_intervals[q]
}
gross_supply_qk = {
(g, q, k): -sum(
net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)]
for node_key in end_use_node_keys
if net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)] < 0
)
for q in problem.time_frame.assessments
for k in problem.time_frame.time_intervals[q]
}
# (g,q,k)
gross_supply_gqk.update(gross_supply_qk)
gross_demand_gqk.update(gross_demand_qk)
# (g,q)
gross_supply_gq.update(
{
(g, q): sum(
gross_supply_qk[(g, q, k)]
for k in problem.time_frame.time_intervals[q]
)
for q in problem.time_frame.assessments
}
)
gross_demand_gq.update(
{
(g, q): sum(
gross_demand_qk[(g, q, k)]
for k in problem.time_frame.time_intervals[q]
)
for q in problem.time_frame.assessments
}
)
# g
gross_supply_g.update({g: sum(supply for supply in gross_supply_qk.values())})
gross_demand_g.update({g: sum(demand for demand in gross_demand_qk.values())})
return {
"gross_supply_gqk": gross_supply_gqk,
"gross_demand_gqk": gross_demand_gqk,
"gross_supply_gq": gross_supply_gq,
"gross_demand_gq": gross_demand_gq,
"gross_supply_g": gross_supply_g,
"gross_demand_g": gross_demand_g,
}
# *****************************************************************************
# *****************************************************************************
......@@ -701,7 +409,6 @@ def describe_mves(obj: pyo.ConcreteModel):
print("******************************************************************")
print("******************************************************************")
# *****************************************************************************
# *****************************************************************************
......@@ -710,7 +417,6 @@ def describe_mves(obj: pyo.ConcreteModel):
# *****************************************************************************
# *****************************************************************************
def plot_networks(
ipp: InfrastructurePlanningProblem,
ax=None,
......@@ -999,7 +705,6 @@ def plot_networks(
# *****************************************************************************
# *****************************************************************************
def is_integer(variable: float, integrality_tolerance: float) -> bool:
"""Returns True if a given number qualifies as an integer."""
if integrality_tolerance >= 0.5:
......@@ -1011,381 +716,4 @@ def is_integer(variable: float, integrality_tolerance: float) -> bool:
# *****************************************************************************
# *****************************************************************************
def describe_solution(ipp: InfrastructurePlanningProblem):
# *************************************************************************
print("******************************************************************")
# for each grid
for grid_key, net in ipp.networks.items():
# describe the path from import nodes to demand nodes
print("Flow path analysis: grid " + str(grid_key))
# for each node
for node_key in net.nodes:
# as long as it is an import node
if node_key not in net.import_nodes:
continue
# for every node
for node2_key in net.nodes:
# except node_key or other import nodes
if node_key is node2_key or node2_key in net.import_nodes:
continue
# or if there is no path
if nx.has_path(net, node_key, node2_key) == False:
continue
# for each viable/potential path
for path in nx.all_simple_paths(net, node_key, node2_key):
# check if all the pairs of nodes on the path were selected
# if multiple technologies were selected, add the capacities
arc_flow_capacities = [
sum(
net.edges[(path[node_pair], path[node_pair + 1], j)][
Network.KEY_ARC_TECH
].capacity[
net.edges[(path[node_pair], path[node_pair + 1], j)][
Network.KEY_ARC_TECH
].options_selected.index(True)
]
for j in net._adj[path[node_pair]][path[node_pair + 1]]
if True
in net.edges[(path[node_pair], path[node_pair + 1], j)][
Network.KEY_ARC_TECH
].options_selected
)
for node_pair in range(len(path) - 1)
if (path[node_pair], path[node_pair + 1]) in net.edges
]
# skip if at least one arc has zero capacity
if 0 in arc_flow_capacities:
continue
arc_tech_efficiencies = [
(
min(
net.edges[(path[node_pair], path[node_pair + 1], uv_k)][
Network.KEY_ARC_TECH
].efficiency[(0, k)]
for uv_k in net._adj[path[node_pair]][
path[node_pair + 1]
]
if True
in net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].options_selected
for k in range(
len(
net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].efficiency
)
)
),
max(
net.edges[(path[node_pair], path[node_pair + 1], uv_k)][
Network.KEY_ARC_TECH
].efficiency[(0, k)]
for uv_k in net._adj[path[node_pair]][
path[node_pair + 1]
]
if True
in net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].options_selected
for k in range(
len(
net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].efficiency
)
)
),
)
for node_pair in range(len(path) - 1)
if (path[node_pair], path[node_pair + 1]) in net.edges
]
max_static_flow = [
max(
[
net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)]
for k in range(
len(
ipp.networks[grid_key].nodes[node][
Network.KEY_NODE_BASE_FLOW
]
)
)
]
)
if node in net.source_sink_nodes
else 0
for node in path
if node in net.nodes
]
min_static_flow = [
min(
[
net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)]
for k in range(
len(
ipp.networks[grid_key].nodes[node][
Network.KEY_NODE_BASE_FLOW
]
)
)
]
)
if node in net.source_sink_nodes
else 0
for node in path
if node in net.nodes
]
# for each pair of nodes on the path
if len(arc_flow_capacities) == len(path) - 1:
print("**********************************************")
print("Path: " + str(path))
print("Max. static flow: " + str(max_static_flow))
print("Min. static flow: " + str(min_static_flow))
print("Capacities: " + str(arc_flow_capacities))
print("Efficiencies: " + str(arc_tech_efficiencies))
for arc_flow_index in range(len(arc_flow_capacities) - 1):
if (
arc_flow_capacities[arc_flow_index]
< arc_flow_capacities[arc_flow_index + 1]
):
# the flow capacities are increasing, which
# usually indicates suboptimality
# tech_options_first = [
# tech[Network.KEY_ARC_TECH_CAPACITY]
# for tech in ipp.networks[
# grid_key].edges[
# (path[arc_flow_index],
# path[arc_flow_index+1])][
# net.KEY_ARC_TECH]
# if True in tech.options_selected]
# tech_options_sec = [
# tech[net.KEY_ARC_TECH_CAPACITY]
# for tech in ipp.networks[
# grid_key].edges[
# (path[arc_flow_index+1],
# path[arc_flow_index+2])][
# net.KEY_ARC_TECH]
# if True in tech.options_selected]
# print('******************')
print(
"Increasing capacities along the flow path have been detected between nodes "
+ str(path[arc_flow_index])
+ " and "
+ str(path[arc_flow_index + 2])
+ "."
)
# print(tech_options_first)
# print(tech_options_sec)
# print('******************')
# *****************************************************************
# *********************************************************************
# for each node
for node_key in net.nodes:
# as long as it is an export node
if node_key not in net.export_nodes:
continue
# for every node
for node2_key in net.nodes:
# except node_key or other export nodes
if node_key is node2_key or node2_key in net.export_nodes:
continue
# or if there is no path
if nx.has_path(net, node2_key, node_key) == False:
continue
# for each viable/potential path
for path in nx.all_simple_paths(net, node2_key, node_key):
# check if all the pairs of nodes on the path were selected
# if multiple technologies were selected, add the capacities
arc_flow_capacities = [
sum(
net.edges[(path[node_pair], path[node_pair + 1], k)][
Network.KEY_ARC_TECH
].capacity[
net.edges[(path[node_pair], path[node_pair + 1], k)][
Network.KEY_ARC_TECH
].options_selected.index(True)
]
for k in net._adj[path[node_pair]][path[node_pair + 1]]
if True
in net.edges[(path[node_pair], path[node_pair + 1], k)][
Network.KEY_ARC_TECH
].options_selected
)
for node_pair in range(len(path) - 1)
if (path[node_pair], path[node_pair + 1]) in net.edges
]
# skip if at least one arc has zero capacity
if 0 in arc_flow_capacities:
continue
arc_tech_efficiencies = [
(
min(
net.edges[(path[node_pair], path[node_pair + 1], uv_k)][
Network.KEY_ARC_TECH
].efficiency[(0, k)]
for uv_k in net._adj[path[node_pair]][
path[node_pair + 1]
]
if True
in net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].options_selected
for k in range(
len(
net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].efficiency
)
)
),
max(
net.edges[(path[node_pair], path[node_pair + 1], uv_k)][
Network.KEY_ARC_TECH
].efficiency[(0, k)]
for uv_k in net._adj[path[node_pair]][
path[node_pair + 1]
]
if True
in net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].options_selected
for k in range(
len(
net.edges[
(path[node_pair], path[node_pair + 1], uv_k)
][Network.KEY_ARC_TECH].efficiency
)
)
),
)
for node_pair in range(len(path) - 1)
if (path[node_pair], path[node_pair + 1]) in net.edges
]
max_static_flow = [
max(
[
net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)]
for k in range(
len(
ipp.networks[grid_key].nodes[node][
Network.KEY_NODE_BASE_FLOW
]
)
)
]
)
if node in net.source_sink_nodes
else 0
for node in path
if node in net.nodes
]
min_static_flow = [
min(
[
net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)]
for k in range(
len(
ipp.networks[grid_key].nodes[node][
Network.KEY_NODE_BASE_FLOW
]
)
)
]
)
if node in net.source_sink_nodes
else 0
for node in path
if node in net.nodes
]
# for each pair of nodes on the path
if len(arc_flow_capacities) == len(path) - 1:
print("**********************************************")
print("Path: " + str(path))
print("Max. static flow: " + str(max_static_flow))
print("Min. static flow: " + str(min_static_flow))
print("Capacities: " + str(arc_flow_capacities))
print("Efficiencies: " + str(arc_tech_efficiencies))
for arc_flow_index in range(len(arc_flow_capacities) - 1):
if (
arc_flow_capacities[arc_flow_index]
< arc_flow_capacities[arc_flow_index + 1]
):
# the flow capacities are increasing, which
# usually indicates suboptimality
# print('******************')
print(
"Increasing capacities along the flow path have been detected between nodes "
+ str(path[arc_flow_index])
+ " and "
+ str(path[arc_flow_index + 2])
+ "."
)
# print(tech_options_first)
# print(tech_options_sec)
# print('******************')
# *****************************************************************
# *********************************************************************
print("******************************************************************")
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -3,9 +3,7 @@
# standard
import math
import random
from numbers import Real
from statistics import mean
import geopandas as gpd
......@@ -15,7 +13,6 @@ import geopandas as gpd
# local, internal
from src.topupopt.data.gis.utils import read_gdf_file
from src.topupopt.data.buildings.dk import heat
from src.topupopt.data.buildings.dk import bbr
# *****************************************************************************
# *****************************************************************************
......@@ -38,9 +35,11 @@ class TestDataBuildingsDK:
30*24*3600
for i in range(number_time_intervals)
]
total_demand_true = 1000
total_area_true = 4563 # 5%: 4563 # 100%: 100882
assessments = ['q']
annual_heat_demand = {'q': 1000}
air_temperature = {'q': [5+i for i in range(number_time_intervals)]}
gdf_osm = gpd.read_file(osm_data_filename)
gdf_osm.set_index(['element_type', 'osmid'], drop=True, inplace=True)
......@@ -51,55 +50,259 @@ class TestDataBuildingsDK:
index='index'
)
def verify_result(
out_dict,
out_area,
total_demand_true,
total_area_true,
# assessments,
# number_time_intervals
):
assert type(out_dict) == dict
assert isinstance(out_area, Real)
assert len(out_dict) == len(gdf_osm)
assert math.isclose(out_area, total_area_true, abs_tol=1e-3) # 5%: 4563 # 100%: 100882
for q in assessments:
assert math.isclose(
sum(sum(v[q]) for k, v in out_dict.items() if len(v[q]) != 0),
total_demand_true,
abs_tol=1e-3
)
# output dict must be keyed by entrance id and then by scenario
for k, v in out_dict.items():
assert k in gdf_osm.index
if len(v) == 0:
continue
for q in assessments:
assert q in v
assert len(v[q]) == number_time_intervals or len(v[q]) == 0
# drop entries to keep things fast
share_keeper_osm_entries = 0.05
number_osm_entries = len(gdf_osm)
for index in gdf_osm.index:
if len(gdf_osm) < round(share_keeper_osm_entries*number_osm_entries):
break
gdf_osm.drop(index=index, inplace=True)
# create profiles in accordance with a set of states and a positive gain
heat_demand_dict, total_area = heat.heat_demand_profiles(
gdf_osm=gdf_osm,
gdf_buildings=gdf_buildings,
time_interval_durations=intraperiod_time_interval_duration,
assessments=assessments,
annual_heat_demand=annual_heat_demand,
air_temperature=air_temperature,
deviation_gain=1
)
verify_result(heat_demand_dict, total_area, total_demand_true, total_area_true)
# create profiles in accordance with a set of states and a negative gain
heat_demand_dict, total_area = heat.heat_demand_profiles(
gdf_osm=gdf_osm,
gdf_buildings=gdf_buildings,
            time_interval_durations=intraperiod_time_interval_duration,
assessments=assessments,
annual_heat_demand=annual_heat_demand,
air_temperature=air_temperature,
deviation_gain=-1
)
verify_result(heat_demand_dict, total_area, total_demand_true, total_area_true)
# create profiles in accordance with a sinusoidal function (no phase shift)
heat_demand_dict, total_area = heat.heat_demand_profiles(
gdf_osm=gdf_osm,
gdf_buildings=gdf_buildings,
time_interval_durations=intraperiod_time_interval_duration,
assessments=assessments,
annual_heat_demand=annual_heat_demand,
min_max_ratio=min_to_max_ratio,
# air_temperature=air_temperature,
# state_correlates_with_output=False
# deviation_gain=1
)
verify_result(heat_demand_dict, total_area, total_demand_true, total_area_true)
# create profiles in accordance with a sinusoidal function (with phase shift)
heat_demand_dict, total_area = heat.heat_demand_profiles(
gdf_osm=gdf_osm,
gdf_buildings=gdf_buildings,
time_interval_durations=intraperiod_time_interval_duration,
assessments=assessments,
annual_heat_demand=annual_heat_demand,
min_max_ratio=min_to_max_ratio,
phase_shift_radians=math.pi/2
# air_temperature=air_temperature,
# state_correlates_with_output=False
# deviation_gain=1
)
verify_result(heat_demand_dict, total_area, total_demand_true, total_area_true)
# create profiles in accordance with states but without a predefined gain
# create profile (no optimisation)
heat_demand_dict, total_area = heat.heat_demand_profiles(
gdf_osm=gdf_osm,
gdf_buildings=gdf_buildings,
number_intervals=number_time_intervals,
time_interval_durations=intraperiod_time_interval_duration,
bdg_min_to_max_ratio={
index: min_to_max_ratio for index in gdf_buildings.index
},
bdg_specific_demand={
index: annual_heat_demand_scenario/total_area
for index in gdf_buildings.index
},
bdg_demand_phase_shift={
index: 2*math.pi*random.random() for index in gdf_buildings.index
},
avg_state=None,
state_correlates_with_output=False
assessments=assessments,
annual_heat_demand=annual_heat_demand,
air_temperature=air_temperature,
min_max_ratio=min_to_max_ratio,
states_correlate_profile=True,
)
assert type(heat_demand_dict2) == dict
assert len(heat_demand_dict2) == len(gdf_osm)
verify_result(heat_demand_dict, total_area, total_demand_true, total_area_true)
# create profiles in accordance with states but without a predefined gain (optimisation)
# remove all but one osm entry (to keep things light)
for index in gdf_osm.index:
if len(gdf_osm) <= 1:
break
gdf_osm.drop(index=index, inplace=True)
# create profile
heat_demand_dict, total_area = heat.heat_demand_profiles(
gdf_osm=gdf_osm,
gdf_buildings=gdf_buildings,
time_interval_durations=intraperiod_time_interval_duration,
assessments=assessments,
annual_heat_demand=annual_heat_demand,
air_temperature=air_temperature,
min_max_ratio=min_to_max_ratio,
states_correlate_profile=True,
solver='glpk'
)
total_area_true = 200
verify_result(heat_demand_dict, total_area, total_demand_true, total_area_true)
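        # note: taken together, these calls exercise heat.heat_demand_profiles'
        # dispatch: a predefined deviation_gain (positive or negative), a
        # sinusoid via min_max_ratio (with or without a phase shift), and
        # state-correlated profiles with the gain either determined by sorting
        # or by optimisation (the latter requiring a solver)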
# *************************************************************************
# *************************************************************************
# def test_demand_dict3(self):
# total heating area
# # heat_demand_dict_by_building_entrance
heating_area = heat.total_heating_area(gdf_osm, gdf_buildings)
assert isinstance(heating_area, Real)
assert math.isclose(heating_area, 100882, abs_tol=1e-3)
# osm_data_filename = 'tests/data/gdf_osm.gpkg'
# building_data_filename = 'tests/data/gdf_buildings.gpkg'
# bdg_gdf_container_columns = ('ejerskaber','koordinater','bygningspunkt')
# number_time_intervals = 12
# min_to_max_ratio = 0.1
# intraperiod_time_interval_duration = [
# 30*24*3600
# for i in range(number_time_intervals)
# ]
# annual_heat_demand_scenario = 1000
# total_area = 1000
# states = [10 for i in range(number_time_intervals)]
# gdf_osm = gpd.read_file(osm_data_filename)
# gdf_osm.set_index(['element_type', 'osmid'], drop=True, inplace=True)
# gdf_buildings = read_gdf_file(
# filename=building_data_filename,
# packed_columns=bdg_gdf_container_columns,
# index='index'
# )
# # sinusoidal
# heat_demand_dict = heat.heat_demand_dict_by_building_entrance2(
# gdf_osm=gdf_osm,
# gdf_buildings=gdf_buildings,
# number_intervals=number_time_intervals,
# time_interval_durations=intraperiod_time_interval_duration,
# min_max_ratio=min_to_max_ratio,
# specific_demand=annual_heat_demand_scenario/total_area,
# )
# assert type(heat_demand_dict) == dict
# assert len(heat_demand_dict) == len(gdf_osm)
# assert math.isclose(
# annual_heat_demand_scenario,
# sum(sum(value) for value in heat_demand_dict.values()),
# abs_tol=1e-3,
# )
# # sinusoidal with phase shift
# heat_demand_dict = heat.heat_demand_dict_by_building_entrance2(
# gdf_osm=gdf_osm,
# gdf_buildings=gdf_buildings,
# number_intervals=number_time_intervals,
# time_interval_durations=intraperiod_time_interval_duration,
# min_max_ratio=min_to_max_ratio,
# specific_demand=annual_heat_demand_scenario/total_area ,
# phase_shift_radians=math.pi,
# )
# assert type(heat_demand_dict) == dict
# assert len(heat_demand_dict) == len(gdf_osm)
# assert math.isclose(
# annual_heat_demand_scenario,
# sum(sum(value) for value in heat_demand_dict.values()),
# abs_tol=1e-3,
# )
# # predefined deviation gain, positive
# heat_demand_dict = heat.heat_demand_dict_by_building_entrance2(
# gdf_osm=gdf_osm,
# gdf_buildings=gdf_buildings,
# number_intervals=number_time_intervals,
# time_interval_durations=intraperiod_time_interval_duration,
# states=states,
# specific_demand=annual_heat_demand_scenario/total_area ,
# deviation_gain=3,
# )
# assert type(heat_demand_dict) == dict
# assert len(heat_demand_dict) == len(gdf_osm)
# assert math.isclose(
# annual_heat_demand_scenario,
# sum(sum(value) for value in heat_demand_dict.values()),
# abs_tol=1e-3,
# )
# # predefined deviation gain, negative
# heat_demand_dict = heat.heat_demand_dict_by_building_entrance2(
# gdf_osm=gdf_osm,
# gdf_buildings=gdf_buildings,
# number_intervals=number_time_intervals,
# time_interval_durations=intraperiod_time_interval_duration,
# states=states,
# specific_demand=annual_heat_demand_scenario/total_area ,
# deviation_gain=-3,
# )
# assert type(heat_demand_dict) == dict
# assert len(heat_demand_dict) == len(gdf_osm)
# assert math.isclose(
# annual_heat_demand_scenario,
# sum(sum(value) for value in heat_demand_dict.values()),
# abs_tol=1e-3,
# )
# # optimisation
# heat_demand_dict = heat.heat_demand_dict_by_building_entrance2(
# gdf_osm=gdf_osm,
# gdf_buildings=gdf_buildings,
# number_intervals=number_time_intervals,
# time_interval_durations=intraperiod_time_interval_duration,
# states=states,
# specific_demand=annual_heat_demand_scenario/total_area,
# states_correlate_profile=True,
# solver='glpk'
# )
# assert type(heat_demand_dict) == dict
# assert len(heat_demand_dict) == len(gdf_osm)
# assert math.isclose(
# annual_heat_demand_scenario,
# sum(sum(value) for value in heat_demand_dict.values()),
# abs_tol=1e-3,
# )
# *************************************************************************
# *************************************************************************
......
# imports
# standard
import random
import math
from statistics import mean
# local, internal
from src.topupopt.data.misc import utils
# ******************************************************************************
# ******************************************************************************
class TestDataUtils:
# *************************************************************************
# *************************************************************************
def test_profile_synching2(self):
integration_result = 10446
ratio_min_avg = 0.2
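        # note: for a sinusoid with mean m and amplitude a, min = m - a and
        # max = m + a; imposing min/avg = r gives a = m*(1 - r), so
        # min/max = r*m/((2 - r)*m) = r/(2 - r), the conversion used below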
min_max_ratio = ratio_min_avg / (2 - ratio_min_avg)
states = [
2.66,
2.34,
3.54,
......@@ -62,10 +52,10 @@ class TestDataUtils:
new_profile = utils.create_profile_using_time_weighted_state(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
min_max_ratio=min_max_ratio,
states_correlate_profile=False,
)
expected_result = [
......@@ -99,10 +89,10 @@ class TestDataUtils:
new_profile = utils.create_profile_using_time_weighted_state(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
min_max_ratio=min_max_ratio,
states_correlate_profile=True,
)
expected_result = [
......@@ -136,7 +126,7 @@ class TestDataUtils:
integration_result=integration_result,
period=sum(time_interval_durations),
time_interval_duration=mean(time_interval_durations),
min_max_ratio=min_max_ratio,
)
expected_pmax, expected_pmin = 1558.972133279683, 182.02786672031687
......@@ -155,10 +145,10 @@ class TestDataUtils:
try:
new_profile = utils.create_profile_using_time_weighted_state(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
min_max_ratio=min_max_ratio,
states_correlate_profile=True,
)
except ValueError:
error_triggered = True
......@@ -219,12 +209,12 @@ class TestDataUtils:
integration_result = 100
min_max_ratio = 0.2
profile = utils.discrete_sinusoid_matching_integral(
integration_result,
time_interval_durations,
min_max_ratio,
phase_shift_radians=phase_shift_radians,
)
......@@ -267,12 +257,12 @@ class TestDataUtils:
integration_result = 100
min_max_ratio = 0.2
profile = utils.discrete_sinusoid_matching_integral(
integration_result,
time_interval_durations,
min_max_ratio,
phase_shift_radians=phase_shift_radians,
)
......@@ -315,10 +305,10 @@ class TestDataUtils:
integration_result = 100
min_max_ratio = 0.2
profile = utils.discrete_sinusoid_matching_integral(
integration_result, time_interval_durations, min_max_ratio
)
assert math.isclose(sum(profile), integration_result, abs_tol=0.01)
......@@ -372,9 +362,349 @@ class TestDataUtils:
assert new_key not in key_list
# *************************************************************************
# *************************************************************************
def test_state_correlated_profile(self):
# correlation: direct, inverse
# states: positive, negative
        # time intervals: regular, irregular
#
# profile with positive correlation, positive states, regular intervals
number_time_intervals = 10
states = [i+1 for i in range(number_time_intervals)]
integration_result = 100
time_interval_durations = [10 for i in range(number_time_intervals)]
states_correlate_profile = True
min_max_ratio = 0.2
profile, a, b = utils.generate_state_correlated_profile(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
states_correlate_profile=states_correlate_profile,
min_max_ratio=min_max_ratio,
solver='glpk'
)
# test profile
assert a > 0 and b > 0
assert len(profile) == number_time_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
assert math.isclose(min(profile), max(profile)*min_max_ratio, abs_tol=1e-3)
assert max(profile) == profile[number_time_intervals-1]
# profile with inverse correlation, positive states, regular intervals
number_time_intervals = 10
states = [i+1 for i in range(number_time_intervals)]
integration_result = 100
time_interval_durations = [10 for i in range(number_time_intervals)]
states_correlate_profile = False
min_max_ratio = 0.2
profile, a, b = utils.generate_state_correlated_profile(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
states_correlate_profile=states_correlate_profile,
min_max_ratio=min_max_ratio,
solver='glpk'
)
# test profile
assert a < 0 and b > 0
assert len(profile) == number_time_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
assert math.isclose(min(profile), max(profile)*min_max_ratio, abs_tol=1e-3)
assert min(profile) == profile[number_time_intervals-1]
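        # note (assumption): the returned pair (a, b) appears to parameterise an
        # affine map from state to flow, roughly profile[k] ~ a*states[k] + b,
        # which would explain why a flips sign with the correlation direction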
# *************************************************************************
# *************************************************************************
def test_trigger_state_correlated_profile_error(self):
# trigger an error
number_time_intervals = 10
states = [i+1 for i in range(number_time_intervals)]
integration_result = 100
time_interval_durations = [10 for i in range(number_time_intervals+1)]
states_correlate_profile = True
min_max_ratio = 0.2
error_raised = False
try:
utils.generate_state_correlated_profile(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
states_correlate_profile=states_correlate_profile,
min_max_ratio=min_max_ratio,
solver='glpk'
)
except ValueError:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_manual_state_correlated_profile(self):
# correlation: direct, inverse
# states: positive, negative
        # time intervals: regular, irregular
# profile with positive correlation, positive states, regular intervals
number_time_intervals = 10
states = [i+1 for i in range(number_time_intervals)]
integration_result = 100
time_interval_durations = [10 for i in range(number_time_intervals)]
deviation_gain = 1
profile = utils.generate_manual_state_correlated_profile(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
deviation_gain=deviation_gain
)
# test profile
assert len(profile) == number_time_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
assert max(profile) == profile[number_time_intervals-1]
# profile with inverse correlation, positive states, regular intervals
number_time_intervals = 10
states = [i+1 for i in range(number_time_intervals)]
integration_result = 100
time_interval_durations = [10 for i in range(number_time_intervals)]
deviation_gain = -1
profile = utils.generate_manual_state_correlated_profile(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
deviation_gain=deviation_gain
)
# test profile
assert len(profile) == number_time_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
assert min(profile) == profile[number_time_intervals-1]
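        # note (assumption): with a manual deviation_gain, the profile appears
        # to be the uniform share of integration_result plus the gain times the
        # state's deviation from its time-weighted mean, so the sign of the
        # gain sets the correlation direction checked above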
# *************************************************************************
# *************************************************************************
def test_trigger_manual_state_correlated_profile_error(self):
# trigger an error
number_time_intervals = 10
states = [i+1 for i in range(number_time_intervals)]
integration_result = 100
time_interval_durations = [10 for i in range(number_time_intervals+1)]
deviation_gain = -1
error_raised = False
try:
utils.generate_manual_state_correlated_profile(
integration_result=integration_result,
states=states,
time_interval_durations=time_interval_durations,
deviation_gain=deviation_gain
)
except ValueError:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_create_profile_sinusoidal(self):
number_intervals = 10
integration_result = 100
min_max_ratio = 0.25
# sinusoidal profile
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
# sinusoidal profile with phase shift
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
phase_shift_radians=math.pi/2
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
# use incorrect parameter
error_raised = False
try:
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
deviation_gain=-1,
)
except TypeError:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_create_profile_predefined_gain(self):
number_intervals = 10
integration_result = 100
deviation_gain = 5
states = [number_intervals-i*0.5 for i in range(number_intervals)]
# predefined gain
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
states=states,
deviation_gain=deviation_gain
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
# predefined gain, opposite sign
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
states=states,
deviation_gain=-deviation_gain
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
# use incorrect parameter
error_raised = False
try:
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
states=states,
deviation_gain=-deviation_gain,
phase_shift_radians=math.pi
)
except TypeError:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_create_profile_via_sorting_sinusoid(self):
number_intervals = 10
integration_result = 100
states_correlate_profile = True
min_max_ratio = 0.25
states = [number_intervals-i*0.5 for i in range(number_intervals)]
# sorting and sinusoidal function
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
states=states,
states_correlate_profile=states_correlate_profile,
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_create_profile_via_optimisation(self):
number_intervals = 10
integration_result = 100
states_correlate_profile = True
min_max_ratio = 0.25
solver = 'glpk'
states = [number_intervals-i*0.5 for i in range(number_intervals)]
# optimisation
# states_correlate_profile is necessary
# min_max_ratio is necessary
# solver is necessary
# states matter but the gain must be determined
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
states=states,
states_correlate_profile=states_correlate_profile,
solver=solver
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
assert math.isclose(min(profile),max(profile)*min_max_ratio, abs_tol=1e-3)
        # optimisation, but with states that do not warrant it
states = [5 for i in range(number_intervals)]
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
states=states,
states_correlate_profile=states_correlate_profile,
solver=solver
)
assert len(profile) == number_intervals
assert math.isclose(sum(profile), integration_result, abs_tol=1e-3)
# the min to max ratio cannot be observed if the states do not change
assert math.isclose(min(profile), max(profile), abs_tol=1e-3)
# use incorrect parameter
error_raised = False
try:
profile = utils.generate_profile(
integration_result=integration_result,
time_interval_durations=[1 for i in range(number_intervals)],
min_max_ratio=min_max_ratio,
states=states,
states_correlate_profile=states_correlate_profile,
solver=solver,
phase_shift_radians=math.pi
)
except TypeError:
error_raised = True
assert error_raised
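        # note: taken together, these tests show how generate_profile dispatches
        # on its keyword arguments: no states -> sinusoid (optionally
        # phase-shifted); states plus deviation_gain -> manual state-correlated
        # profile; states plus states_correlate_profile -> sorted sinusoid, or
        # the optimisation-based variant when a solver is given; mixing
        # arguments from different modes raises TypeError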
# *************************************************************************
# *************************************************************************
# ******************************************************************************
# ******************************************************************************
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -23,16 +23,12 @@ from src.topupopt.data.misc.utils import generate_pseudo_unique_key
# *****************************************************************************
# *****************************************************************************
# TODO: add test for directed arcs between import and export nodes with static losses
class TestNetwork:
# *************************************************************************
# *************************************************************************
def test_arc_technologies_static_losses(self):
# *********************************************************************
# *********************************************************************
number_time_intervals = 3
number_scenarios = 2
......@@ -2001,6 +1997,15 @@ class TestNetwork:
error_triggered = True
assert error_triggered
        # undirected arcs cannot involve import or export nodes
error_triggered = False
try:
net.add_undirected_arc(node_key_a="I", node_key_b="E", arcs=lossy_arcs)
except ValueError:
error_triggered = True
assert error_triggered
# *********************************************************************
# trigger errors using non-identified nodes
......@@ -2165,12 +2170,10 @@ class TestNetwork:
# *************************************************************************
def test_tree_topology(self):
# create a network object with a tree topology
tree_network = binomial_tree(3, create_using=MultiDiGraph)
network = Network(tree_network)
network = Network(incoming_graph_data=tree_network)
for edge_key in network.edges(keys=True):
arc = ArcsWithoutLosses(
name=str(edge_key),
......@@ -2179,44 +2182,36 @@ class TestNetwork:
specific_capacity_cost=0,
capacity_is_instantaneous=False,
)
network.add_edge(*edge_key, **{Network.KEY_ARC_TECH: arc})
# assert that it does not have a tree topology
assert not network.has_tree_topology()
        # select all the arcs
for edge_key in network.edges(keys=True):
network.edges[edge_key][Network.KEY_ARC_TECH].options_selected[0] = True
# assert that it has a tree topology
assert network.has_tree_topology()
# *************************************************************************
# *************************************************************************
def test_pseudo_unique_key_generation(self):
# create network
network = Network()
# add node A
network.add_waypoint_node(node_key="A")
# add node B
network.add_waypoint_node(node_key="B")
# identify nodes
network.identify_node_types()
# add arcs
key_list = [
"3e225573-4e78-48c8-bb08-efbeeb795c22",
"f6d30428-15d1-41e9-a952-0742eaaa5a31",
......@@ -2325,6 +2320,47 @@ class TestNetwork:
except ValueError:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_antiparallel_arcs(self):
# create network
net = Network()
# add nodes
node_a = 'A'
net.add_waypoint_node(node_a)
node_b = 'B'
net.add_waypoint_node(node_b)
node_c = 'C'
net.add_waypoint_node(node_c)
# add arcs
node_pairs = ((node_a, node_b), (node_b, node_a),)
# test network
for node_pair in node_pairs:
net.add_preexisting_directed_arc(
*node_pair,
efficiency=None,
static_loss=None,
capacity=1,
capacity_is_instantaneous=False
)
# identify the node types
net.identify_node_types()
        # assert that it can detect the selected antiparallel arcs
assert net.has_selected_antiparallel_arcs()
# check that it finds the right node pairs
identified_node_pairs = net.find_selected_antiparallel_arcs()
assert (node_a, node_b) in identified_node_pairs
assert (node_b, node_a) in identified_node_pairs
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
# imports
# standard
import math
# local
# import numpy as np
# import networkx as nx
import pyomo.environ as pyo
# import src.topupopt.problems.esipp.utils as utils
from src.topupopt.data.misc.utils import generate_pseudo_unique_key
from src.topupopt.problems.esipp.problem import InfrastructurePlanningProblem
from src.topupopt.problems.esipp.network import Arcs, Network
from src.topupopt.problems.esipp.resource import ResourcePrice
# from src.topupopt.problems.esipp.utils import compute_cost_volume_metrics
from src.topupopt.problems.esipp.utils import statistics
from src.topupopt.problems.esipp.time import EconomicTimeFrame
# from src.topupopt.problems.esipp.converter import Converter
# *****************************************************************************
# *****************************************************************************
class TestESIPPProblem:
solver = 'glpk'
# solver = 'scip'
# solver = 'cbc'
def build_solve_ipp(
self,
solver: str = None,
solver_options: dict = None,
use_sos_arcs: bool = False,
arc_sos_weight_key: str = (InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE),
arc_use_real_variables_if_possible: bool = False,
use_sos_sense: bool = False,
sense_sos_weight_key: int = (
InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER
),
sense_use_real_variables_if_possible: bool = False,
sense_use_arc_interfaces: bool = False,
perform_analysis: bool = False,
plot_results: bool = False,
print_solver_output: bool = False,
time_frame: EconomicTimeFrame = None,
networks: dict = None,
converters: dict = None,
static_losses_mode=None,
mandatory_arcs: list = None,
max_number_parallel_arcs: dict = None,
arc_groups_dict: dict = None,
init_aux_sets: bool = False,
# discount_rates: dict = None,
assessment_weights: dict = None,
simplify_problem: bool = False,
):
if type(solver) == type(None):
solver = self.solver
if type(assessment_weights) != dict:
assessment_weights = {} # default
if type(converters) != dict:
converters = {}
# time weights
# relative weight of time period
# one interval twice as long as the average is worth twice
# one interval half as long as the average is worth half
# time_weights = [
# [time_period_duration/average_time_interval_duration
# for time_period_duration in intraperiod_time_interval_duration]
# for p in range(number_periods)]
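        # e.g., with interval durations [1, 2, 3] the average is 2, so the rule
        # above would give weights [0.5, 1.0, 1.5] (illustrative only;
        # time_weights stays None in these tests)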
time_weights = None # nothing yet
normalised_time_interval_duration = None # nothing yet
# create problem object
ipp = InfrastructurePlanningProblem(
# discount_rates=discount_rates,
time_frame=time_frame,
# reporting_periods=time_frame.reporting_periods,
# time_intervals=time_frame.time_interval_durations,
time_weights=time_weights,
normalised_time_interval_duration=normalised_time_interval_duration,
assessment_weights=assessment_weights,
)
# add networks and systems
for netkey, net in networks.items():
ipp.add_network(network_key=netkey, network=net)
# add converters
for cvtkey, cvt in converters.items():
ipp.add_converter(converter_key=cvtkey, converter=cvt)
# define arcs as mandatory
if type(mandatory_arcs) == list:
for full_arc_key in mandatory_arcs:
ipp.make_arc_mandatory(full_arc_key[0], full_arc_key[1:])
# if make_all_arcs_mandatory:
# for network_key in ipp.networks:
# for arc_key in ipp.networks[network_key].edges(keys=True):
# # preexisting arcs are no good
# if ipp.networks[network_key].edges[arc_key][
# Network.KEY_ARC_TECH].has_been_selected():
# continue
# ipp.make_arc_mandatory(network_key, arc_key)
# set up the use of sos for arc selection
if use_sos_arcs:
for network_key in ipp.networks:
for arc_key in ipp.networks[network_key].edges(keys=True):
if (
ipp.networks[network_key]
.edges[arc_key][Network.KEY_ARC_TECH]
.has_been_selected()
):
continue
ipp.use_sos1_for_arc_selection(
network_key,
arc_key,
use_real_variables_if_possible=(
arc_use_real_variables_if_possible
),
sos1_weight_method=arc_sos_weight_key,
)
# set up the use of sos for flow sense determination
if use_sos_sense:
for network_key in ipp.networks:
for arc_key in ipp.networks[network_key].edges(keys=True):
if not ipp.networks[network_key].edges[arc_key][
Network.KEY_ARC_UND
]:
continue
ipp.use_sos1_for_flow_senses(
network_key,
arc_key,
use_real_variables_if_possible=(
sense_use_real_variables_if_possible
),
use_interface_variables=sense_use_arc_interfaces,
sos1_weight_method=sense_sos_weight_key,
)
elif sense_use_arc_interfaces: # set up the use of arc interfaces w/o sos1
for network_key in ipp.networks:
for arc_key in ipp.networks[network_key].edges(keys=True):
if (
ipp.networks[network_key]
.edges[arc_key][Network.KEY_ARC_TECH]
.has_been_selected()
):
continue
ipp.use_interface_variables_for_arc_selection(network_key, arc_key)
# static losses
if static_losses_mode == ipp.STATIC_LOSS_MODE_ARR:
ipp.place_static_losses_arrival_node()
elif static_losses_mode == ipp.STATIC_LOSS_MODE_DEP:
ipp.place_static_losses_departure_node()
elif static_losses_mode == ipp.STATIC_LOSS_MODE_US:
ipp.place_static_losses_upstream()
elif static_losses_mode == ipp.STATIC_LOSS_MODE_DS:
ipp.place_static_losses_downstream()
else:
raise ValueError("Unknown static loss modelling mode.")
# *********************************************************************
# groups
if type(arc_groups_dict) != type(None):
for key in arc_groups_dict:
ipp.create_arc_group(arc_groups_dict[key])
# *********************************************************************
# maximum number of parallel arcs
for key in max_number_parallel_arcs:
ipp.set_maximum_number_parallel_arcs(
network_key=key[0],
node_a=key[1],
node_b=key[2],
limit=max_number_parallel_arcs[key],
)
# *********************************************************************
if simplify_problem:
ipp.simplify_peak_total_assessments()
# *********************************************************************
# instantiate (disable the default case v-a-v fixed losses)
# ipp.instantiate(place_fixed_losses_upstream_if_possible=False)
ipp.instantiate(initialise_ancillary_sets=init_aux_sets)
# ipp.instance.pprint()
# optimise
ipp.optimise(
solver_name=solver,
solver_options=solver_options,
output_options={},
print_solver_output=print_solver_output,
)
# ipp.instance.pprint()
# return the problem object
return ipp
# *********************************************************************
# *********************************************************************
# *************************************************************************
# *************************************************************************
def test_problem_increasing_imp_prices(self):
# assessment
q = 0
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one import, one regular
mynet = Network()
# import node
node_IMP = 'I'
mynet.add_import_node(
node_key=node_IMP,
prices={
qpk: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
for qpk in tf.qpk()
},
)
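        # note: prices=[1.0, 2.0] with volumes=[0.5, None] reads as a two-tier
        # tariff: the first 0.5 units cost 1.0 each, any further volume costs
        # 2.0 per unit (a None volume means the tier is unbounded); the
        # cash-flow checks below are consistent with this reading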
# other nodes
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 10
assert ipp.results["Problem"][0]["Number of variables"] == 11
assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
        # the flow should be 2.0
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
2.0,
abs_tol=1e-6,
)
# arc amplitude should be two
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
2.0,
abs_tol=0.01,
)
# capex should be four
assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
# sdncf should be -3.5
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3)
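        # consistency check: the 2.0 import is priced across both tiers,
        # 0.5*1.0 + 1.5*2.0 = 3.5, hence the -3.5 net cash flow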
# the objective function should be -7.5
assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_imp_prices(self):
# assessment
q = 0
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one import, one regular
mynet = Network()
# import node
node_IMP = 'I'
mynet.add_import_node(
node_key=node_IMP,
prices={
qpk: ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, 3.0])
for qpk in tf.qpk()
},
)
# other nodes
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 14 # 10 prior to nonconvex block
assert ipp.results["Problem"][0]["Number of variables"] == 13 # 11 prior to nonconvex block
assert ipp.results["Problem"][0]["Number of nonzeros"] == 28 # 20 prior to nonconvex block
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
        # the flow should be 2.0
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
2.0,
abs_tol=1e-6,
)
# arc amplitude should be two
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
2.0,
abs_tol=0.01,
)
# capex should be four
assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
# sdncf should be -2.5
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
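        # consistency check: 0.5*2.0 + 1.5*1.0 = 2.5 for the 2.0 import,
        # hence the -2.5 net cash flow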
        # the objective function should be -6.5
assert math.isclose(pyo.value(ipp.instance.obj_f), -6.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_imp_prices_infinite_capacity(self):
# assessment
q = 0
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one import, one regular
mynet = Network()
# import node
node_IMP = 'I'
mynet.add_import_node(
node_key=node_IMP,
prices={
qpk: ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
for qpk in tf.qpk()
},
)
# other nodes
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# trigger the error
error_raised = False
try:
# no sos, regular time intervals
self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
)
except Exception:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_exp_prices(self):
# assessment
q = 0
# time
number_intervals = 1
# periods
number_periods = 1
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one export, one regular
mynet = Network()
        # export node
node_EXP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_export_node(
node_key=node_EXP,
prices={
(q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 10
assert ipp.results["Problem"][0]["Number of variables"] == 11
assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
.options_selected
)
        # the flow should be 1.0
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
1.0,
abs_tol=1e-6,
)
        # arc amplitude should be one
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
1.0,
abs_tol=0.01,
)
        # capex should be three
assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
# sdncf should be 1.0
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3)
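        # consistency check: the 1.0 flow through the 50% efficient arc delivers
        # 0.5 to the export node, sold within the first tier at 2.0 per unit:
        # 0.5*2.0 = 1.0, hence the +1.0 net cash flow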
        # the objective function should be -2.0
assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_increasing_exp_prices(self):
# assessment
q = 0
# time
number_intervals = 1
# periods
number_periods = 1
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one export, one regular
mynet = Network()
        # export node
node_EXP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_export_node(
node_key=node_EXP,
prices={
(q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.25, 3.0])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 14 # 10 before nonconvex block
assert ipp.results["Problem"][0]["Number of variables"] == 13 # 11 before nonconvex block
assert ipp.results["Problem"][0]["Number of nonzeros"] == 28 # 20 before nonconvex block
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
.options_selected
)
        # the flow should be 1.0
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
1.0,
abs_tol=1e-6,
)
        # arc amplitude should be one
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
1.0,
abs_tol=0.01,
)
        # capex should be three
assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
# sdncf should be 0.75
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 0.75, abs_tol=1e-3)
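        # consistency check: the 0.5 delivered is sold across both tiers,
        # 0.25*1.0 + 0.25*2.0 = 0.75, hence the +0.75 net cash flow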
# the objective function should be -2.25
assert math.isclose(pyo.value(ipp.instance.obj_f), -2.25, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_increasing_exp_prices_infinite_capacity(self):
# assessment
q = 0
# time
number_intervals = 1
# periods
number_periods = 1
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one export, one regular
mynet = Network()
        # export node
node_EXP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_export_node(
node_key=node_EXP,
prices={
(q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.25, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# trigger the error
error_raised = False
try:
# no sos, regular time intervals
self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
)
except Exception:
error_raised = True
assert error_raised
# *************************************************************************
# *************************************************************************
def test_problem_increasing_imp_decreasing_exp_prices(self):
# scenario
q = 0
# time
number_intervals = 2
# periods
number_periods = 1
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,1)},
time_interval_durations={q: (1,1)},
)
# 3 nodes: one import, one export, one regular
mynet = Network()
# import node
node_IMP = 'I'
mynet.add_import_node(
node_key=node_IMP,
prices={
(q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# export node
node_EXP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_export_node(
node_key=node_EXP,
prices={
(q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = 'A'
mynet.add_source_sink_node(
node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0}
)
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5, (q, 1): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
# arc AE
arc_tech_AE = Arcs(
name="any",
efficiency={(q, 0): 0.5, (q, 1): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
# discount_rates={0: (0.0,)},
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 23
assert ipp.results["Problem"][0]["Number of variables"] == 26
assert ipp.results["Problem"][0]["Number of nonzeros"] == 57
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# interval 0: import only
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
2.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
0.0,
abs_tol=1e-6,
)
# interval 1: export only
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]),
0.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]),
1.0,
abs_tol=1e-6,
)
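        # consistency check: in interval 0 the 2.0 import delivers 2.0*0.5 = 1.0,
        # meeting node A's demand; in interval 1 node A's 1.0 surplus delivers
        # 1.0*0.5 = 0.5 to the export node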
# IA amplitude
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
2.0,
abs_tol=0.01,
)
# AE amplitude
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
1.0,
abs_tol=0.01,
)
# capex should be 7.0: 4+3
assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3)
# sdncf should be -2.5: -3.5+1.0
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
        # the objective function should be -9.5: -2.5 - 7.0
assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_direct_imp_exp_network_higher_exp_prices(self):
# time frame
q = 0
tf = EconomicTimeFrame(
discount_rate=3.5/100,
reporting_periods={q: (0,1)},
reporting_period_durations={q: (365 * 24 * 3600,365 * 24 * 3600)},
time_intervals={q: (0,1)},
time_interval_durations={q: (1,1)},
)
        # 2 nodes: one import, one export
mynet = Network()
# import node
imp_node_key = 'thatimpnode'
imp_prices = {
qpk: ResourcePrice(
prices=0.5,
volumes=None,
)
for qpk in tf.qpk()
}
mynet.add_import_node(
node_key=imp_node_key,
prices=imp_prices
)
# export node
exp_node_key = 'thatexpnode'
exp_prices = {
qpk: ResourcePrice(
prices=1.5,
volumes=None,
)
for qpk in tf.qpk()
}
mynet.add_export_node(
node_key=exp_node_key,
prices=exp_prices,
)
# add arc without fixed losses from import node to export
arc_tech_IE = Arcs(
name="IE",
# efficiency=[1, 1, 1, 1],
efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1},
efficiency_reverse=None,
static_loss=None,
validate=False,
capacity=[0.5, 1.0, 2.0],
minimum_cost=[5, 5.1, 5.2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
)
mynet.add_directed_arc(
node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE
)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
networks={"mynet": mynet},
time_frame=tf,
static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
mandatory_arcs=[],
max_number_parallel_arcs={}
)
# export prices are higher: it makes sense to install the arc since the
# revenue (@ max. cap.) exceeds the cost of installing the arc
assert (
True
in ipp.networks["mynet"]
.edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# overview
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
        # there should be imports
abs_tol = 1e-3
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert imports_qp > 0.0 - abs_tol
abs_tol = 1e-3
import_costs_qp = sum(import_costs_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert import_costs_qp > 0.0 - abs_tol
        # there should be exports
abs_tol = 1e-2
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert exports_qp > 0.0 - abs_tol
assert export_revenue_qp > 0.0 - abs_tol
# the revenue should exceed the costs
abs_tol = 1e-2
assert (
export_revenue_qp > import_costs_qp - abs_tol
)
# the capex should be positive
abs_tol = 1e-6
assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol
# *************************************************************************
# *************************************************************************
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
......@@ -14,7 +14,8 @@ from src.topupopt.problems.esipp.problem import InfrastructurePlanningProblem
from src.topupopt.problems.esipp.network import Arcs, Network
from src.topupopt.problems.esipp.network import ArcsWithoutStaticLosses
from src.topupopt.problems.esipp.resource import ResourcePrice
# from src.topupopt.problems.esipp.utils import compute_cost_volume_metrics
from src.topupopt.problems.esipp.utils import statistics
from src.topupopt.problems.esipp.time import EconomicTimeFrame
# from src.topupopt.problems.esipp.converter import Converter
......@@ -29,7 +30,7 @@ class TestESIPPProblem:
def build_solve_ipp(
self,
# solver: str = "glpk",
solver: str = None,
solver_options: dict = None,
use_sos_arcs: bool = False,
arc_sos_weight_key: str = (InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE),
......@@ -55,6 +56,8 @@ class TestESIPPProblem:
assessment_weights: dict = None,
simplify_problem: bool = False,
):
if type(solver) == type(None):
solver = self.solver
if type(assessment_weights) != dict:
assessment_weights = {} # default
......@@ -226,7 +229,7 @@ class TestESIPPProblem:
# optimise
ipp.optimise(
solver_name=solver,
solver_options=solver_options,
output_options={},
print_solver_output=print_solver_output,
......@@ -458,7 +461,7 @@ class TestESIPPProblem:
# *********************************************************************
# validation
# TODO: make a dict with the results and a for loop to reduce extent
# the arc should be installed since it is required for feasibility
assert (
True
......@@ -467,39 +470,18 @@ class TestESIPPProblem:
.options_selected
)
# flows
true_v_glljqk = {
("mynet", node_IMP, node_A, 0, q, 0): 1,
("mynet", node_IMP, node_A, 0, q, 1): 0,
("mynet", node_IMP, node_A, 0, q, 2): 2,
("mynet", node_A, node_EXP, 0, q, 0): 0,
("mynet", node_A, node_EXP, 0, q, 1): 1.5,
("mynet", node_A, node_EXP, 0, q, 2): 0
}
for key, v in true_v_glljqk.items():
assert math.isclose(pyo.value(ipp.instance.var_v_glljqk[key]), v, abs_tol=1e-6)
# arc amplitude should be two
assert math.isclose(
......@@ -586,351 +568,14 @@ class TestESIPPProblem:
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=True,
)
assert ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 16 # 20
assert ipp.results["Problem"][0]["Number of variables"] == 15 # 19
assert ipp.results["Problem"][0]["Number of nonzeros"] == 28 # 36
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# capex should be four
assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
# the objective function should be -9.7
assert math.isclose(pyo.value(ipp.instance.obj_f), -9.7, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_increasing_imp_prices(self):
# assessment
q = 0
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one import, one regular
mynet = Network()
# import node
node_IMP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_import_node(
node_key=node_IMP,
prices={
# (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
# for p in range(number_periods)
# for k in range(number_intervals)
qpk: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
for qpk in tf.qpk()
},
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 10
assert ipp.results["Problem"][0]["Number of variables"] == 11
assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# the flow through the arc should be 2.0 (demand of 1.0 at 50% efficiency)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
2.0,
abs_tol=1e-6,
)
# arc amplitude should be two
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
2.0,
abs_tol=0.01,
)
# capex should be four
assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
# sdncf should be -3.5
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3)
# the objective function should be -7.5
assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3)
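# Worked figures (derived from the inputs above): meeting a demand of 1.0
# through a 50% efficient arc requires importing 2.0 units; the two-segment
# tariff prices the first 0.5 units at 1.0 and the remaining 1.5 units at
# 2.0, i.e. 0.5 + 3.0 = 3.5, hence var_sdncf_q = -3.5. Adding the capex of
# 4.0 (minimum cost 2 plus 1 per unit of the 2.0 amplitude) gives -7.5.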
# *************************************************************************
# *************************************************************************
def test_problem_decreasing_exp_prices(self):
# assessment
q = 0
# time
number_intervals = 1
# periods
number_periods = 1
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one export, one regular
mynet = Network()
# import node
node_EXP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_export_node(
node_key=node_EXP,
prices={
(q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0})
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 10
assert ipp.results["Problem"][0]["Number of variables"] == 11
assert ipp.results["Problem"][0]["Number of nonzeros"] == 20
# *********************************************************************
# *********************************************************************
# validation
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# the flow through the arc should be 1.0
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
1.0,
abs_tol=1e-6,
)
# arc amplitude should be two
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
1.0,
abs_tol=0.01,
)
# capex should be three
assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3)
# sdncf should be 1.0
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3)
# the objective function should be -2.0
assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3)
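# Worked figures (derived from the inputs above): node A supplies 1.0, the
# 50% efficient arc delivers 0.5 to the export node, and the first tariff
# segment pays 2.0 per unit, so the revenue is 0.5 * 2.0 = 1.0, hence
# var_sdncf_q = 1.0. With a capex of 3.0 (minimum cost 2 plus 1 per unit of
# the 1.0 amplitude), the objective is -3.0 + 1.0 = -2.0.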
# *************************************************************************
# *************************************************************************
def test_problem_increasing_imp_decreasing_exp_prices(self):
# scenario
q = 0
# time
number_intervals = 2
# periods
number_periods = 1
tf = EconomicTimeFrame(
discount_rate=0.0,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,1)},
time_interval_durations={q: (1,1)},
)
# 3 nodes: one import, one export, one regular
mynet = Network()
# import node
node_IMP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_import_node(
node_key=node_IMP,
prices={
(q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# export node
node_EXP = generate_pseudo_unique_key(mynet.nodes())
mynet.add_export_node(
node_key=node_EXP,
prices={
(q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None])
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
mynet.add_source_sink_node(
node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0}
)
# arc IA
arc_tech_IA = Arcs(
name="any",
efficiency={(q, 0): 0.5, (q, 1): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA)
# arc AE
arc_tech_AE = Arcs(
name="any",
efficiency={(q, 0): 0.5, (q, 1): 0.5},
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=False,
# discount_rates={0: (0.0,)},
simplify_problem=True,
)
assert not ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 23
assert ipp.results["Problem"][0]["Number of variables"] == 26
assert ipp.results["Problem"][0]["Number of nonzeros"] == 57
assert ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 16 # 20
assert ipp.results["Problem"][0]["Number of variables"] == 15 # 19
assert ipp.results["Problem"][0]["Number of nonzeros"] == 28 # 36
# *********************************************************************
# *********************************************************************
@@ -943,58 +588,12 @@ class TestESIPPProblem:
.edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# the arc should be installed since it is required for feasibility
assert (
True
in ipp.networks["mynet"]
.edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# interval 0: import only
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]),
2.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]),
0.0,
abs_tol=1e-6,
)
# interval 1: export only
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]),
0.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]),
1.0,
abs_tol=1e-6,
)
# IA amplitude
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]),
2.0,
abs_tol=0.01,
)
# AE amplitude
assert math.isclose(
pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]),
1.0,
abs_tol=0.01,
)
# capex should be 7.0: 4+3
assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3)
# sdncf should be -2.5: -3.5+1.0
assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3)
# capex should be four
assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3)
# the objective function should be -9.5: -7.5-2.5
assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3)
# the objective function should be -9.7
assert math.isclose(pyo.value(ipp.instance.obj_f), -9.7, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
@@ -1100,36 +699,16 @@ class TestESIPPProblem:
)
# the flows should be 1.0, 0.0, 2.0, 2.5 and 0.6
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 0, 0)]),
1.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 0, 1)]),
0.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 0, 2)]),
2.0,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 1, 0)]),
2.5,
abs_tol=1e-6,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 1, 1)]),
0.6,
abs_tol=1e-6,
)
true_v_glljqk = {
("mynet", node_IMP, node_A, 0, 0, 0): 1,
("mynet", node_IMP, node_A, 0, 0, 1): 0,
("mynet", node_IMP, node_A, 0, 0, 2): 2,
("mynet", node_IMP, node_A, 0, 1, 0): 2.5,
("mynet", node_IMP, node_A, 0, 1, 1): 0.6,
}
for key, v in true_v_glljqk.items():
assert math.isclose(pyo.value(ipp.instance.var_v_glljqk[key]), v, abs_tol=1e-6)
# arc amplitude should be two
@@ -1819,7 +1398,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -1829,7 +1408,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
@@ -1989,7 +1568,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -1999,7 +1578,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
@@ -2157,7 +1736,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -2167,7 +1746,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
@@ -2297,7 +1876,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -2307,7 +1886,7 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
mynet.add_export_node(
node_key=exp_node_key,
prices={
@@ -2387,7 +1966,7 @@ class TestESIPPProblem:
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
solver_options={}, solver='scip',
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
@@ -2420,11 +1999,8 @@ class TestESIPPProblem:
)
# there should be no opex (imports or exports), only capex from arcs
assert pyo.value(ipp.instance.var_sdncf_q[q]) < 0
assert pyo.value(ipp.instance.var_capex) > 0
assert (
pyo.value(
ipp.instance.var_capex_arc_gllj[
@@ -2642,14 +2218,16 @@ class TestESIPPProblem:
# *********************************************************************
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
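# Indexing note (sketch of the convention assumed below): each dict returned
# by statistics() is keyed by (q, p, k), i.e. assessment, reporting period
# and time interval, so period-level totals are obtained by summing over k:
#
#     imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)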
q = 0
capex_ind = 0.75
@@ -2699,45 +2277,38 @@ class TestESIPPProblem:
.edges[(imp1_node_key, node_A, arc_key_I1A)][Network.KEY_ARC_TECH]
.options_selected.index(True)
)
h2 = (
ipp.networks["mynet"]
.edges[(imp2_node_key, node_A, arc_key_I2A)][Network.KEY_ARC_TECH]
.options_selected.index(True)
)
h3 = (
ipp.networks["mynet"]
.edges[(imp3_node_key, node_A, arc_key_I3A)][Network.KEY_ARC_TECH]
.options_selected.index(True)
)
assert h1 == h2
assert h1 == h3
# the capex have to be higher than those of the best individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_capex), capex_group, abs_tol=abs_tol
)
# there should be no exports
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
abs_tol = 1e-3
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# the imports should be higher than with individual arcs
abs_tol = 1e-3
assert math.isclose(flow_in[("mynet", 0, 0)], imp_group, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, imp_group, abs_tol=abs_tol)
# the operating results should be lower than with an individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_group, abs_tol=abs_tol
)
@@ -2745,13 +2316,11 @@ class TestESIPPProblem:
# the externalities should be zero
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.var_sdext_q[q]), 0, abs_tol=abs_tol)
# the objective function should be -6.3639758220728595-1.5
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.obj_f), obj_group, abs_tol=abs_tol)
# the imports should be greater than or equal to the losses for all arcs
@@ -2784,7 +2353,7 @@ class TestESIPPProblem:
assert math.isclose(losses_model, losses_data, abs_tol=abs_tol)
assert flow_in[("mynet", 0, 0)] >= losses_model
assert imports_qp >= losses_model
# *************************************************************************
# *************************************************************************
@@ -2987,14 +2556,15 @@ class TestESIPPProblem:
# **************************************************************************
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
q = 0
capex_ind = 0.75
@@ -3040,39 +2610,32 @@ class TestESIPPProblem:
)
# there should be no exports
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
abs_tol = 1e-3
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# the imports should be lower than with a group of arcs
abs_tol = 1e-3
assert math.isclose(flow_in[("mynet", 0, 0)], imp_ind, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, imp_ind, abs_tol=abs_tol)
# the operating results should be lower than with an individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_ind, abs_tol=abs_tol
)
# the externalities should be zero
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdext_q[q]), sdext_ind, abs_tol=abs_tol
)
# the objective function should be -6.3639758220728595-1.5
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.obj_f), obj_ind, abs_tol=abs_tol)
# the imports should be greater than or equal to the losses for all arcs
losses_model = sum(
pyo.value(
ipp.instance.var_w_glljqk[
@@ -3092,7 +2655,7 @@ class TestESIPPProblem:
for k in range(tf.number_time_intervals(q))
)
assert flow_in[("mynet", 0, 0)] >= losses_model
assert imports_qp >= losses_model
# *************************************************************************
# *************************************************************************
@@ -3113,7 +2676,7 @@ class TestESIPPProblem:
mynet = Network()
# import nodes
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -3279,14 +2842,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
capex_ind = 3
capex_group = 4
@@ -3357,49 +2921,39 @@ class TestESIPPProblem:
assert h1 == h2
# the capex have to be higher than those of the best individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_capex), capex_group, abs_tol=abs_tol
)
# there should be no exports
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
abs_tol = 1e-3
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# the imports should be higher than with individual arcs
abs_tol = 1e-3
assert math.isclose(flow_in[("mynet", 0, 0)], imp_group, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, imp_group, abs_tol=abs_tol)
assert imp_group > imp_ind
# the operating results should be lower than with an individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_group, abs_tol=abs_tol
)
# the externalities should be zero
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdext_q[q]), sdnext_group, abs_tol=abs_tol
)
# the objective function should be -6.3639758220728595-1.5
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.obj_f), obj_group, abs_tol=abs_tol)
# the imports should be greater than or equal to the losses for all arcs
losses_model = sum(
pyo.value(
ipp.instance.var_w_glljqk[("mynet", node_A, node_B, arc_key_AB, q, k)]
......@@ -3438,7 +2992,7 @@ class TestESIPPProblem:
mynet = Network()
# import nodes
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -3438,7 +2992,7 @@ class TestESIPPProblem:
)
# overview
(flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
capex_ind = 3
capex_group = 4
@@ -3591,12 +3145,15 @@ class TestESIPPProblem:
)
# there should be no exports
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
# the imports should be lower than with a group of arcs
abs_tol = 1e-3
assert math.isclose(flow_in[("mynet", 0, 0)], imp_ind, abs_tol=abs_tol)
# the operating results should be lower than with an individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_ind, abs_tol=abs_tol
)
# the externalities should be zero
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.var_sdext_q[q]), 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# the objective function should be -6.3639758220728595-1.5
# the imports should be lower than with a group of arcs
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.obj_f), obj_ind, abs_tol=abs_tol)
# the imports should be greater than or equal to the losses for all arcs
assert math.isclose(losses_model, losses_ind, abs_tol=abs_tol)
# *************************************************************************
# *************************************************************************
# TODO: trigger error with static losses
def test_direct_imp_exp_network(self):
# time frame
q = 0
tf = EconomicTimeFrame(
discount_rate=3.5/100,
reporting_periods={q: (0,1)},
reporting_period_durations={q: (365 * 24 * 3600,365 * 24 * 3600)},
time_intervals={q: (0,1)},
time_interval_durations={q: (1,1)},
)
# 4 nodes: one import, one export, two supply/demand nodes
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_prices = {
qpk: ResourcePrice(
prices=1.5,
volumes=None,
)
for qpk in tf.qpk()
}
mynet.add_import_node(
node_key=imp_node_key,
prices=imp_prices
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_prices = {
qpk: ResourcePrice(
prices=0.5,
volumes=None,
)
for qpk in tf.qpk()
}
mynet.add_export_node(
node_key=exp_node_key,
prices=exp_prices,
)
# add arc without fixed losses from import node to export
arc_tech_IE = Arcs(
name="IE",
# efficiency=[1, 1, 1, 1],
efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1},
efficiency_reverse=None,
static_loss=None,
validate=False,
capacity=[0.5, 1.0, 2.0],
minimum_cost=[5, 5.1, 5.2],
specific_capacity_cost=1,
capacity_is_instantaneous=False,
)
mynet.add_directed_arc(
node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE
)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
networks={"mynet": mynet},
time_frame=tf,
static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
mandatory_arcs=[],
max_number_parallel_arcs={}
)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, imp_ind, abs_tol=abs_tol)
# *********************************************************************
# *********************************************************************
# import prices are higher: it makes no sense to install the arc
# the arc should not be installed (unless prices allow for it)
assert (
True
not in ipp.networks["mynet"]
.edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
# there should be no imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], 0.0, abs_tol=abs_tol)
assert math.isclose(flow_in_cost[("mynet", 0, 0)], 0.0, abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-2
assert math.isclose(flow_out[("mynet", 0, 0)], 0.0, abs_tol=abs_tol)
assert math.isclose(flow_out_revenue[("mynet", 0, 0)], 0.0, abs_tol=abs_tol)
# there should be no capex
abs_tol = 1e-6
assert math.isclose(pyo.value(ipp.instance.var_capex), 0.0, abs_tol=abs_tol)
# the operating results should be lower than with an individual arc
abs_tol = 1e-3
assert math.isclose(
pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_ind, abs_tol=abs_tol
)
# the externalities should be zero
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.var_sdext_q[q]), 0, abs_tol=abs_tol)
# the objective function should be -6.3639758220728595-1.5
abs_tol = 1e-3
assert math.isclose(pyo.value(ipp.instance.obj_f), obj_ind, abs_tol=abs_tol)
# the imports should be greater than or equal to the losses for all arcs
assert math.isclose(losses_model, losses_ind, abs_tol=abs_tol)
# *************************************************************************
# *************************************************************************
def test_direct_imp_exp_network_higher_exp_prices(self):
def test_direct_imp_exp_network(self):
# time frame
q = 0
@@ -3821,10 +3255,10 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
imp_prices = {
qpk: ResourcePrice(
prices=0.5,
prices=1.5,
volumes=None,
)
for qpk in tf.qpk()
@@ -3835,10 +3269,10 @@ class TestESIPPProblem:
)
# export node
exp_node_key = generate_pseudo_unique_key(mynet.nodes())
exp_node_key = 'thatexpnode'
exp_prices = {
qpk: ResourcePrice(
prices=1.5,
prices=0.5,
volumes=None,
)
for qpk in tf.qpk()
@@ -3881,55 +3315,53 @@ class TestESIPPProblem:
max_number_parallel_arcs={}
)
# export prices are higher: it makes sense to install the arc since the
# revenue (@ max. cap.) exceeds the cost of installing the arc
# *********************************************************************
# *********************************************************************
# import prices are higher: it makes no sense to install the arc
# the arc should not be installed (unless prices allow for it)
assert (
True
in ipp.networks["mynet"]
not in ipp.networks["mynet"]
.edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH]
.options_selected
)
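# Margin check (derived from the prices above): each unit moved through the
# arc costs 1.5 to import and earns only 0.5 when exported, a -1.0 margin,
# so no flow level can recover the minimum arc cost of at least 5; the arc
# therefore stays uninstalled.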
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be no imports
abs_tol = 1e-6
assert flow_in[("mynet", 0, 0)] > 0.0 - abs_tol
abs_tol = 1e-3
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, 0.0, abs_tol=abs_tol)
assert flow_in_cost[("mynet", 0, 0)] > 0.0 - abs_tol
abs_tol = 1e-3
import_costs_qp = sum(import_costs_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(import_costs_qp, 0.0, abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-2
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0.0, abs_tol=abs_tol)
assert flow_out[("mynet", 0, 0)] > 0.0 - abs_tol
assert flow_out_revenue[("mynet", 0, 0)] > 0.0 - abs_tol
# the revenue should exceed the costs
abs_tol = 1e-2
assert (
flow_out_revenue[("mynet", 0, 0)] > flow_in_cost[("mynet", 0, 0)] - abs_tol
)
# the capex should be positive
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(export_revenue_qp, 0.0, abs_tol=abs_tol)
# there should be no capex
abs_tol = 1e-6
assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol
assert math.isclose(pyo.value(ipp.instance.var_capex), 0.0, abs_tol=abs_tol)
# *************************************************************************
# *************************************************************************
@@ -4103,14 +3535,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# the flow through AB should be from A to B during interval 0
@@ -4161,14 +3594,14 @@ class TestESIPPProblem:
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (1.2 + 1.2), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (1.2 + 1.2), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through I1A must be 1.0 during time interval 0
# flow through I1A must be 0.2 during time interval 1
@@ -4493,14 +3926,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# the flow through AB should be from A to B during interval 0
......@@ -4551,14 +3985,17 @@ class TestESIPPProblem:
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (1.2 + 1.2), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (1.2 + 1.2), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through I1A must be 1.0 during time interval 0
# flow through I1A must be 0.2 during time interval 1
@@ -4874,28 +4311,32 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(
flow_in[("mynet", 0, 0)], (1 + 1 + 2 + 0.3 + 1), abs_tol=abs_tol
imports_qp, (1 + 1 + 2 + 0.3 + 1), abs_tol=abs_tol
)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through I1A must be 1.1 during time interval 0
# flow through I1A must be 0.0 during time interval 1
@@ -5497,28 +4938,32 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(
flow_in[("mynet", 0, 0)], (1 + 1 + 2 + 0.3 + 1), abs_tol=abs_tol
imports_qp, (1 + 1 + 2 + 0.3 + 1), abs_tol=abs_tol
)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through I1A must be 1.1 during time interval 0
# flow through I1A must be 0.0 during time interval 1
@@ -5975,7 +5420,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -6033,7 +5478,7 @@ class TestESIPPProblem:
solver_options={},
perform_analysis=False,
plot_results=False,
print_solver_output=True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=static_losses_mode,
@@ -6056,21 +5501,28 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], 0.35, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, 0.35, abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA must be 0.35
abs_tol = 1e-6
@@ -6117,7 +5569,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -6173,11 +5625,11 @@ class TestESIPPProblem:
]:
# TODO: make this work with GLPK and SCIP
ipp = self.build_solve_ipp(
# solver='cbc', # does not work with GLPK nor SCIP
solver='cbc', # does not work with GLPK nor SCIP
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=static_losses_mode,
@@ -6200,21 +5652,28 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], 0.35, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, 0.35, abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA must be 0.35
abs_tol = 1e-6
@@ -6263,7 +5722,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -6344,13 +5803,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# the flow through AB should be from A to B during interval 0
abs_tol = 1e-6
@@ -6393,10 +5854,14 @@ class TestESIPPProblem:
)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (0.35 + 0.15), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (0.35 + 0.15), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA must be 0.35 during time interval 0
# flow through IA must be 0.15 during time interval 1
abs_tol = 1e-6
@@ -6540,7 +6005,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -6615,13 +6080,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# the flow through AB should be from A to B during interval 0
abs_tol = 1e-6
@@ -6664,10 +6131,14 @@ class TestESIPPProblem:
)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (0.35 + 0.15), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (0.35 + 0.15), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA must be 0.35 during time interval 0
# flow through IA must be 0.15 during time interval 1
abs_tol = 1e-6
@@ -6811,7 +6282,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -6892,13 +6363,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# the flow through AB should be from A to B during interval 0
abs_tol = 1e-6
@@ -6941,10 +6414,14 @@ class TestESIPPProblem:
)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (0.35 + 0.15), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (0.35 + 0.15), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA must be 0.35 during time interval 0
# flow through IA must be 0.15 during time interval 1
abs_tol = 1e-6
@@ -7089,7 +6566,7 @@ class TestESIPPProblem:
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -7164,13 +6641,15 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# the flow through AB should be from A to B during interval 0
abs_tol = 1e-6
@@ -7213,10 +6692,14 @@ class TestESIPPProblem:
)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (0.35 + 0.15), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (0.35 + 0.15), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA must be 0.35 during time interval 0
# flow through IA must be 0.15 during time interval 1
abs_tol = 1e-6
@@ -7474,21 +6957,27 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], 1.1, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, 1.1, abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# interval 0: flow through IA1 must be 1
abs_tol = 1e-6
@@ -7602,7 +7091,7 @@ class TestESIPPProblem:
# no sos, regular time intervals
ipp = self.build_solve_ipp(
# solver='cbc', # TODO: make this work with other solvers
solver='cbc', # TODO: make this work with other solvers
solver_options={},
plot_results=False, # True,
print_solver_output=False,
@@ -7636,21 +7125,27 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], 1.1, abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, 1.1, abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# interval 0: flow through IA1 must be 1
abs_tol = 1e-6
@@ -7673,12 +7168,12 @@ class TestESIPPProblem:
0.1,
abs_tol=abs_tol,
)
# *************************************************************************
# *************************************************************************
def test_directed_arc_static_downstream_new(self):
# time
q = 0
tf = EconomicTimeFrame(
@@ -7688,43 +7183,38 @@ class TestESIPPProblem:
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
number_intervals = 2
number_intervals = 1
number_periods = 2
# 4 nodes: one import, one export, two supply/demand nodes
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
# import node
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
(q, p, k): ResourcePrice(prices=0.1, volumes=None)
(q, p, k): ResourcePrice(prices=1 + 0.1, volumes=None)
for p in range(number_periods)
for k in range(number_intervals)
},
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): 1.3})
# add arcs
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# IA1
arcs_ia1 = Arcs(
name="IA1",
efficiency={(q, 0): 0.9, (q, 1): 0.9},
efficiency_reverse=None,
static_loss={(0, q, 0): 0.0, (0, q, 1): 0.1},
capacity=tuple([0.5 / 0.9]),
minimum_cost=tuple([0.1]),
specific_capacity_cost=0,
# add arcs
# IA1
mynet.add_preexisting_directed_arc(
node_key_a=imp_node_key,
node_key_b=node_A,
efficiency={(q, 0): 0.9},
static_loss={(q, 0, 0): 0.1},
capacity=0.5,
capacity_is_instantaneous=False,
validate=True,
)
mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arcs_ia1)
# IA2
arcs_ia2 = Arcs(
name="IA2",
@@ -7739,29 +7229,34 @@ class TestESIPPProblem:
)
mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arcs_ia2)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
# solver=solver,
solver='cbc', # TODO: make this work with other solvers
solver_options={},
plot_results=False, # True,
print_solver_output=False,
networks={"mynet": mynet},
time_frame=tf,
static_losses_mode=True,
# static_losses_mode=True,
static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR,
mandatory_arcs=[],
max_number_parallel_arcs={}
max_number_parallel_arcs={},
)
# **************************************************************************
# all arcs should be installed (they are not new)
assert (
True
in ipp.networks["mynet"]
.edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH]
.options_selected
)
assert (
True
in ipp.networks["mynet"]
@@ -7770,50 +7265,43 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
assert math.isclose(
flow_in[("mynet", 0, 0)], (1.2 + 0.1 / 0.9 + 1.0 + 0.1), abs_tol=abs_tol
)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (1.0 + 0.1), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
export_revenue_qp = sum(export_revenue_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# interval 0: flow through IA1 must be 0
# interval 1: flow through IA1 must be 0.1+0.1/0.9
# flow through IA1 must be 0.1
abs_tol = 1e-6
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, 0, 0)]),
0,
abs_tol=abs_tol,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, 0, 1)]),
0.1 + 0.1 / 0.9,
0.1,
abs_tol=abs_tol,
)
# interval 0: flow through IA2 must be 1.0
# interval 1: flow through IA2 must be 1.2
# flow through IA2 must be 1.0
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 1, 0, 0)]),
1.0,
abs_tol=abs_tol,
)
assert math.isclose(
pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 1, 0, 1)]),
1.2,
abs_tol=abs_tol,
)
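# Flow balance (derived from the asserts above): node A demands 1.0 and the
# preexisting arc IA1 incurs a static loss of 0.1, so total imports are
# 1.0 + 0.1 = 1.1; IA1 carries only its own 0.1 loss while IA2 delivers the
# full 1.0 demand.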
# *************************************************************************
# *************************************************************************
@@ -7833,11 +7321,10 @@ class TestESIPPProblem:
number_periods = 2
# 4 nodes: one import, one export, two supply/demand nodes
mynet = Network()
# import node
imp_node_key = generate_pseudo_unique_key(mynet.nodes())
imp_node_key = 'thatimpnode'
mynet.add_import_node(
node_key=imp_node_key,
prices={
@@ -7848,7 +7335,7 @@ class TestESIPPProblem:
)
# other nodes
node_A = generate_pseudo_unique_key(mynet.nodes())
node_A = 'A'
mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0})
# add arcs
@@ -7877,13 +7364,14 @@ class TestESIPPProblem:
# no sos, regular time intervals
ipp = self.build_solve_ipp(
# solver='cbc', # TODO: make this work with other solvers
solver='cbc', # TODO: make this work with other solvers
solver_options={},
plot_results=False, # True,
print_solver_output=False,
networks={"mynet": mynet},
time_frame=tf,
static_losses_mode=True,
# static_losses_mode=True,
static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR,
mandatory_arcs=[],
max_number_parallel_arcs={},
)
@@ -7907,23 +7395,28 @@ class TestESIPPProblem:
)
# overview
(
flow_in,
flow_in_k,
flow_out,
flow_in_cost,
flow_out_revenue,
) = compute_cost_volume_metrics(ipp.instance, True)
(imports_qpk,
exports_qpk,
balance_qpk,
import_costs_qpk,
export_revenue_qpk,
ncf_qpk,
aggregate_static_demand_qpk,
aggregate_static_supply_qpk,
aggregate_static_balance_qpk) = statistics(ipp)
# there should be imports
abs_tol = 1e-6
assert math.isclose(flow_in[("mynet", 0, 0)], (1.0 + 0.1), abs_tol=abs_tol)
imports_qp = sum(imports_qpk[qpk] for qpk in tf.qpk() if qpk[1] == 0)
assert math.isclose(imports_qp, (1.0 + 0.1), abs_tol=abs_tol)
# there should be no exports
abs_tol = 1e-6
assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
exports_qp = sum(exports_qpk[(q, 0, k)] for k in tf.time_intervals[q])
assert math.isclose(exports_qp, 0, abs_tol=abs_tol)
# flow through IA1 must be 0.1
abs_tol = 1e-6
@@ -8116,7 +7609,7 @@ class TestESIPPProblem:
)
# 2 nodes: one import, one regular
mynet = Network()
mynet = Network(network_type=Network.NET_TYPE_TREE)
# import node
node_IMP = "thatimpnode"
@@ -8202,7 +7695,7 @@ class TestESIPPProblem:
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
@@ -8210,9 +7703,8 @@ class TestESIPPProblem:
max_number_parallel_arcs={},
simplify_problem=True,
)
assert ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of variables"] == 53
assert ipp.results["Problem"][0]["Number of nonzeros"] == 143
@@ -8245,6 +7737,152 @@ class TestESIPPProblem:
assert math.isclose(pyo.value(ipp.instance.var_capex), 10.0, abs_tol=1e-3)
# the objective function
assert math.isclose(pyo.value(ipp.instance.obj_f), -1.193236715e+01, abs_tol=1e-3)
# *************************************************************************
# *************************************************************************
def test_problem_with_reverse_tree_network(self):
# assessment
q = 0
tf = EconomicTimeFrame(
discount_rate=3.5/100,
reporting_periods={q: (0,)},
reporting_period_durations={q: (365 * 24 * 3600,)},
time_intervals={q: (0,)},
time_interval_durations={q: (1,)},
)
# 2 nodes: one import, one regular
mynet = Network(network_type=Network.NET_TYPE_REV_TREE)
# export node
node_EXP = "thatexpnode"
mynet.add_export_node(
node_key=node_EXP,
prices={
qpk: ResourcePrice(prices=1.0, volumes=None)
for qpk in tf.qpk()
},
)
# node A
node_A = "thatnodea"
mynet.add_source_sink_node(
node_key=node_A,
base_flow={(q, 0): -0.50},
)
# node B
node_B = "thatnodeb"
mynet.add_source_sink_node(
node_key=node_B,
base_flow={(q, 0): -0.25},
)
# node C
node_C = "thatnodec"
mynet.add_source_sink_node(
node_key=node_C,
base_flow={(q, 0): -1.25},
)
list_exp_arcs = [
(node_A, node_EXP), # AE
(node_B, node_EXP), # BE
(node_C, node_EXP), # CE
]
for i, node_pair in enumerate(list_exp_arcs):
# import arcs: AE, BE, CE
new_arc = Arcs(
name="arc_"+str(node_pair),
efficiency=None,
efficiency_reverse=None,
static_loss=None,
capacity=[2],
minimum_cost=[6],
specific_capacity_cost=i,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(*node_pair, arcs=new_arc)
# arcs: AB, BA, BC, CB, AC, CA
list_other_arcs = [
(node_A, node_B), # AB
(node_B, node_A), # BA
(node_B, node_C), # BC
(node_C, node_B), # CB
(node_A, node_C), # AC
(node_C, node_A), # CA
]
for node_pair in list_other_arcs:
# arc
new_arc_tech = Arcs(
name="any",
efficiency=None,
efficiency_reverse=None,
static_loss=None,
capacity=[3],
minimum_cost=[2],
specific_capacity_cost=0,
capacity_is_instantaneous=False,
validate=False,
)
mynet.add_directed_arc(*node_pair, arcs=new_arc_tech)
# identify node types
mynet.identify_node_types()
# no sos, regular time intervals
ipp = self.build_solve_ipp(
solver_options={},
perform_analysis=False,
plot_results=False, # True,
print_solver_output=False,
time_frame=tf,
networks={"mynet": mynet},
static_losses_mode=True, # just to reach a line,
mandatory_arcs=[],
max_number_parallel_arcs={},
simplify_problem=True,
)
assert ipp.has_peak_total_assessments()
assert ipp.results["Problem"][0]["Number of constraints"] == 61
assert ipp.results["Problem"][0]["Number of variables"] == 53
assert ipp.results["Problem"][0]["Number of nonzeros"] == 143 #
# *********************************************************************
# *********************************************************************
# validation
# only the AE arc should be installed
true_exp_arcs_selected = [True, False, False]
for node_pair, true_arc_decision in zip(list_exp_arcs, true_exp_arcs_selected):
assert (
true_arc_decision
in ipp.networks["mynet"]
.edges[(*node_pair, 0)][Network.KEY_ARC_TECH]
.options_selected
)
# only two arcs between A, B and C can be installed
arcs_selected = tuple(
1
for node_pair in list_other_arcs
if True in ipp.networks["mynet"]
.edges[(*node_pair, 0)][Network.KEY_ARC_TECH]
.options_selected
)
assert sum(arcs_selected) == 2
# the network must be tree-shaped
assert ipp.networks["mynet"].has_tree_topology()
# capex
assert math.isclose(pyo.value(ipp.instance.var_capex), 10.0, abs_tol=1e-3)
# the objective function
assert math.isclose(pyo.value(ipp.instance.obj_f), -(10+(-11.93236715+10)), abs_tol=1e-3)
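# Minimal sketch (hypothetical helper, assuming a networkx-backed undirected
# view of the selected arcs) of what a check like has_tree_topology() boils
# down to: a connected graph with exactly one edge less than it has nodes.
#
#     import networkx as nx
#
#     def is_tree(graph: nx.Graph) -> bool:
#         # a tree is connected and has |V| - 1 edges
#         return (
#             graph.number_of_edges() == graph.number_of_nodes() - 1
#             and nx.is_connected(graph)
#         )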
# *****************************************************************************
# *****************************************************************************
\ No newline at end of file
@@ -132,8 +132,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[volumes])
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
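# Illustrative sketch (not necessarily the actual implementation): the move
# from is_equivalent() to == in these tests suggests ResourcePrice defines
# __eq__, roughly along these lines, assuming normalised price/volume lists:
#
#     def __eq__(self, other) -> bool:
#         if not isinstance(other, ResourcePrice):
#             return NotImplemented
#         return self.prices == other.prices and self.volumes == other.volumes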
@@ -144,8 +144,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
@@ -156,8 +156,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
......@@ -168,8 +168,8 @@ class TestResourcePrice:
volumes = None
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
......@@ -183,8 +183,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[volumes])
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
@@ -195,8 +195,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
......@@ -207,8 +207,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
assert res_p1.is_equivalent(res_p2)
assert res_p2.is_equivalent(res_p1)
assert res_p1 == res_p2
assert res_p2 == res_p1
# *********************************************************************
@@ -219,8 +219,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes)
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
@@ -231,8 +231,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[volumes + 1])
assert not res_p1.is_equivalent(res_p2)
assert not res_p2.is_equivalent(res_p1)
assert not res_p1 == res_p2
assert not res_p2 == res_p1
# *********************************************************************
@@ -243,8 +243,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes + 1)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
@@ -255,8 +255,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=[prices], volumes=[None])
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
@@ -267,8 +267,8 @@ class TestResourcePrice:
volumes = 1
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=None)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
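The single-segment cases above compare objects built from scalar arguments against objects built from one-element lists, and expect them to be equal. That only holds if the constructor normalises scalar inputs into lists. A minimal sketch of that behaviour, assuming the class simply stores prices and volumes as lists (attribute names inferred from the keyword arguments, not confirmed by this diff):

# Hypothetical sketch, not the actual implementation.
class ResourcePrice:
    def __init__(self, prices, volumes=None):
        # wrap scalar arguments into single-element lists so that
        # ResourcePrice(prices=1, volumes=None) stores the same data
        # as ResourcePrice(prices=[1], volumes=[None])
        if not isinstance(prices, list):
            prices, volumes = [prices], [volumes]
        self.prices = list(prices)
        self.volumes = list(volumes)

This covers exactly the argument shapes exercised by the tests above; other combinations are not shown in the diff.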
@@ -294,8 +294,8 @@ class TestResourcePrice:
volumes = [1, None]
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert res_p1.is_equivalent(res_p2)
- assert res_p2.is_equivalent(res_p1)
+ assert res_p1 == res_p2
+ assert res_p2 == res_p1
# two segments, no volume limit, same format
# prices do not match = False
@@ -306,8 +306,8 @@ class TestResourcePrice:
prices = [2, 3]
volumes = [1, None]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
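In the multi-segment cases, volumes=[1, None] appears to describe a first segment capped at one unit followed by a final segment with no volume cap. A hypothetical helper, written only to illustrate that reading of the data and not part of the library:

def segmented_cost(prices: list, volumes: list, amount: float) -> float:
    # illustrative only: charge each segment's price up to its cap,
    # where a cap of None means the segment is unlimited
    total = 0.0
    remaining = amount
    for price, cap in zip(prices, volumes):
        bought = remaining if cap is None else min(remaining, cap)
        total += price * bought
        remaining -= bought
        if remaining <= 0:
            break
    return total

# e.g. segmented_cost([1, 2], [1, None], 2.5) == 1*1 + 2*1.5 == 4.0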
@@ -320,8 +320,8 @@ class TestResourcePrice:
volumes = [1, 3]
res_p1 = ResourcePrice(prices=prices, volumes=volumes)
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert res_p1.is_equivalent(res_p2)
- assert res_p2.is_equivalent(res_p1)
+ assert res_p1 == res_p2
+ assert res_p2 == res_p1
# two segments, volume limit, same format: False
# prices do not match = False
@@ -332,8 +332,8 @@ class TestResourcePrice:
prices = [1, 4]
volumes = [1, 4]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
@@ -348,8 +348,8 @@ class TestResourcePrice:
prices = [1, 3]
volumes = [1, 5]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# two segments, volume limit, same format
# volumes do not match = False
@@ -360,8 +360,8 @@ class TestResourcePrice:
prices = [1, 3]
volumes = [1, None]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
@@ -374,8 +374,8 @@ class TestResourcePrice:
prices = [1, 3, 5]
volumes = [1, 4, None]
res_p2 = ResourcePrice(prices=prices, volumes=volumes)
- assert not res_p1.is_equivalent(res_p2)
- assert not res_p2.is_equivalent(res_p1)
+ assert not res_p1 == res_p2
+ assert not res_p2 == res_p1
# *********************************************************************
# *********************************************************************
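Every hunk in this file swaps the is_equivalent() assertions for the == operator while keeping the expected outcomes, which suggests equality was implemented in terms of the existing equivalence check. A minimal sketch of that delegation, with a hypothetical body for is_equivalent() since the diff does not show the real one:

# Hypothetical sketch, not the actual implementation.
class ResourcePrice:
    def __init__(self, prices, volumes=None):
        # scalar-to-list normalisation as sketched after the
        # single-segment tests above
        if not isinstance(prices, list):
            prices, volumes = [prices], [volumes]
        self.prices = list(prices)
        self.volumes = list(volumes)

    def is_equivalent(self, other) -> bool:
        # hypothetical: equivalent means identical price and volume segments
        return (
            isinstance(other, ResourcePrice)
            and self.prices == other.prices
            and self.volumes == other.volumes
        )

    def __eq__(self, other) -> bool:
        # == delegates to the equivalence logic, which is why the
        # rewritten asserts expect exactly the same outcomes
        return self.is_equivalent(other)

    def __hash__(self):
        # defining __eq__ removes the inherited __hash__;
        # restore one consistent with equality
        return hash((tuple(self.prices), tuple(self.volumes)))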