From bc6335c7d1ae8004105a8fcf37bedb73cae186e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pedro=20L=2E=20Magalh=C3=A3es?= <pedro.magalhaes@uni-bremen.de> Date: Fri, 8 Mar 2024 20:36:35 +0100 Subject: [PATCH] Applied black. --- src/topupopt/__init__.py | 2 +- src/topupopt/data/__init__.py | 1 - src/topupopt/data/buildings/__init__.py | 1 - src/topupopt/data/buildings/dk/__init__.py | 1 - src/topupopt/data/buildings/dk/bbr.py | 706 +- src/topupopt/data/buildings/dk/heat.py | 154 +- src/topupopt/data/dhn/__init__.py | 1 - src/topupopt/data/dhn/network.py | 305 +- src/topupopt/data/dhn/utils.py | 309 +- src/topupopt/data/finance/__init__.py | 1 - src/topupopt/data/finance/invest.py | 468 +- src/topupopt/data/gis/__init__.py | 2 +- src/topupopt/data/gis/calculate.py | 196 +- src/topupopt/data/gis/identify.py | 1120 +- src/topupopt/data/gis/modify.py | 1089 +- src/topupopt/data/gis/osm.py | 82 +- src/topupopt/data/gis/utils.py | 1050 +- src/topupopt/data/misc/__init__.py | 1 - src/topupopt/data/misc/units.py | 16 +- src/topupopt/data/misc/utils.py | 300 +- src/topupopt/problems/__init__.py | 2 +- src/topupopt/problems/esipp/__init__.py | 2 +- src/topupopt/problems/esipp/converter.py | 641 +- src/topupopt/problems/esipp/dynsys.py | 1399 +- src/topupopt/problems/esipp/model.py | 5414 ++++---- src/topupopt/problems/esipp/network.py | 1442 +-- src/topupopt/problems/esipp/problem.py | 4762 ++++--- src/topupopt/problems/esipp/resource.py | 256 +- src/topupopt/problems/esipp/signal.py | 1333 +- src/topupopt/problems/esipp/system.py | 319 +- src/topupopt/problems/esipp/utils.py | 1619 +-- src/topupopt/solvers/__init__.py | 2 +- src/topupopt/solvers/interface.py | 432 +- tests/examples_esipp.py | 1620 +-- tests/examples_esipp_network.py | 2567 ++-- tests/examples_esipp_problem.py | 12619 +++++++++---------- tests/examples_signal.py | 1792 ++- tests/test_all.py | 227 +- tests/test_data_finance.py | 1947 ++- tests/test_data_utils.py | 469 +- tests/test_dhn.py | 791 +- tests/test_dhn_utils.py | 741 +- tests/test_esipp_converter.py | 307 +- tests/test_esipp_dynsys.py | 2132 ++-- tests/test_esipp_network.py | 2524 ++-- tests/test_esipp_problem.py | 1006 +- tests/test_esipp_resource.py | 652 +- tests/test_esipp_utils.py | 24 +- tests/test_gis_calculate.py | 406 +- tests/test_gis_identify.py | 4821 +++---- tests/test_gis_modify.py | 3824 +++--- tests/test_gis_utils.py | 2633 ++-- tests/test_solvers.py | 874 +- 53 files changed, 31738 insertions(+), 33666 deletions(-) diff --git a/src/topupopt/__init__.py b/src/topupopt/__init__.py index e7ce4a7..80cf2ba 100644 --- a/src/topupopt/__init__.py +++ b/src/topupopt/__init__.py @@ -1,2 +1,2 @@ # -*- coding: utf-8 -*- -#from . import mvesipp \ No newline at end of file +# from . 
import mvesipp diff --git a/src/topupopt/data/__init__.py b/src/topupopt/data/__init__.py index 633f866..40a96af 100644 --- a/src/topupopt/data/__init__.py +++ b/src/topupopt/data/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- - diff --git a/src/topupopt/data/buildings/__init__.py b/src/topupopt/data/buildings/__init__.py index 633f866..40a96af 100644 --- a/src/topupopt/data/buildings/__init__.py +++ b/src/topupopt/data/buildings/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- - diff --git a/src/topupopt/data/buildings/dk/__init__.py b/src/topupopt/data/buildings/dk/__init__.py index 633f866..40a96af 100644 --- a/src/topupopt/data/buildings/dk/__init__.py +++ b/src/topupopt/data/buildings/dk/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- - diff --git a/src/topupopt/data/buildings/dk/bbr.py b/src/topupopt/data/buildings/dk/bbr.py index d0d2fc1..f5c1185 100644 --- a/src/topupopt/data/buildings/dk/bbr.py +++ b/src/topupopt/data/buildings/dk/bbr.py @@ -5,7 +5,7 @@ import json import urllib.request -from typing import Tuple # not needed with python 3.9 +from typing import Tuple # not needed with python 3.9 # local, external @@ -17,22 +17,24 @@ from shapely.geometry import Point # ***************************************************************************** # URLs - + # url prefix to find data using the building entrance id # url_prefix_entrance = 'https://api.dataforsyningen.dk/adgangsadresser/' -url_prefix_entrance = 'https://api.dataforsyningen.dk/bbrlight/opgange?adgangsadresseid=' +url_prefix_entrance = ( + "https://api.dataforsyningen.dk/bbrlight/opgange?adgangsadresseid=" +) # url prefix to find BBR building data # url_prefix_buildings = 'https://api.dataforsyningen.dk/bbrlight/bygninger?id=' -url_prefix_buildings = 'https://api.dataforsyningen.dk/bbrlight/bygninger/' +url_prefix_buildings = "https://api.dataforsyningen.dk/bbrlight/bygninger/" # url prefix to find the building location data -url_prefix_building_point = 'https://api.dataforsyningen.dk/bbrlight/bygningspunkter/' +url_prefix_building_point = "https://api.dataforsyningen.dk/bbrlight/bygningspunkter/" # ***************************************************************************** # ***************************************************************************** @@ -69,12 +71,10 @@ BBR_BDG_ENTR_LABELS = [ "Aendr_Funk", "Ophoert_ts", "Gyldighedsdato", - "href"] + "href", +] -SELECT_BBR_BDG_ENTR_LABELS = [ - "Opgang_id", - "AdgAdr_id", - "Bygning_id"] +SELECT_BBR_BDG_ENTR_LABELS = ["Opgang_id", "AdgAdr_id", "Bygning_id"] BBR_BDG_POINT_LABELS = [ "ois_id", @@ -95,15 +95,16 @@ BBR_BDG_POINT_LABELS = [ "Aendr_Funk", "Ophoert_ts", "BygPktKilde", - "koordinater", # 'koordinater' returns a list - "href"] + "koordinater", # 'koordinater' returns a list + "href", +] SELECT_BBR_BDG_POINT_LABELS = [ "KoorOest", "KoorNord", "KoorSystem", - "koordinater" # 'koordinater' returns a list - ] + "koordinater", # 'koordinater' returns a list +] BBR_BDG_LABELS = [ "ois_id", @@ -196,8 +197,9 @@ BBR_BDG_LABELS = [ "BygSkadeOmfatFors", "Gyldighedsdato", "href", - "ejerskaber", # 'ejerskaber' returns a list - "bygningspunkt"] # 'bygningspunkt' returns a dict + "ejerskaber", # 'ejerskaber' returns a list + "bygningspunkt", +] # 'bygningspunkt' returns a dict SELECT_BBR_BDG_LABELS = [ "BYG_ANVEND_KODE", @@ -220,13 +222,14 @@ SELECT_BBR_BDG_LABELS = [ "VARMEINSTAL_KODE", "OPVARMNING_KODE", "VARME_SUPPL_KODE", - "BygPkt_id",] + "BygPkt_id", +] BBR_CONTAINER_LABELS = { - 'bygningspunkt': dict, - 'ejerskaber': list, - 'koordinater': list, - } + 
"bygningspunkt": dict, + "ejerskaber": list, + "koordinater": list, +} # ***************************************************************************** # ***************************************************************************** @@ -238,16 +241,16 @@ BBR_CONTAINER_LABELS = { # OPVARMNING_KODE: Arten af det opvarmningsmiddel, der anvendes i eget anlæg # Translation: Nature of the heating agent used in own system -# Codes: +# Codes: # OPVARMNING_KODE 1 Elektricitet # OPVARMNING_KODE 2 Gasværksgas # OPVARMNING_KODE 3 Flydende brændsel # OPVARMNING_KODE 4 Fast brændsel # OPVARMNING_KODE 6 Halm # OPVARMNING_KODE 7 Naturgas -# OPVARMNING_KODE 9 Andet - -# VARME_SUPPL_KODE: Angives når der udover den hovedsagelige varmeinstallation +# OPVARMNING_KODE 9 Andet + +# VARME_SUPPL_KODE: Angives når der udover den hovedsagelige varmeinstallation # tillige opvarmes af en supplerende varmekilde # Translation: To be indicated when, in addition to the main heating installation, # it is also heated by a supplementary heat source @@ -268,7 +271,7 @@ BBR_CONTAINER_LABELS = { # anføres koden for den installation, der opvarmer den største delen. Supplerende # omflyttelige ovne registreres ikke # Translation: If there are several different heating installations, enter the -# code of the installation that heats the largest part. Additional relocating +# code of the installation that heats the largest part. Additional relocating # ovens are not registered # VARMEINSTAL_KODE 1 Fjernvarme/blokvarme @@ -283,186 +286,187 @@ BBR_CONTAINER_LABELS = { # fuel type -label_bbr_fuel_type = 'OPVARMNING_KODE' +label_bbr_fuel_type = "OPVARMNING_KODE" dict_bbr_fuel_type_codes = { - '1': 'Elektricitet', - '2': 'Gasværksgas', - '3': 'Flydende brændsel', - '4': 'Fast brændsel', - '6': 'Halm', - '7': 'Naturgas', - '9': 'Andet'} + "1": "Elektricitet", + "2": "Gasværksgas", + "3": "Flydende brændsel", + "4": "Fast brændsel", + "6": "Halm", + "7": "Naturgas", + "9": "Andet", +} # supplementary heating system -label_bbr_extra_heating = 'VARME_SUPPL_KODE' +label_bbr_extra_heating = "VARME_SUPPL_KODE" dict_bbr_extra_heating_codes = { - '0': 'Ikke oplyst', - '1': 'Varmepumpeanlæg', - '10': 'Biogasanlæg', - '2': 'Ovne til fast eller flydende brændsel', - '3': 'Ovne til flydende brændsel', - '4': 'Solpaneler', - '5': 'Pejs', - '6': 'Gasradiator', - '7': 'Elvarme', - '80': 'Andet', - '90': '(UDFASES) Bygningen har ingen supplerende varme' - } + "0": "Ikke oplyst", + "1": "Varmepumpeanlæg", + "10": "Biogasanlæg", + "2": "Ovne til fast eller flydende brændsel", + "3": "Ovne til flydende brændsel", + "4": "Solpaneler", + "5": "Pejs", + "6": "Gasradiator", + "7": "Elvarme", + "80": "Andet", + "90": "(UDFASES) Bygningen har ingen supplerende varme", +} # main heating system -label_bbr_heating_system = 'VARMEINSTAL_KODE' +label_bbr_heating_system = "VARMEINSTAL_KODE" dict_bbr_heating_system_codes = { - '1': 'Fjernvarme/blokvarme', - '2': 'Centralvarme med én fyringsenhed', - '3': 'Ovn til fast og flydende brændsel', - '5': 'Varmepumpe', - '6': 'Centralvarme med to fyringsenheder', - '7': 'Elvarme', - '8': 'Gasradiator', - '9': 'Ingen varmeinstallation', - '99': 'Blandet' - } + "1": "Fjernvarme/blokvarme", + "2": "Centralvarme med én fyringsenhed", + "3": "Ovn til fast og flydende brændsel", + "5": "Varmepumpe", + "6": "Centralvarme med to fyringsenheder", + "7": "Elvarme", + "8": "Gasradiator", + "9": "Ingen varmeinstallation", + "99": "Blandet", +} # coordinate system -label_bbr_bygningpunkt_koorsys = 'KoorSystem' +label_bbr_bygningpunkt_koorsys 
= "KoorSystem" label_bbr_bygningpunkt_koorsys_codes = { - '1': 'System 34', - '2': 'System 45', - '3': 'KP2000 (System 2000)', - '4': 'UTM ED50', - '5': 'WGS 84' - } + "1": "System 34", + "2": "System 45", + "3": "KP2000 (System 2000)", + "4": "UTM ED50", + "5": "WGS 84", +} # building use -label_bbr_building_uses = 'BYG_ANVEND_KODE' # byganvendelse +label_bbr_building_uses = "BYG_ANVEND_KODE" # byganvendelse dict_bbr_building_use_codes = { - '110': 'Stuehus til landbrugsejendom', - '120': 'Fritliggende enfamiliehus', - '121': 'Sammenbygget enfamiliehus', - '122': 'Fritliggende enfamiliehus i tæt-lav bebyggelse', - '130': '(UDFASES) Række-, kæde-, eller dobbelthus (lodret adskillelse mellem enhederne).', - '131': 'Række-, kæde- og klyngehus', - '132': 'Dobbelthus', - '140': 'Etagebolig-bygning, flerfamiliehus eller to-familiehus', - '150': 'Kollegium', - '160': 'Boligbygning til døgninstitution', - '185': 'Anneks i tilknytning til helårsbolig.', - '190': 'Anden bygning til helårsbeboelse', - '210': '(UDFASES) Bygning til erhvervsmæssig produktion vedrørende landbrug, gartneri, råstofudvinding o. lign', - '211': 'Stald til svin', - '212': 'Stald til kvæg, får mv.', - '213': 'Stald til fjerkræ', - '214': 'Minkhal', - '215': 'Væksthus', - '216': 'Lade til foder, afgrøder mv.', - '217': 'Maskinhus, garage mv.', - '218': 'Lade til halm, hø mv.', - '219': 'Anden bygning til landbrug mv.', - '220': '(UDFASES) Bygning til erhvervsmæssig produktion vedrørende industri, håndværk m.v. (fabrik, værksted o.lign.)', - '221': 'Bygning til industri med integreret produktionsapparat', - '222': 'Bygning til industri uden integreret produktionsapparat', - '223': 'Værksted', - '229': 'Anden bygning til produktion', - '230': '(UDFASES) El-, gas-, vand- eller varmeværk, forbrændingsanstalt m.v.', - '231': 'Bygning til energiproduktion', - '232': 'Bygning til forsyning- og energidistribution', - '233': 'Bygning til vandforsyning', - '234': 'Bygning til håndtering af affald og spildevand', - '239': 'Anden bygning til energiproduktion og -distribution', - '290': '(UDFASES) Anden bygning til landbrug, industri etc.', - '310': '(UDFASES) Transport- og garageanlæg (fragtmandshal, lufthavnsbygning, banegårdsbygning, parkeringshus). Garage med plads til et eller to køretøjer registreres med anvendelseskode 910', - '311': 'Bygning til jernbane- og busdrift', - '312': 'Bygning til luftfart', - '313': 'Bygning til parkering- og transportanlæg', - '314': 'Bygning til parkering af flere end to køretøjer i tilknytning til boliger', - '315': 'Havneanlæg', - '319': 'Andet transportanlæg', - '320': '(UDFASES) Bygning til kontor, handel, lager, herunder offentlig administration', - '321': 'Bygning til kontor', - '322': 'Bygning til detailhandel', - '323': 'Bygning til lager', - '324': 'Butikscenter', - '325': 'Tankstation', - '329': 'Anden bygning til kontor, handel og lager', - '330': '(UDFASES) Bygning til hotel, restaurant, vaskeri, frisør og anden servicevirksomhed', - '331': 'Hotel, kro eller konferencecenter med overnatning', - '332': 'Bed & breakfast mv.', - '333': 'Restaurant, café og konferencecenter uden overnatning', - '334': 'Privat servicevirksomhed som frisør, vaskeri, netcafé mv.', - '339': 'Anden bygning til serviceerhverv', - '390': '(UDFASES) Anden bygning til transport, handel etc', - '410': '(UDFASES) Bygning til biograf, teater, erhvervsmæssig udstilling, bibliotek, museum, kirke o. 
lign.', - '411': 'Biograf, teater, koncertsted mv.', - '412': 'Museum', - '413': 'Bibliotek', - '414': 'Kirke eller anden bygning til trosudøvelse for statsanerkendte trossamfund', - '415': 'Forsamlingshus', - '416': 'Forlystelsespark', - '419': 'Anden bygning til kulturelle formål', - '420': '(UDFASES) Bygning til undervisning og forskning (skole, gymnasium, forskningslabratorium o.lign.).', - '421': 'Grundskole', - '422': 'Universitet', - '429': 'Anden bygning til undervisning og forskning', - '430': '(UDFASES) Bygning til hospital, sygehjem, fødeklinik o. lign.', - '431': 'Hospital og sygehus', - '432': 'Hospice, behandlingshjem mv.', - '433': 'Sundhedscenter, lægehus, fødeklinik mv.', - '439': 'Anden bygning til sundhedsformål', - '440': '(UDFASES) Bygning til daginstitution', - '441': 'Daginstitution', - '442': 'Servicefunktion på døgninstitution', - '443': 'Kaserne', - '444': 'Fængsel, arresthus mv.', - '449': 'Anden bygning til institutionsformål', - '490': '(UDFASES) Bygning til anden institution, herunder kaserne, fængsel o. lign.', - '510': 'Sommerhus', - '520': '(UDFASES) Bygning til feriekoloni, vandrehjem o.lign. bortset fra sommerhus', - '521': 'Feriecenter, center til campingplads mv.', - '522': 'Bygning med ferielejligheder til erhvervsmæssig udlejning', - '523': 'Bygning med ferielejligheder til eget brug', - '529': 'Anden bygning til ferieformål', - '530': '(UDFASES) Bygning i forbindelse med idrætsudøvelse (klubhus, idrætshal, svømmehal o. lign.)', - '531': 'Klubhus i forbindelse med fritid og idræt', - '532': 'Svømmehal', - '533': 'Idrætshal', - '534': 'Tribune i forbindelse med stadion', - '535': 'Bygning til træning og opstaldning af heste', - '539': 'Anden bygning til idrætformål', - '540': 'Kolonihavehus', - '585': 'Anneks i tilknytning til fritids- og sommerhus', - '590': 'Anden bygning til fritidsformål', - '910': 'Garage (med plads til et eller to køretøjer)', - '920': 'Carport', - '930': 'Udhus', - '940': 'Drivhus', - '950': 'Fritliggende overdækning', - '960': 'Fritliggende udestue', - '970': 'Tiloversbleven landbrugsbygning', - '990': 'Faldefærdig bygning', - '999': 'Ukendt bygning' - } + "110": "Stuehus til landbrugsejendom", + "120": "Fritliggende enfamiliehus", + "121": "Sammenbygget enfamiliehus", + "122": "Fritliggende enfamiliehus i tæt-lav bebyggelse", + "130": "(UDFASES) Række-, kæde-, eller dobbelthus (lodret adskillelse mellem enhederne).", + "131": "Række-, kæde- og klyngehus", + "132": "Dobbelthus", + "140": "Etagebolig-bygning, flerfamiliehus eller to-familiehus", + "150": "Kollegium", + "160": "Boligbygning til døgninstitution", + "185": "Anneks i tilknytning til helårsbolig.", + "190": "Anden bygning til helårsbeboelse", + "210": "(UDFASES) Bygning til erhvervsmæssig produktion vedrørende landbrug, gartneri, råstofudvinding o. lign", + "211": "Stald til svin", + "212": "Stald til kvæg, får mv.", + "213": "Stald til fjerkræ", + "214": "Minkhal", + "215": "Væksthus", + "216": "Lade til foder, afgrøder mv.", + "217": "Maskinhus, garage mv.", + "218": "Lade til halm, hø mv.", + "219": "Anden bygning til landbrug mv.", + "220": "(UDFASES) Bygning til erhvervsmæssig produktion vedrørende industri, håndværk m.v. 
(fabrik, værksted o.lign.)", + "221": "Bygning til industri med integreret produktionsapparat", + "222": "Bygning til industri uden integreret produktionsapparat", + "223": "Værksted", + "229": "Anden bygning til produktion", + "230": "(UDFASES) El-, gas-, vand- eller varmeværk, forbrændingsanstalt m.v.", + "231": "Bygning til energiproduktion", + "232": "Bygning til forsyning- og energidistribution", + "233": "Bygning til vandforsyning", + "234": "Bygning til håndtering af affald og spildevand", + "239": "Anden bygning til energiproduktion og -distribution", + "290": "(UDFASES) Anden bygning til landbrug, industri etc.", + "310": "(UDFASES) Transport- og garageanlæg (fragtmandshal, lufthavnsbygning, banegårdsbygning, parkeringshus). Garage med plads til et eller to køretøjer registreres med anvendelseskode 910", + "311": "Bygning til jernbane- og busdrift", + "312": "Bygning til luftfart", + "313": "Bygning til parkering- og transportanlæg", + "314": "Bygning til parkering af flere end to køretøjer i tilknytning til boliger", + "315": "Havneanlæg", + "319": "Andet transportanlæg", + "320": "(UDFASES) Bygning til kontor, handel, lager, herunder offentlig administration", + "321": "Bygning til kontor", + "322": "Bygning til detailhandel", + "323": "Bygning til lager", + "324": "Butikscenter", + "325": "Tankstation", + "329": "Anden bygning til kontor, handel og lager", + "330": "(UDFASES) Bygning til hotel, restaurant, vaskeri, frisør og anden servicevirksomhed", + "331": "Hotel, kro eller konferencecenter med overnatning", + "332": "Bed & breakfast mv.", + "333": "Restaurant, café og konferencecenter uden overnatning", + "334": "Privat servicevirksomhed som frisør, vaskeri, netcafé mv.", + "339": "Anden bygning til serviceerhverv", + "390": "(UDFASES) Anden bygning til transport, handel etc", + "410": "(UDFASES) Bygning til biograf, teater, erhvervsmæssig udstilling, bibliotek, museum, kirke o. lign.", + "411": "Biograf, teater, koncertsted mv.", + "412": "Museum", + "413": "Bibliotek", + "414": "Kirke eller anden bygning til trosudøvelse for statsanerkendte trossamfund", + "415": "Forsamlingshus", + "416": "Forlystelsespark", + "419": "Anden bygning til kulturelle formål", + "420": "(UDFASES) Bygning til undervisning og forskning (skole, gymnasium, forskningslabratorium o.lign.).", + "421": "Grundskole", + "422": "Universitet", + "429": "Anden bygning til undervisning og forskning", + "430": "(UDFASES) Bygning til hospital, sygehjem, fødeklinik o. lign.", + "431": "Hospital og sygehus", + "432": "Hospice, behandlingshjem mv.", + "433": "Sundhedscenter, lægehus, fødeklinik mv.", + "439": "Anden bygning til sundhedsformål", + "440": "(UDFASES) Bygning til daginstitution", + "441": "Daginstitution", + "442": "Servicefunktion på døgninstitution", + "443": "Kaserne", + "444": "Fængsel, arresthus mv.", + "449": "Anden bygning til institutionsformål", + "490": "(UDFASES) Bygning til anden institution, herunder kaserne, fængsel o. lign.", + "510": "Sommerhus", + "520": "(UDFASES) Bygning til feriekoloni, vandrehjem o.lign. bortset fra sommerhus", + "521": "Feriecenter, center til campingplads mv.", + "522": "Bygning med ferielejligheder til erhvervsmæssig udlejning", + "523": "Bygning med ferielejligheder til eget brug", + "529": "Anden bygning til ferieformål", + "530": "(UDFASES) Bygning i forbindelse med idrætsudøvelse (klubhus, idrætshal, svømmehal o. 
lign.)", + "531": "Klubhus i forbindelse med fritid og idræt", + "532": "Svømmehal", + "533": "Idrætshal", + "534": "Tribune i forbindelse med stadion", + "535": "Bygning til træning og opstaldning af heste", + "539": "Anden bygning til idrætformål", + "540": "Kolonihavehus", + "585": "Anneks i tilknytning til fritids- og sommerhus", + "590": "Anden bygning til fritidsformål", + "910": "Garage (med plads til et eller to køretøjer)", + "920": "Carport", + "930": "Udhus", + "940": "Drivhus", + "950": "Fritliggende overdækning", + "960": "Fritliggende udestue", + "970": "Tiloversbleven landbrugsbygning", + "990": "Faldefærdig bygning", + "999": "Ukendt bygning", +} # floor types -label_bbr_floor_types = 'ETAGER_AFVIG_KODE' +label_bbr_floor_types = "ETAGER_AFVIG_KODE" dict_bbr_floor_type_codes = { - '0': 'Bygningen har ikke afvigende etager', - '10': 'Bygningen har afvigende etager', - '11': 'Bygningen indeholder hems', - '12': 'Bygningen indeholder dobbelt højt rum', - '13': 'Bygningen indeholder indskudt etage' - } - + "0": "Bygningen har ikke afvigende etager", + "10": "Bygningen har afvigende etager", + "11": "Bygningen indeholder hems", + "12": "Bygningen indeholder dobbelt højt rum", + "13": "Bygningen indeholder indskudt etage", +} + # all codes bbr_codes = { @@ -471,34 +475,34 @@ bbr_codes = { label_bbr_heating_system: dict_bbr_heating_system_codes, label_bbr_bygningpunkt_koorsys: label_bbr_bygningpunkt_koorsys_codes, label_bbr_building_uses: dict_bbr_building_use_codes, - label_bbr_floor_types: dict_bbr_floor_type_codes - } + label_bbr_floor_types: dict_bbr_floor_type_codes, +} # BBR labels - + # label under which the building id can be found in the building entrance obj. -label_bbr_opgang_id = 'Opgang_id' +label_bbr_opgang_id = "Opgang_id" -label_bbr_entrance_id = 'AdgAdr_id' +label_bbr_entrance_id = "AdgAdr_id" -label_bbr_building_id = 'Bygning_id' +label_bbr_building_id = "Bygning_id" -label_bbr_bygningpunkt = 'bygningspunkt' +label_bbr_bygningpunkt = "bygningspunkt" -label_bbr_bygningpunkt_coord = 'koordinater' +label_bbr_bygningpunkt_coord = "koordinater" -label_bbr_opgang_id = 'Opgang_id' +label_bbr_opgang_id = "Opgang_id" -label_bbr_entrance_id = 'AdgAdr_id' +label_bbr_entrance_id = "AdgAdr_id" -label_bbr_building_id = 'Bygning_id' +label_bbr_building_id = "Bygning_id" -label_bbr_building_area = 'BYG_BOLIG_ARL_SAML' +label_bbr_building_area = "BYG_BOLIG_ARL_SAML" -label_bbr_housing_area = 'BYG_BEBYG_ARL' +label_bbr_housing_area = "BYG_BEBYG_ARL" -label_bbr_number_floors = 'ETAGER_ANT' +label_bbr_number_floors = "ETAGER_ANT" list_labels_bbr = [ label_bbr_building_id, @@ -508,286 +512,280 @@ list_labels_bbr = [ label_bbr_building_uses, label_bbr_number_floors, label_bbr_floor_types, - label_bbr_extra_heating - ] + label_bbr_extra_heating, +] # ***************************************************************************** # ***************************************************************************** -def get_bbr_building_data_geodataframe( - building_entrance_ids: list, - selected_bbr_bdg_entrance_labels: list = SELECT_BBR_BDG_ENTR_LABELS, - selected_bbr_building_labels: list = SELECT_BBR_BDG_LABELS, - selected_bbr_building_point_labels: list = SELECT_BBR_BDG_POINT_LABELS - ) -> Tuple[GeoDataFrame,list]: +def get_bbr_building_data_geodataframe( + building_entrance_ids: list, + selected_bbr_bdg_entrance_labels: list = SELECT_BBR_BDG_ENTR_LABELS, + selected_bbr_building_labels: list = SELECT_BBR_BDG_LABELS, + selected_bbr_building_point_labels: list = SELECT_BBR_BDG_POINT_LABELS, +) 
-> Tuple[GeoDataFrame, list]: # ************************************************************************* # ************************************************************************* - + # get data about building entrances - + dict_building_entrances, list_failures = fetch_building_entrance_data( building_entrance_ids - ) - + ) + if selected_bbr_bdg_entrance_labels == None: - # includes all labels - + selected_bbr_bdg_entrance_labels = BBR_BDG_ENTR_LABELS - + list_entries = [ - [value[bbr_key] - for bbr_key in value - if bbr_key in selected_bbr_bdg_entrance_labels] - for key, value in dict_building_entrances.items()] - + [ + value[bbr_key] + for bbr_key in value + if bbr_key in selected_bbr_bdg_entrance_labels + ] + for key, value in dict_building_entrances.items() + ] + df_building_entrances = DataFrame( data=list_entries, columns=selected_bbr_bdg_entrance_labels, - index=dict_building_entrances.keys() - ) + index=dict_building_entrances.keys(), + ) # ************************************************************************* # ************************************************************************* - + # get data about buildings - - dict_buildings = fetch_building_data( - df_building_entrances.index - ) - + + dict_buildings = fetch_building_data(df_building_entrances.index) + if selected_bbr_building_labels == None: - # includes all labels - + selected_bbr_building_labels = BBR_BDG_LABELS - + # create dataframe with building data - + list_entries = [ - [value[bbr_key] - for bbr_key in value - if bbr_key in selected_bbr_building_labels] - for key, value in dict_buildings.items()] - + [value[bbr_key] for bbr_key in value if bbr_key in selected_bbr_building_labels] + for key, value in dict_buildings.items() + ] + df_buildings = DataFrame( data=list_entries, columns=selected_bbr_building_labels, - index=dict_buildings.keys() - ) + index=dict_buildings.keys(), + ) # ************************************************************************* # ************************************************************************* - + # get building point data - + if selected_bbr_building_point_labels == None: - # includes all labels - + selected_bbr_building_point_labels = BBR_BDG_POINT_LABELS - + dict_buildings_points = { - building_entrance_id: #( - dict_buildings[ - building_entrance_id][ - label_bbr_bygningpunkt] - #if building_entrance_id in dict_buildings else None) + building_entrance_id: dict_buildings[building_entrance_id][ # ( + label_bbr_bygningpunkt + ] + # if building_entrance_id in dict_buildings else None) for building_entrance_id in dict_building_entrances - if building_entrance_id in dict_buildings # excludes failures - } - + if building_entrance_id in dict_buildings # excludes failures + } + # create dataframe with building point data - + list_entries = [ - [value[bbr_key] - for bbr_key in value - if bbr_key in selected_bbr_building_point_labels] - for key, value in dict_buildings_points.items()] - + [ + value[bbr_key] + for bbr_key in value + if bbr_key in selected_bbr_building_point_labels + ] + for key, value in dict_buildings_points.items() + ] + df_building_points = DataFrame( data=list_entries, columns=selected_bbr_building_point_labels, - index=dict_buildings_points.keys() - ) - + index=dict_buildings_points.keys(), + ) + # merge all three, two at a time - - df_buildings = merge(df_buildings, - df_building_points, - right_index=True, - left_index=True, - suffixes=(None,"_x")) # adds "_x" to duplicate columns - - df_buildings = merge(df_buildings, - df_building_entrances, - 
right_index=True, - left_index=True, - suffixes=(None,"_y")) # adds "_y" to duplicate columns + + df_buildings = merge( + df_buildings, + df_building_points, + right_index=True, + left_index=True, + suffixes=(None, "_x"), + ) # adds "_x" to duplicate columns + + df_buildings = merge( + df_buildings, + df_building_entrances, + right_index=True, + left_index=True, + suffixes=(None, "_y"), + ) # adds "_y" to duplicate columns # ************************************************************************* # ************************************************************************* - + # create a geodataframe whose geometry is that of building points - + # specify the coordinate system - - coordinate_system = "EPSG:4326" # latitude, longitude - - key_bbr_coordinate_system = 5 # WGS 84 = EPSG:4326 - + + coordinate_system = "EPSG:4326" # latitude, longitude + + key_bbr_coordinate_system = 5 # WGS 84 = EPSG:4326 + # raise an error if different coordinates systems are being used - + for building_entrance_id in dict_building_entrances: - - if dict_buildings[building_entrance_id][ - label_bbr_bygningpunkt][ - label_bbr_bygningpunkt_koorsys] != key_bbr_coordinate_system: - - raise NotImplementedError('Only WGS 84 coordinates can be used.') - + if ( + dict_buildings[building_entrance_id][label_bbr_bygningpunkt][ + label_bbr_bygningpunkt_koorsys + ] + != key_bbr_coordinate_system + ): + raise NotImplementedError("Only WGS 84 coordinates can be used.") + # create a dictionary with the building point geometries (i.e. points) - + dict_building_point_geometry = { building_entrance_id: Point( - dict_buildings[building_entrance_id][ - label_bbr_bygningpunkt][ - label_bbr_bygningpunkt_coord] - ) + dict_buildings[building_entrance_id][label_bbr_bygningpunkt][ + label_bbr_bygningpunkt_coord + ] + ) for building_entrance_id in dict_building_entrances - if dict_buildings[building_entrance_id][ - label_bbr_bygningpunkt][ - label_bbr_bygningpunkt_koorsys] == key_bbr_coordinate_system - } - + if dict_buildings[building_entrance_id][label_bbr_bygningpunkt][ + label_bbr_bygningpunkt_koorsys + ] + == key_bbr_coordinate_system + } + # create geodataframe - + gdf_buildings = GeoDataFrame( data=df_buildings, geometry=GeoSeries(data=dict_building_point_geometry), - crs=coordinate_system - ) - + crs=coordinate_system, + ) + return gdf_buildings, list_failures - + # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** -def fetch_building_entrance_data(building_entrance_ids: list) -> Tuple[dict, - list]: +def fetch_building_entrance_data(building_entrance_ids: list) -> Tuple[dict, list]: # ************************************************************************* # ************************************************************************* - + # retrieve data about each node identified through OSM - + dict_building_entrances = {} - + list_failures = [] - + # # determine the number of osm entries - + # number_building_entrance_ids = len(building_entrance_ids) - + # for each building entrance id - - for building_entrance_id in building_entrance_ids: - + + for building_entrance_id in building_entrance_ids: # compose the url from which to get bbr data associated with the id - - _url = url_prefix_entrance+building_entrance_id - + + _url = url_prefix_entrance + building_entrance_id + 
try: - # retrieve the building entrance data - + with urllib.request.urlopen(_url) as response: - # parse the data - - bbr_entrance_data_json = json.loads( - response.read().decode('utf-8') - ) - + + bbr_entrance_data_json = json.loads(response.read().decode("utf-8")) + # store the data - + if len(bbr_entrance_data_json) != 0: - for bbr_entry in bbr_entrance_data_json: - dict_building_entrances[ bbr_entry[label_bbr_building_id] - ] = bbr_entry - + ] = bbr_entry + else: - list_failures.append(building_entrance_id) - + response.close() - + except Exception: - response.close() - + # ************************************************************************* # ************************************************************************* - + return dict_building_entrances, list_failures - + # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** + def fetch_building_data(building_codes: list): - # ************************************************************************* # ************************************************************************* - + # get data about each specific building - + dict_buildings = {} - - # for each building id - + + # for each building id + for building_id in building_codes: - # compose a url with it - - _url = url_prefix_buildings+building_id - + + _url = url_prefix_buildings + building_id + # try statement - + try: - # retrieve that data - + with urllib.request.urlopen(_url) as response: - # parse the data - - bbr_data = json.loads(response.read().decode('utf-8')) - + + bbr_data = json.loads(response.read().decode("utf-8")) + dict_buildings[building_id] = bbr_data - + response.close() - + except Exception: - response.close() - + # ************************************************************************* # ************************************************************************* - + return dict_buildings - + # ************************************************************************* # ************************************************************************* + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/buildings/dk/heat.py b/src/topupopt/data/buildings/dk/heat.py index 3b35b05..c492c29 100644 --- a/src/topupopt/data/buildings/dk/heat.py +++ b/src/topupopt/data/buildings/dk/heat.py @@ -12,22 +12,19 @@ from .bbr import label_bbr_entrance_id, label_bbr_housing_area # labels -selected_bbr_adgang_labels = [ - "Opgang_id", - "AdgAdr_id", - "Bygning_id"] +selected_bbr_adgang_labels = ["Opgang_id", "AdgAdr_id", "Bygning_id"] selected_bbr_building_point_labels = [ "KoorOest", "KoorNord", "KoorSystem", - #"koordinater" # corresponds to a list, which cannot be written to a file - ] + # "koordinater" # corresponds to a list, which cannot be written to a file +] selected_bbr_building_labels = [ "BYG_ANVEND_KODE", - "OPFOERELSE_AAR", # new - "OMBYG_AAR", # new + "OPFOERELSE_AAR", # new + "OMBYG_AAR", # new "BYG_ARL_SAML", "BYG_BOLIG_ARL_SAML", "ERHV_ARL_SAML", @@ -47,145 +44,138 @@ selected_bbr_building_labels = [ "VARMEINSTAL_KODE", "OPVARMNING_KODE", "VARME_SUPPL_KODE", - 
"BygPkt_id"] + "BygPkt_id", +] # label under which building entrance ids can be found in OSM -label_osm_entrance_id = 'osak:identifier' +label_osm_entrance_id = "osak:identifier" # ***************************************************************************** # ***************************************************************************** + def heat_demand_dict_by_building_entrance( - gdf_osm: GeoDataFrame, - gdf_buildings: GeoDataFrame, - number_intervals: int, - time_interval_durations: list, - bdg_specific_demand: dict, - bdg_ratio_min_max: dict, - bdg_demand_phase_shift: dict = None, - key_osm_entr_id: str = label_osm_entrance_id, - key_bbr_entr_id: str = label_bbr_entrance_id, - avg_state: list = None, - state_correlates_with_output: bool = False - ) -> dict: - + gdf_osm: GeoDataFrame, + gdf_buildings: GeoDataFrame, + number_intervals: int, + time_interval_durations: list, + bdg_specific_demand: dict, + bdg_ratio_min_max: dict, + bdg_demand_phase_shift: dict = None, + key_osm_entr_id: str = label_osm_entrance_id, + key_bbr_entr_id: str = label_bbr_entrance_id, + avg_state: list = None, + state_correlates_with_output: bool = False, +) -> dict: # initialise dict for each building entrance - + demand_dict = {} - + # for each building entrance - + for osm_index in gdf_osm.index: - # initialise dict for each building consumption point - + heat_demand_profiles = [] - + # find the indexes for each building leading to the curr. cons. point - - building_indexes = ( - gdf_buildings[ - gdf_buildings[key_bbr_entr_id] == - gdf_osm.loc[osm_index][key_osm_entr_id] - ].index - ) - + + building_indexes = gdf_buildings[ + gdf_buildings[key_bbr_entr_id] == gdf_osm.loc[osm_index][key_osm_entr_id] + ].index + # for each building - + for building_index in building_indexes: - # get relevant data - + # base_load_avg_ratio = 0.3 - + # specific_demand = 107 # kWh/m2/year - + area = gdf_buildings.loc[building_index][label_bbr_housing_area] - + # estimate its demand - + if type(avg_state) == type(None): - # ignore states - + heat_demand_profiles.append( np.array( discrete_sinusoid_matching_integral( - bdg_specific_demand[building_index]*area, - time_interval_durations=time_interval_durations, + bdg_specific_demand[building_index] * area, + time_interval_durations=time_interval_durations, bdg_ratio_min_max=bdg_ratio_min_max[building_index], phase_shift_radians=( bdg_demand_phase_shift[building_index] - # bdg_demand_phase_shift_amplitude*np.random.random() - # if (type(bdg_demand_phase_shift_amplitude) == + # bdg_demand_phase_shift_amplitude*np.random.random() + # if (type(bdg_demand_phase_shift_amplitude) == # type(None)) else None - ) - ) + ), ) ) - + ) + else: - # states matter - + heat_demand_profiles.append( np.array( create_profile_using_time_weighted_state( integration_result=( - bdg_specific_demand[building_index]*area - ), - avg_state=avg_state, - time_interval_durations=time_interval_durations, + bdg_specific_demand[building_index] * area + ), + avg_state=avg_state, + time_interval_durations=time_interval_durations, bdg_ratio_min_max=bdg_ratio_min_max[building_index], - state_correlates_with_output=state_correlates_with_output - ) + state_correlates_with_output=state_correlates_with_output, ) ) - + ) + # ***************************************************************** - + # add the profiles, time step by time step if len(heat_demand_profiles) == 0: final_profile = [] else: - final_profile = sum(profile - for profile in heat_demand_profiles) - + final_profile = sum(profile for profile in 
heat_demand_profiles) + # ********************************************************************* - + # store the demand profile demand_dict[osm_index] = final_profile - + # ********************************************************************* - + # return return demand_dict + # ***************************************************************************** # ***************************************************************************** + def total_heating_area( - gdf_osm: GeoDataFrame, - gdf_buildings: GeoDataFrame, - key_osm_entr_id: str = label_osm_entrance_id, - key_bbr_entr_id: str = label_bbr_entrance_id - ) -> float: - + gdf_osm: GeoDataFrame, + gdf_buildings: GeoDataFrame, + key_osm_entr_id: str = label_osm_entrance_id, + key_bbr_entr_id: str = label_bbr_entrance_id, +) -> float: area = 0 for osm_index in gdf_osm.index: # find the indexes for each building leading to the curr. cons. point - building_indexes = ( - gdf_buildings[ - gdf_buildings[label_bbr_entrance_id] == - gdf_osm.loc[osm_index][label_osm_entrance_id] - ].index - ) + building_indexes = gdf_buildings[ + gdf_buildings[label_bbr_entrance_id] + == gdf_osm.loc[osm_index][label_osm_entrance_id] + ].index # for each building for building_index in building_indexes: # get relevant data area += gdf_buildings.loc[building_index][label_bbr_housing_area] return area + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/dhn/__init__.py b/src/topupopt/data/dhn/__init__.py index 633f866..40a96af 100644 --- a/src/topupopt/data/dhn/__init__.py +++ b/src/topupopt/data/dhn/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- - diff --git a/src/topupopt/data/dhn/network.py b/src/topupopt/data/dhn/network.py index 7e0d4d1..14f31be 100644 --- a/src/topupopt/data/dhn/network.py +++ b/src/topupopt/data/dhn/network.py @@ -24,29 +24,29 @@ from ...data.finance.utils import ArcInvestments # constants -KEY_DHT_OPTIONS_OBJ = 'trench' -KEY_DHT_LENGTH = 'length' -KEY_DHT_UCF = 'capacity_unit_conversion_factor' -KEY_HHT_DHT_PIPES = 'pipes' -KEY_HHT_STD_PIPES = 'pipe_tuple' +KEY_DHT_OPTIONS_OBJ = "trench" +KEY_DHT_LENGTH = "length" +KEY_DHT_UCF = "capacity_unit_conversion_factor" +KEY_HHT_DHT_PIPES = "pipes" +KEY_HHT_STD_PIPES = "pipe_tuple" # ***************************************************************************** # ***************************************************************************** + class PipeTrenchOptions(ArcsWithoutProportionalLosses): "A class for defining investments in district heating trenches." 
- + def __init__( - self, - trench: SupplyReturnPipeTrench, - name: str, - length: float, - specific_capacity_cost: float or list = None, - minimum_cost: list or tuple = None, # default: pipes - capacity_is_instantaneous: bool = False, - unit_conversion_factor: float = 1.0, - ): - + self, + trench: SupplyReturnPipeTrench, + name: str, + length: float, + specific_capacity_cost: float or list = None, + minimum_cost: list or tuple = None, # default: pipes + capacity_is_instantaneous: bool = False, + unit_conversion_factor: float = 1.0, + ): # store the unit conversion self.unit_conversion_factor = unit_conversion_factor # keep the trench object @@ -54,36 +54,33 @@ class PipeTrenchOptions(ArcsWithoutProportionalLosses): # keep the trench length self.length = ( [length for i in range(trench.number_options())] - if trench.vector_mode else - length - ) + if trench.vector_mode + else length + ) # determine the rated heat capacity - rhc = trench.rated_heat_capacity( - unit_conversion_factor=unit_conversion_factor - ) + rhc = trench.rated_heat_capacity(unit_conversion_factor=unit_conversion_factor) # initialise the object using the mother class ArcsWithoutProportionalLosses.__init__( - self, - name=name, - static_loss=None, - capacity=[rhc] if isinstance(rhc, Real) else rhc, - minimum_cost=minimum_cost, + self, + name=name, + static_loss=None, + capacity=[rhc] if isinstance(rhc, Real) else rhc, + minimum_cost=minimum_cost, specific_capacity_cost=( 0 - if type(specific_capacity_cost) == type(None) else - specific_capacity_cost - ), - capacity_is_instantaneous=False - ) + if type(specific_capacity_cost) == type(None) + else specific_capacity_cost + ), + capacity_is_instantaneous=False, + ) # initialise the minimum cost if type(minimum_cost) == type(None): self.set_minimum_cost() # ************************************************************************* # ************************************************************************* - - def set_minimum_cost(self, minimum_cost = None): - + + def set_minimum_cost(self, minimum_cost=None): # minimum arc cost # if no external minimum cost list was provided, calculate it if type(minimum_cost) == type(None): @@ -91,22 +88,21 @@ class PipeTrenchOptions(ArcsWithoutProportionalLosses): if self.trench.vector_mode: # multiple options self.minimum_cost = tuple( - (pipe.sp*length # twin pipes: one twin pipe - if self.trench.twin_pipes else - pipe.sp*length*2) # single pipes: two single pipes - for pipe, length in zip( - self.trench.supply_pipe, - self.length - ) - ) - else: # only one option - self.minimum_cost = (self.trench.supply_pipe.sp*self.length,) - else: # use an external minimum cost + ( + pipe.sp * length # twin pipes: one twin pipe + if self.trench.twin_pipes + else pipe.sp * length * 2 + ) # single pipes: two single pipes + for pipe, length in zip(self.trench.supply_pipe, self.length) + ) + else: # only one option + self.minimum_cost = (self.trench.supply_pipe.sp * self.length,) + else: # use an external minimum cost self.minimum_cost = tuple(minimum_cost) - + # ************************************************************************* # ************************************************************************* - + def set_capacity(self, **kwargs): # retrieve the rated heat capacity rhc = self.trench.rated_heat_capacity(**kwargs) @@ -116,56 +112,52 @@ class PipeTrenchOptions(ArcsWithoutProportionalLosses): else: # one option, rhc is one value self.capacity = (rhc,) - + # ************************************************************************* # 
************************************************************************* - + def set_static_losses( - self, - scenario_key, - ground_thermal_conductivity: float or list, - ground_air_heat_transfer_coefficient: float or list, - time_interval_duration: float or list, - temperature_surroundings: float or list, - length: float or list = None, - unit_conversion_factor: float = None, - **kwargs): - + self, + scenario_key, + ground_thermal_conductivity: float or list, + ground_air_heat_transfer_coefficient: float or list, + time_interval_duration: float or list, + temperature_surroundings: float or list, + length: float or list = None, + unit_conversion_factor: float = None, + **kwargs + ): hts = self.trench.heat_transfer_surroundings( ground_thermal_conductivity=ground_thermal_conductivity, - ground_air_heat_transfer_coefficient=( - ground_air_heat_transfer_coefficient), + ground_air_heat_transfer_coefficient=(ground_air_heat_transfer_coefficient), time_interval_duration=time_interval_duration, temperature_surroundings=temperature_surroundings, - length=( - self.length - if type(length) == type(None) else - length - ), + length=(self.length if type(length) == type(None) else length), unit_conversion_factor=( - self.unit_conversion_factor - if type(unit_conversion_factor) == type(None) else - unit_conversion_factor - ), - **kwargs) - + self.unit_conversion_factor + if type(unit_conversion_factor) == type(None) + else unit_conversion_factor + ), + **kwargs + ) + if self.trench.vector_mode: # multiple options: hts is a vector - if (hasattr(self, "static_loss") and - type(self.static_loss) != type(None)): + if hasattr(self, "static_loss") and type(self.static_loss) != type(None): # update the static loss dictionary if type(hts[0]) == list: # multiple time intervals - self.static_loss.update({ - (h, scenario_key, k): hts[h][k] - for h, hts_h in enumerate(hts) - for k, hts_hk in enumerate(hts_h) - }) - else: # not a list: one time interval - self.static_loss.update({ - (h, scenario_key, 0): hts[h] - for h, hts_h in enumerate(hts) - }) + self.static_loss.update( + { + (h, scenario_key, k): hts[h][k] + for h, hts_h in enumerate(hts) + for k, hts_hk in enumerate(hts_h) + } + ) + else: # not a list: one time interval + self.static_loss.update( + {(h, scenario_key, 0): hts[h] for h, hts_h in enumerate(hts)} + ) else: # no static loss dictionary, create it if type(hts[0]) == list: @@ -174,59 +166,52 @@ class PipeTrenchOptions(ArcsWithoutProportionalLosses): (h, scenario_key, k): hts[h][k] for h, hts_h in enumerate(hts) for k, hts_hk in enumerate(hts_h) - } - else: # not a list: one time interval + } + else: # not a list: one time interval self.static_loss = { - (h, scenario_key, 0): hts[h] - for h, hts_h in enumerate(hts) - } + (h, scenario_key, 0): hts[h] for h, hts_h in enumerate(hts) + } else: # one option: hts might be a number - if (hasattr(self, "static_loss") and - type(self.static_loss) != type(None)): + if hasattr(self, "static_loss") and type(self.static_loss) != type(None): # update the static loss dictionary if not isinstance(hts, Real): # multiple time intervals - self.static_loss.update({ - (0, scenario_key, k): hts[k] - for k, hts_k in enumerate(hts) - }) - else: # not a list: one time interval - self.static_loss.update({ - (0, scenario_key, 0): hts - }) + self.static_loss.update( + {(0, scenario_key, k): hts[k] for k, hts_k in enumerate(hts)} + ) + else: # not a list: one time interval + self.static_loss.update({(0, scenario_key, 0): hts}) else: # no static loss dictionary, create it if not 
isinstance(hts, Real): # multiple time intervals self.static_loss = { - (0, scenario_key, k): hts_k - for k, hts_k in enumerate(hts) - } - else: # not a list: one time interval - self.static_loss = { - (0, scenario_key, 0): hts - } + (0, scenario_key, k): hts_k for k, hts_k in enumerate(hts) + } + else: # not a list: one time interval + self.static_loss = {(0, scenario_key, 0): hts} + # ***************************************************************************** # ***************************************************************************** + class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): "A class for defining investments in district heating trenches." - + def __init__( - self, - trench: SupplyReturnPipeTrench, - name: str, - length: float, - investments: tuple, - static_loss: dict = None, - specific_capacity_cost: float or list = None, - capacity_is_instantaneous: bool = False, - unit_conversion_factor: float = 1.0, - **kwargs - ): - + self, + trench: SupplyReturnPipeTrench, + name: str, + length: float, + investments: tuple, + static_loss: dict = None, + specific_capacity_cost: float or list = None, + capacity_is_instantaneous: bool = False, + unit_conversion_factor: float = 1.0, + **kwargs + ): # store the unit conversion self.unit_conversion_factor = unit_conversion_factor # keep the trench object @@ -234,36 +219,34 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # keep the trench length self.length = ( [length for i in range(trench.number_options())] - if trench.vector_mode else - length - ) + if trench.vector_mode + else length + ) # determine the rated heat capacity - rhc = trench.rated_heat_capacity( - unit_conversion_factor=unit_conversion_factor - ) + rhc = trench.rated_heat_capacity(unit_conversion_factor=unit_conversion_factor) # initialise the object using the mother class ArcInvestments.__init__( - self, - investments=investments, - name=name, - efficiency=None, - efficiency_reverse=None, + self, + investments=investments, + name=name, + efficiency=None, + efficiency_reverse=None, static_loss=static_loss, capacity=[rhc] if isinstance(rhc, Real) else rhc, specific_capacity_cost=( 0 - if type(specific_capacity_cost) == type(None) else - specific_capacity_cost - ), - capacity_is_instantaneous=False, - validate=False - ) + if type(specific_capacity_cost) == type(None) + else specific_capacity_cost + ), + capacity_is_instantaneous=False, + validate=False, + ) # # ************************************************************************* # # ************************************************************************* - + # def set_minimum_cost(self, minimum_cost = None): - + # # minimum arc cost # # if no external minimum cost list was provided, calculate it # if type(minimum_cost) == type(None): @@ -272,10 +255,10 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # # multiple options # self.minimum_cost = tuple( # (pipe.sp*length # twin pipes: one twin pipe - # if self.trench.twin_pipes else + # if self.trench.twin_pipes else # pipe.sp*length*2) # single pipes: two single pipes # for pipe, length in zip( - # self.trench.supply_pipe, + # self.trench.supply_pipe, # self.length # ) # ) @@ -283,10 +266,10 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # self.minimum_cost = (self.trench.supply_pipe.sp*self.length,) # else: # use an external minimum cost # self.minimum_cost = tuple(minimum_cost) - + # # ************************************************************************* # # 
************************************************************************* - + # def set_capacity(self, **kwargs): # # retrieve the rated heat capacity # rhc = self.trench.rated_heat_capacity(**kwargs) @@ -296,12 +279,12 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # else: # # one option, rhc is one value # self.capacity = (rhc,) - + # # ************************************************************************* # # ************************************************************************* - + # def set_static_losses( - # self, + # self, # scenario_key, # ground_thermal_conductivity: float or list, # ground_air_heat_transfer_coefficient: float or list, @@ -310,7 +293,7 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # length: float or list = None, # unit_conversion_factor: float = None, # **kwargs): - + # hts = self.trench.heat_transfer_surroundings( # ground_thermal_conductivity=ground_thermal_conductivity, # ground_air_heat_transfer_coefficient=( @@ -318,20 +301,20 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # time_interval_duration=time_interval_duration, # temperature_surroundings=temperature_surroundings, # length=( - # self.length - # if type(length) == type(None) else + # self.length + # if type(length) == type(None) else # length # ), # unit_conversion_factor=( - # self.unit_conversion_factor - # if type(unit_conversion_factor) == type(None) else + # self.unit_conversion_factor + # if type(unit_conversion_factor) == type(None) else # unit_conversion_factor # ), # **kwargs) - + # if self.trench.vector_mode: # # multiple options: hts is a vector - # if (hasattr(self, "static_loss") and + # if (hasattr(self, "static_loss") and # type(self.static_loss) != type(None)): # # update the static loss dictionary # if type(hts[0]) == list: @@ -362,7 +345,7 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # } # else: # # one option: hts might be a number - # if (hasattr(self, "static_loss") and + # if (hasattr(self, "static_loss") and # type(self.static_loss) != type(None)): # # update the static loss dictionary # if not isinstance(hts, Real): @@ -387,21 +370,25 @@ class PipeTrenchInvestments(ArcInvestments, PipeTrenchOptions): # self.static_loss = { # (0, scenario_key, 0): hts # } - + + # ***************************************************************************** # ***************************************************************************** + class ExistingPipeTrench(PipeTrenchOptions): "A class for existing pipe trenches." 
- + def __init__(self, option_selected: int, **kwargs): # initialise PipeTrenchOptions.__init__( self, - minimum_cost=[0 for i in range(kwargs['trench'].number_options())], - **kwargs) + minimum_cost=[0 for i in range(kwargs["trench"].number_options())], + **kwargs + ) # define the option that already exists self.options_selected[option_selected] = True + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/dhn/utils.py b/src/topupopt/data/dhn/utils.py index 8ae75f3..1957ebe 100644 --- a/src/topupopt/data/dhn/utils.py +++ b/src/topupopt/data/dhn/utils.py @@ -15,12 +15,12 @@ from ...problems.esipp.network import Network from .network import PipeTrenchOptions from topupheat.pipes.trenches import SupplyReturnPipeTrench from numbers import Real - + # ***************************************************************************** # ***************************************************************************** - -def cost_pipes(trench: SupplyReturnPipeTrench, - length: float or tuple) -> tuple: + + +def cost_pipes(trench: SupplyReturnPipeTrench, length: float or tuple) -> tuple: """ Returns the costs of each trench option for a given trench length. @@ -45,84 +45,87 @@ def cost_pipes(trench: SupplyReturnPipeTrench, # use the specific pipe cost that features in the database if trench.vector_mode: # multiple options - if (type(length) == tuple and - len(length) == trench.number_options()): + if type(length) == tuple and len(length) == trench.number_options(): # multiple trench lengths return tuple( - (pipe.sp*length # twin pipes: one twin pipe - if trench.twin_pipes else - pipe.sp*length*2) # single pipes: two single pipes + ( + pipe.sp * length # twin pipes: one twin pipe + if trench.twin_pipes + else pipe.sp * length * 2 + ) # single pipes: two single pipes for pipe, length in zip(trench.supply_pipe, length) - ) + ) elif isinstance(length, Real): # one trench length return tuple( - (pipe.sp*length # twin pipes: one twin pipe - if trench.twin_pipes else - pipe.sp*length*2) # single pipes: two single pipes + ( + pipe.sp * length # twin pipes: one twin pipe + if trench.twin_pipes + else pipe.sp * length * 2 + ) # single pipes: two single pipes for pipe in trench.supply_pipe - ) + ) else: - raise ValueError('Unrecognised input combination.') - elif (not trench.vector_mode and isinstance(length, Real)): + raise ValueError("Unrecognised input combination.") + elif not trench.vector_mode and isinstance(length, Real): # only one option - return (trench.supply_pipe.sp*length,) - else: # only one option - raise ValueError('Unrecognised input combination.') - + return (trench.supply_pipe.sp * length,) + else: # only one option + raise ValueError("Unrecognised input combination.") + # # keep the trench length # self.length = ( # [length for i in range(trench.number_options())] - # if trench.vector_mode else + # if trench.vector_mode else # length # ) - + + # ***************************************************************************** # ***************************************************************************** + def summarise_network_by_pipe_technology( - network: Network, - print_output: bool = False - ) -> dict: + network: Network, print_output: bool = False +) -> dict: "A method to summarise a network by pipe technology." 
- + # ************************************************************************* # ************************************************************************* - + # create a dictionary that compiles the lengths of each arc technology - + length_dict = {} - + # ************************************************************************* # ************************************************************************* - + # for each arc for arc_key in network.edges(keys=True): # check if it is a PipeTrench object if not isinstance( - network.edges[arc_key][Network.KEY_ARC_TECH], - PipeTrenchOptions - ): + network.edges[arc_key][Network.KEY_ARC_TECH], PipeTrenchOptions + ): # if not, skip arc continue - + # for each arc technology option for h, tech_option in enumerate( - network.edges[arc_key][Network.KEY_ARC_TECH].options_selected - ): + network.edges[arc_key][Network.KEY_ARC_TECH].options_selected + ): # check if the tech option was selected if tech_option: # technology option was selected # get the length of the arc arc_length = ( network.edges[arc_key][Network.KEY_ARC_TECH].length[h] - if type(network.edges[arc_key][ - Network.KEY_ARC_TECH].length) == list else - network.edges[arc_key][Network.KEY_ARC_TECH].length - ) + if type(network.edges[arc_key][Network.KEY_ARC_TECH].length) == list + else network.edges[arc_key][Network.KEY_ARC_TECH].length + ) # identify the option tech_option_label = network.edges[arc_key][ - Network.KEY_ARC_TECH].trench.printable_description(h) + Network.KEY_ARC_TECH + ].trench.printable_description(h) # if the arc technology has been previously selected... if tech_option_label in length_dict: # ...increment the total length @@ -130,158 +133,158 @@ def summarise_network_by_pipe_technology( else: # if not, add a new arc technology to the dictionary length_dict[tech_option_label] = arc_length - + # ************************************************************************* # ************************************************************************* - - if print_output: - print('printing the arc technologies selected by pipe size...') + + if print_output: + print("printing the arc technologies selected by pipe size...") for key, value in sorted( - (tech, length) - for tech, length in length_dict.items() - ): - print(str(key)+': '+str(value)) - print('total: '+str(sum(length_dict.values()))) - + (tech, length) for tech, length in length_dict.items() + ): + print(str(key) + ": " + str(value)) + print("total: " + str(sum(length_dict.values()))) + return length_dict - + # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** -def plot_network_layout(network: Network, - include_basemap: bool = False, - figure_size: tuple = (25, 25), - min_linewidth: float = 1.0, - max_linewidth: float = 3.0, - legend_fontsize: float = 20.0, - basemap_zoom_level: float = 15, - legend_location: str = 'lower left', - legend_with_brand_model: bool = False, - legend_transparency: float = None): +def plot_network_layout( + network: Network, + include_basemap: bool = False, + figure_size: tuple = (25, 25), + min_linewidth: float = 1.0, + max_linewidth: float = 3.0, + legend_fontsize: float = 20.0, + basemap_zoom_level: float = 15, + legend_location: str = "lower left", + legend_with_brand_model: bool = False, + legend_transparency: float = None, +): # 
convert graph object to GDF - + _, my_gdf_arcs = ox.graph_to_gdfs(network) - + # convert to final plot CRS - + my_gdf = my_gdf_arcs.to_crs(epsg=3857) # dict: keys are the pipe tuples and the values are lists of edge keys - + arc_tech_summary_dict = {} - + # for each edge for arc_key in my_gdf.index: # check if it is a PipeTrenchOptions object if not isinstance( - network.edges[arc_key][Network.KEY_ARC_TECH], - PipeTrenchOptions - ): + network.edges[arc_key][Network.KEY_ARC_TECH], PipeTrenchOptions + ): # if not, skip arc continue - + # find the trench's description, if it was selected - - try: + + try: selected_option = ( - my_gdf[Network.KEY_ARC_TECH].loc[ - arc_key].trench.printable_description( - my_gdf[Network.KEY_ARC_TECH].loc[ - arc_key].options_selected.index(True) - ) + my_gdf[Network.KEY_ARC_TECH] + .loc[arc_key] + .trench.printable_description( + my_gdf[Network.KEY_ARC_TECH] + .loc[arc_key] + .options_selected.index(True) ) + ) except ValueError: continue - + # if the pipe tuple already exists as a key in the dict if selected_option in arc_tech_summary_dict: # append the edge_key to the list obtained via that pipe tuple key arc_tech_summary_dict[selected_option].append(arc_key) - else: # if not + else: # if not # add a new dict entry whose key is the pipe tuple and create a list arc_tech_summary_dict[selected_option] = [arc_key] - + list_sorted = sorted( (int(printable_description[2:]), printable_description) for printable_description in arc_tech_summary_dict.keys() - ) - (list_sorted_dn, - list_sorted_descriptions) = list(map(list,zip(*list_sorted))) - - list_arc_widths = [ - min_linewidth+ - (max_linewidth-min_linewidth)* - iteration/(len(list_sorted_dn)-1) - for iteration, _ in enumerate(list_sorted_dn) - ] if len(list_sorted_dn) != 1 else [(max_linewidth+min_linewidth)/2] - + ) + (list_sorted_dn, list_sorted_descriptions) = list(map(list, zip(*list_sorted))) + + list_arc_widths = ( + [ + min_linewidth + + (max_linewidth - min_linewidth) * iteration / (len(list_sorted_dn) - 1) + for iteration, _ in enumerate(list_sorted_dn) + ] + if len(list_sorted_dn) != 1 + else [(max_linewidth + min_linewidth) / 2] + ) + # ************************************************************************* # ************************************************************************* - - fig, ax = plt.subplots(1,1) - + + fig, ax = plt.subplots(1, 1) + fig.set_size_inches(*figure_size) - - for description, arc_width in zip( - list_sorted_descriptions, - list_arc_widths - ): - + + for description, arc_width in zip(list_sorted_descriptions, list_arc_widths): # prepare plot - + my_gdf.loc[arc_tech_summary_dict[description]].plot( - edgecolor='k', - legend=True, - linewidth=arc_width, - ax=ax) - + edgecolor="k", legend=True, linewidth=arc_width, ax=ax + ) + # adjust legend labels - - ax.legend(list_sorted_descriptions, - fontsize=legend_fontsize, - loc=legend_location, - framealpha=( - legend_transparency - if type(legend_transparency) != type(None) else None - ) - ) - + + ax.legend( + list_sorted_descriptions, + fontsize=legend_fontsize, + loc=legend_location, + framealpha=( + legend_transparency if type(legend_transparency) != type(None) else None + ), + ) + # add base map - + if include_basemap: - - cx.add_basemap(ax, - zoom=basemap_zoom_level, - source=cx.providers.OpenStreetMap.Mapnik, - #crs=gdf_map.crs, - ) - + cx.add_basemap( + ax, + zoom=basemap_zoom_level, + source=cx.providers.OpenStreetMap.Mapnik, + # crs=gdf_map.crs, + ) + # 
************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** + def plot_heating_demand( - losses: list, - end_use_demand: list, - labels: list, - ylabel: str = 'Heating demand [MWh]', - title: str = 'Heat demand by month' - ): - + losses: list, + end_use_demand: list, + labels: list, + ylabel: str = "Heating demand [MWh]", + title: str = "Heat demand by month", +): energy_totals = { - 'Losses (optimised)': np.array(losses), - 'End use (estimated)': np.array(end_use_demand), - } + "Losses (optimised)": np.array(losses), + "End use (estimated)": np.array(end_use_demand), + } colors = { - 'Losses (optimised)': 'tab:orange', - 'End use (estimated)': 'tab:blue', - } + "Losses (optimised)": "tab:orange", + "End use (estimated)": "tab:blue", + } # width = 0.8 # the width of the bars: can also be len(x) sequence # make sure the grid lines are behind the bars @@ -290,28 +293,28 @@ def plot_heating_demand( fig, ax = plt.subplots() bottom = np.zeros(len(labels)) - figure_size = (8,4) + figure_size = (8, 4) fig.set_size_inches(figure_size[0], figure_size[1]) for energy_category, energy_total in energy_totals.items(): - p = ax.bar( - labels, - energy_total, - label=energy_category, - bottom=bottom, - color=colors[energy_category], - zorder=zorder_bars - ) + labels, + energy_total, + label=energy_category, + bottom=bottom, + color=colors[energy_category], + zorder=zorder_bars, + ) bottom += energy_total - ax.bar_label(p, fmt='{:,.0f}', label_type='center') + ax.bar_label(p, fmt="{:,.0f}", label_type="center") # ax.bar_label(p, fmt='{:,.0f}') - ax.grid(zorder=zorder_grid) # zorder=0 to make the grid + ax.grid(zorder=zorder_grid) # zorder=0 to make the grid ax.set(ylabel=ylabel, title=title) ax.legend() plt.show() - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/finance/__init__.py b/src/topupopt/data/finance/__init__.py index 633f866..40a96af 100644 --- a/src/topupopt/data/finance/__init__.py +++ b/src/topupopt/data/finance/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- - diff --git a/src/topupopt/data/finance/invest.py b/src/topupopt/data/finance/invest.py index da6f6d0..f886c6f 100644 --- a/src/topupopt/data/finance/invest.py +++ b/src/topupopt/data/finance/invest.py @@ -10,19 +10,22 @@ from statistics import mean # TODO: enable swapping the polarity + class Investment: """This class is meant to enable analysis of specific investments.""" - + # self.discount_rates: N samples # self.net_cash_flows: N+1 samples - + # TODO: consider using dicts to make things more intuitive, time-wise - - def __init__(self, - discount_rates: list, - net_cash_flows: list = None, - discount_rate: float = None, - analysis_period_span: int = None): + + def __init__( + self, + discount_rates: list, + net_cash_flows: list = None, + discount_rate: float = None, + analysis_period_span: int = None, + ): """ Create an object for investment analysis using typical information. 
@@ -39,186 +42,161 @@ class Investment: """ # validate the inputs - + if type(discount_rates) != type(None): - - # discount_rates is not None: - + # discount_rates is not None: + if type(discount_rates) != tuple: - - raise TypeError( - 'The discount rates must be provided as a tuple.') - + raise TypeError("The discount rates must be provided as a tuple.") + self.discount_rates = tuple(discount_rates) - - self.analysis_period_span = len(self.discount_rates) - + + self.analysis_period_span = len(self.discount_rates) + if self.analysis_period_span <= 0: - raise ValueError( - 'The duration of the period under analysis must be '+ - 'positive.' - ) - + "The duration of the period under analysis must be " + "positive." + ) + else: - - # discount_rates is None: + # discount_rates is None: # discount rate must be positive real under 1 # analysis_period_span must be an int - + if type(discount_rate) != float: - - raise TypeError( - 'The discount rate must be provided as a float.') - + raise TypeError("The discount rate must be provided as a float.") + if discount_rate <= 0 or discount_rate >= 1: - raise ValueError( - 'The discount rate must be in the open interval between 0'+ - ' and 1.' - ) - + "The discount rate must be in the open interval between 0" + + " and 1." + ) + if type(analysis_period_span) != int: - raise TypeError( - 'The duration of the period under consideration must be '+ - 'provided as an integer.') - + "The duration of the period under consideration must be " + + "provided as an integer." + ) + if analysis_period_span <= 0: - raise ValueError( - 'The duration of the period under analysis must be '+ - 'positive.' - ) - + "The duration of the period under analysis must be " + "positive." + ) + self.analysis_period_span = analysis_period_span - + self.discount_rates = tuple( discount_rate for i in range(self.analysis_period_span) - ) - + ) + # check the net cash flows - + if type(net_cash_flows) != type(None): - if type(net_cash_flows) != list: - - raise TypeError( - 'The net cash flows must be provided as a list.') - - if len(net_cash_flows) != self.analysis_period_span+1: - - raise ValueError( - 'The inputs are consistent in terms of length.' 
- ) - + raise TypeError("The net cash flows must be provided as a list.") + + if len(net_cash_flows) != self.analysis_period_span + 1: + raise ValueError("The inputs are inconsistent in terms of length.") + self.net_cash_flows = list(net_cash_flows) - + else: - # net_cash_flows is None: initialise it as a list of zeros - - self.net_cash_flows = list( - 0 for i in range(self.analysis_period_span+1) - ) - + + self.net_cash_flows = list(0 for i in range(self.analysis_period_span + 1)) + # discount factors - + self.discount_factors = tuple( discount_factor(self.discount_rates[:i]) - for i in range(self.analysis_period_span+1) - ) - + for i in range(self.analysis_period_span + 1) + ) + # ************************************************************************* # ************************************************************************* - - def add_investment(self, - investment: float, - investment_period: int, - investment_longevity: int, - commissioning_delay_after_investment: int = 0, - salvage_value_method: str = 'annuity'): - - if salvage_value_method == 'annuity': - + + def add_investment( + self, + investment: float, + investment_period: int, + investment_longevity: int, + commissioning_delay_after_investment: int = 0, + salvage_value_method: str = "annuity", + ): + if salvage_value_method == "annuity": mean_discount_rate = mean(self.discount_rates) - + residual_value = salvage_value_annuity( - investment=investment, - investment_longevity=investment_longevity, - investment_period=investment_period, - discount_rate=mean_discount_rate, - analysis_period_span=self.analysis_period_span - ) - + investment=investment, + investment_longevity=investment_longevity, + investment_period=investment_period, + discount_rate=mean_discount_rate, + analysis_period_span=self.analysis_period_span, + ) + self.net_cash_flows[investment_period] += investment self.net_cash_flows[self.analysis_period_span] += -residual_value - + else: - residual_value = salvage_value_linear_depreciation( - investment=investment, - investment_period=investment_period, - investment_longevity=investment_longevity, + investment=investment, + investment_period=investment_period, + investment_longevity=investment_longevity, analysis_period_span=self.analysis_period_span, commissioning_delay_after_investment=( commissioning_delay_after_investment - ) - ) - + ), + ) + self.net_cash_flows[investment_period] += investment self.net_cash_flows[self.analysis_period_span] += -residual_value - + # ************************************************************************* # ************************************************************************* - - def add_operational_cash_flows(self, - cash_flow: float or int, - start_period: int, - longevity: int = None): + + def add_operational_cash_flows( + self, cash_flow: float or int, start_period: int, longevity: int = None + ): """Adds a sequence of cash flows to the analysis.""" - + if type(longevity) == type(None): - # until the planning horizon - - for i in range(self.analysis_period_span-start_period+1): - + + for i in range(self.analysis_period_span - start_period + 1): # add operational cash flows - - self.net_cash_flows[i+start_period] += cash_flow - + + self.net_cash_flows[i + start_period] += cash_flow + else: - # limited longevity - + for i in range(longevity): - - if i+start_period >= self.analysis_period_span+1: - + if i + start_period >= self.analysis_period_span + 1: break - + # add operational cash flows - - self.net_cash_flows[i+start_period] += cash_flow - + + self.net_cash_flows[i + 
start_period] += cash_flow + # ************************************************************************* # ************************************************************************* - + def net_present_value(self): """Returns the net present value for the investment under analysis.""" - + return npv(self.discount_rates, self.net_cash_flows) - + # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** -def npv(discount_rates: list, - net_cash_flows: list, - return_discount_factors: bool = False) -> float or tuple: + +def npv( + discount_rates: list, net_cash_flows: list, return_discount_factors: bool = False +) -> float or tuple: """ Calculates the net present value using the information provided. @@ -244,49 +222,48 @@ def npv(discount_rates: list, If True, returns the net present value in addition to a list with the discount factors used in the calculation. - """ + """ # check sizes - - if len(discount_rates) != len(net_cash_flows)-1: - + + if len(discount_rates) != len(net_cash_flows) - 1: # the inputs do not match, return None - - raise ValueError('The inputs are inconsistent.') - + + raise ValueError("The inputs are inconsistent.") + discount_factors = [ - discount_factor(discount_rates[:t]) - for t in range(len(discount_rates)+1) - ] - + discount_factor(discount_rates[:t]) for t in range(len(discount_rates) + 1) + ] + if return_discount_factors: - - return sum( - ncf_t*df_t - for (ncf_t, df_t) in zip(net_cash_flows, discount_factors) - ), discount_factors - + return ( + sum( + ncf_t * df_t for (ncf_t, df_t) in zip(net_cash_flows, discount_factors) + ), + discount_factors, + ) + else: - return sum( - ncf_t*df_t - for (ncf_t, df_t) in zip(net_cash_flows, discount_factors) - ) + ncf_t * df_t for (ncf_t, df_t) in zip(net_cash_flows, discount_factors) + ) + # ***************************************************************************** # ***************************************************************************** - + + def discount_factor(discount_rates: list) -> float: """ Return the discount factor consistent with the discount rates provided. - + To calculate the net present value, we need to quantify the effect time - has on the net cash flows. This amounts to a factor, which depends on + has on the net cash flows. This amounts to a factor, which depends on the discount rates between the time of the cash flow and the present. Parameters ---------- discount_rates : list - A list with the discount rates for each time interval between a + A list with the discount rates for each time interval between a given time interval and the present. The order is irrelevant. Returns @@ -295,24 +272,27 @@ def discount_factor(discount_rates: list) -> float: The discount factor consistent with the discount rates provided. It uses all discount rates in the list. 
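# A worked check of the formula below, using assumed values: with a
# constant 5% rate over two periods, the factor is 1 / (1.05 * 1.05), so a
# cash flow of 100 due in two periods is worth roughly 90.70 today.
# >>> from math import prod
# >>> prod([1 / (1 + i) for i in (0.05, 0.05)])  # ~0.9070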
- """ - return prod([1/(1+i) for i in discount_rates]) + """ + return prod([1 / (1 + i) for i in discount_rates]) + # ***************************************************************************** # ***************************************************************************** + def salvage_value_linear_depreciation( - investment: int or float, - investment_period: int, - investment_longevity: int, - analysis_period_span: int, - commissioning_delay_after_investment: int = 1) -> float: + investment: int or float, + investment_period: int, + investment_longevity: int, + analysis_period_span: int, + commissioning_delay_after_investment: int = 1, +) -> float: """ Determine an asset\'s salvage value by the end of an analysis period. - - The depreciation is assumed to be linear: the asset is initially rated at + + The depreciation is assumed to be linear: the asset is initially rated at 100% of the investment made and then 0% by the time it is no longer usable. - + The salvage value is the asset\'s value after the analysis period. Parameters @@ -339,77 +319,89 @@ def salvage_value_linear_depreciation( float The salvage value. - """ - if investment_period >= analysis_period_span+1: - + """ + if investment_period >= analysis_period_span + 1: raise ValueError( - 'The investment has to be made within the period being analysed.' - ) - + "The investment has to be made within the period being analysed." + ) + # calculate the salvage value - + return ( - investment_longevity+ - investment_period+ - commissioning_delay_after_investment-1- - analysis_period_span - )*investment/investment_longevity + ( + investment_longevity + + investment_period + + commissioning_delay_after_investment + - 1 + - analysis_period_span + ) + * investment + / investment_longevity + ) + # ***************************************************************************** # ***************************************************************************** -def salvage_value_annuity(investment: int or float, - discount_rate: float, - investment_longevity: int, - investment_period: int, - analysis_period_span: int) -> float: - + +def salvage_value_annuity( + investment: int or float, + discount_rate: float, + investment_longevity: int, + investment_period: int, + analysis_period_span: int, +) -> float: npv_salvage = present_salvage_value_annuity( - investment=investment, + investment=investment, investment_longevity=investment_longevity, investment_period=investment_period, discount_rate=discount_rate, analysis_period_span=analysis_period_span, - return_annuity=False - ) - - return npv_salvage/discount_factor( + return_annuity=False, + ) + + return npv_salvage / discount_factor( tuple(discount_rate for i in range(analysis_period_span)) - ) + ) + # ***************************************************************************** # ***************************************************************************** -def annuity(investment: int or float, - investment_longevity: int, - discount_rate: float) -> float: + +def annuity( + investment: int or float, investment_longevity: int, discount_rate: float +) -> float: "Returns the annuity value for a given investment sum and longevity." 
- + return ( - investment* - discount_rate/(1-(1+discount_rate)**( - -investment_longevity - )) - ) + investment + * discount_rate + / (1 - (1 + discount_rate) ** (-investment_longevity)) + ) + # ***************************************************************************** # ***************************************************************************** -def present_salvage_value_annuity(investment: int or float, - investment_longevity: int, - investment_period: int, - discount_rate: float, - analysis_period_span: int, - return_annuity: bool = False) -> float: + +def present_salvage_value_annuity( + investment: int or float, + investment_longevity: int, + investment_period: int, + discount_rate: float, + analysis_period_span: int, + return_annuity: bool = False, +) -> float: """ Calculates the present value of an asset after a given analysis period. - + The calculation is based on the annuity method, which assumes that the investment could produce a steady revenue stream for a given period after the investment is made -- the investment longevity. - + This method assumes that the investment precedes the annuity payments. - + Parameters ---------- investment : int or float @@ -440,62 +432,52 @@ def present_salvage_value_annuity(investment: int or float, The annuity. """ - if investment_period >= analysis_period_span+1: - + if investment_period >= analysis_period_span + 1: raise ValueError( - 'The investment has to be made within the period being analysed.' - ) - + "The investment has to be made within the period being analysed." + ) + # the present salvage value requires the lifetime to extend beyond the hor. - - if analysis_period_span >= investment_longevity+investment_period: - + + if analysis_period_span >= investment_longevity + investment_period: if return_annuity: - return 0, annuity( investment=investment, investment_longevity=investment_longevity, - discount_rate=discount_rate - ) - + discount_rate=discount_rate, + ) + else: - return 0 - + # the annuity has to consider the asset longevity and the commission. 
delay - + value_annuity = annuity( investment=investment, investment_longevity=investment_longevity, - discount_rate=discount_rate - ) - + discount_rate=discount_rate, + ) + discount_rates = tuple( - discount_rate - for i in range(investment_longevity+investment_period) - ) - + discount_rate for i in range(investment_longevity + investment_period) + ) + net_cash_flows = list( - value_annuity - for i in range(investment_longevity+investment_period+1) - ) - - for year_index in range(analysis_period_span+1): + value_annuity for i in range(investment_longevity + investment_period + 1) + ) + + for year_index in range(analysis_period_span + 1): net_cash_flows[year_index] = 0 - + if return_annuity: - - return npv( - discount_rates=discount_rates, - net_cash_flows=net_cash_flows - ), value_annuity - + return ( + npv(discount_rates=discount_rates, net_cash_flows=net_cash_flows), + value_annuity, + ) + else: - - return npv( - discount_rates=discount_rates, - net_cash_flows=net_cash_flows - ) + return npv(discount_rates=discount_rates, net_cash_flows=net_cash_flows) + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/gis/__init__.py b/src/topupopt/data/gis/__init__.py index 7a94186..ffeff9b 100644 --- a/src/topupopt/data/gis/__init__.py +++ b/src/topupopt/data/gis/__init__.py @@ -1,2 +1,2 @@ # -*- coding: utf-8 -*- -# import osm \ No newline at end of file +# import osm diff --git a/src/topupopt/data/gis/calculate.py b/src/topupopt/data/gis/calculate.py index ad236f7..17c258c 100644 --- a/src/topupopt/data/gis/calculate.py +++ b/src/topupopt/data/gis/calculate.py @@ -1,4 +1,3 @@ - # imports from math import inf @@ -22,10 +21,11 @@ from ..gis import identify as ident # ***************************************************************************** # ***************************************************************************** + def edge_lengths(network: MultiDiGraph, edge_keys: tuple = None) -> dict: """ Calculate edge lengths in an OSMnx-formatted MultiDiGraph network object. - + The calculation method changes depending on whether the coordinates are projected and depending on whether the edges are simplified. 
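# Usage sketch for edge_lengths; the place name and variable names are
# illustrative assumptions, while graph_from_place and project_graph are
# standard OSMnx calls.
# >>> import osmnx as ox
# >>> G = ox.graph_from_place("Bremen, Germany", network_type="drive")
# >>> lengths = edge_lengths(G)  # unprojected: great-circle distances
# >>> lengths_m = edge_lengths(ox.project_graph(G))  # projected coordinates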
@@ -44,60 +44,66 @@ def edge_lengths(network: MultiDiGraph, edge_keys: tuple = None) -> dict: """ # determine if the graph is projected or not - graph_is_projected = is_projected(network.graph['crs']) + graph_is_projected = is_projected(network.graph["crs"]) # check if edge keys were specified if type(edge_keys) == type(None): # no particular edge keys were provided: consider all edges (default) - edge_keys = network.edges(keys=True) # tuple(network.edges(keys=True)) - # initialise length dict + edge_keys = network.edges(keys=True) # tuple(network.edges(keys=True)) + # initialise length dict length_dict = {} # for each edge on the graph for edge_key in edge_keys: # calculate it using the library if graph_is_projected: # calculate it using projected coordinates - if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: + if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: # use geometry length_dict[edge_key] = length( network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY] - ) + ) else: # use (projected) coordinates start_point = Point( - (network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - network.nodes[edge_key[0]][osm.KEY_OSMNX_Y]) + ( + network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], ) + ) end_point = Point( - (network.nodes[edge_key[1]][osm.KEY_OSMNX_X], - network.nodes[edge_key[1]][osm.KEY_OSMNX_Y]) + ( + network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], ) + ) length_dict[edge_key] = start_point.distance(end_point) - + else: # calculate it using unprojected coordinates (lat/long) - if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: + if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: # use geometry length_dict[edge_key] = great_circle_distance_along_path( network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY] - ) + ) else: # use (unprojected) coordinates length_dict[edge_key] = great_circle( - lat1=network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], - lon1=network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - lat2=network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], - lon2=network.nodes[edge_key[1]][osm.KEY_OSMNX_X] - ) + lat1=network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + lon1=network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + lat2=network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], + lon2=network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + ) # return the dict with lengths of each edge return length_dict + # ***************************************************************************** # ***************************************************************************** + def great_circle_distance_along_path(path: LineString) -> float: """ Computes the great circle distance along a given path. - + The distance is to be calculated using a shapely LineString object made of (longitude, latitude) coordinate tuples. The calculation is vectorised. 
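# Minimal sketch, with illustrative coordinates: the path is a shapely
# LineString of (longitude, latitude) tuples, and the result is the sum of
# the great-circle legs between consecutive points, in metres.
# >>> from shapely.geometry import LineString
# >>> path = LineString([(8.80, 53.07), (8.81, 53.08), (8.83, 53.08)])
# >>> great_circle_distance_along_path(path)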
@@ -118,16 +124,18 @@ def great_circle_distance_along_path(path: LineString) -> float: # sum individual distances and return return sum( great_circle( - lat[:-1], # latitudes of starting points - lon[:-1], # longitudes of starting points + lat[:-1], # latitudes of starting points + lon[:-1], # longitudes of starting points lat[1:], # latitudes of ending points - lon[1:] # longitudes of ending points - ) + lon[1:], # longitudes of ending points ) + ) + # ***************************************************************************** # ***************************************************************************** + def update_street_count(network: MultiDiGraph): """ Updates the street count attributes of nodes in a MultiDiGraph object. @@ -145,22 +153,26 @@ def update_street_count(network: MultiDiGraph): # update street count street_count_dict = count_streets_per_node(network) network.add_nodes_from( - ((key, {osm.KEY_OSMNX_STREET_COUNT:value}) - for key, value in street_count_dict.items()) + ( + (key, {osm.KEY_OSMNX_STREET_COUNT: value}) + for key, value in street_count_dict.items() ) - + ) + + # ***************************************************************************** # ***************************************************************************** -def node_path_length(network: MultiDiGraph, - path: list, - return_minimum_length_only: bool = True) -> list or float: + +def node_path_length( + network: MultiDiGraph, path: list, return_minimum_length_only: bool = True +) -> list or float: """ Returns the length or lengths of a path defined using nodes. - + If more than one edge connects adjacent nodes along the path, a length value will be returned for each possible path combination. - + Parameters ---------- network : MultiDiGraph @@ -176,15 +188,15 @@ def node_path_length(network: MultiDiGraph, The path\'s length or all lengths consistent with the path provided. 
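# Illustrative note: for a path [1, 2, 3] in which nodes 1 and 2 are linked
# by two parallel edges, two lengths are computed, one per edge combination;
# with return_minimum_length_only=True (the default) only the shortest one
# is returned.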
""" - + # direction matters path_length = len(path) if path_length == 0: return inf - + # if the path is given as a list of node keys, then it is subjective # i.e., it may refer to many paths, namely if parallel edges exist - + # check if the path object qualifies as such if not is_path(network, path): # it does not, exit @@ -192,69 +204,64 @@ def node_path_length(network: MultiDiGraph, return inf else: return [inf] - + # prepare a list with all possible paths given as lists of edge keys - list_of_edge_key_paths = [[]] # a list of edge key lists - + list_of_edge_key_paths = [[]] # a list of edge key lists + # for each pair of nodes in the path - for node_pair in range(path_length-1): + for node_pair in range(path_length - 1): # get the edges between these two nodes edge_keys = ident.get_edges_from_a_to_b( - network, - path[node_pair], - path[node_pair+1] - ) + network, path[node_pair], path[node_pair + 1] + ) number_edge_keys = len(edge_keys) - if number_edge_keys == 1: - # only one edge exists: append its key to all existing lists/paths - for edge_key_path in list_of_edge_key_paths: - edge_key_path.append(edge_keys[0]) - else: # multiple edges exist: each path identified so far has to be + if number_edge_keys == 1: + # only one edge exists: append its key to all existing lists/paths + for edge_key_path in list_of_edge_key_paths: + edge_key_path.append(edge_keys[0]) + else: # multiple edges exist: each path identified so far has to be # replicated a total of number_edge_keys times and then updated - number_paths = len(list_of_edge_key_paths) - # for each parallel edge - for edge_key_index in range(number_edge_keys-1): - # replicate all paths - for path_index in range(number_paths): + number_paths = len(list_of_edge_key_paths) + # for each parallel edge + for edge_key_index in range(number_edge_keys - 1): + # replicate all paths + for path_index in range(number_paths): list_of_edge_key_paths.append( list(list_of_edge_key_paths[path_index]) - ) - # paths have been replicated, now add the edges - for edge_key_index in range(number_edge_keys): - for path_index in range(number_paths): - # add the new edge + ) + # paths have been replicated, now add the edges + for edge_key_index in range(number_edge_keys): + for path_index in range(number_paths): + # add the new edge list_of_edge_key_paths[ - path_index+edge_key_index*number_paths - ].append( - edge_keys[edge_key_index] - ) - + path_index + edge_key_index * number_paths + ].append(edge_keys[edge_key_index]) + # ************************************************************************* - + path_lenths = [ - sum(network.edges[edge_key][osm.KEY_OSMNX_LENGTH] - for edge_key in edge_key_path) + sum(network.edges[edge_key][osm.KEY_OSMNX_LENGTH] for edge_key in edge_key_path) for edge_key_path in list_of_edge_key_paths - ] - if return_minimum_length_only: - return min(path_lenths) - else: + ] + if return_minimum_length_only: + return min(path_lenths) + else: return path_lenths # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** -def edge_path_length(network: MultiDiGraph, - path: list, - **kwargs) -> float: + +def edge_path_length(network: MultiDiGraph, path: list, **kwargs) -> float: """ Returns the total length of a path defined using edges. - + If the path does not exist, or if no path is provided, the result will be infinity (math.inf). 
- + Parameters ---------- network : MultiDiGraph @@ -268,29 +275,29 @@ def edge_path_length(network: MultiDiGraph, The path\'s length or all lengths consistent with the path provided. """ - - # check the number of + + # check the number of path_length = len(path) if path_length == 0: return inf if ident.is_edge_path(network, path, **kwargs): - return sum( - network.edges[edge_key][osm.KEY_OSMNX_LENGTH] for edge_key in path - ) + return sum(network.edges[edge_key][osm.KEY_OSMNX_LENGTH] for edge_key in path) else: # no path provided return inf + # ***************************************************************************** # ***************************************************************************** -def count_ocurrences(gdf: GeoDataFrame, - column: str, - column_entries: list = None) -> dict: + +def count_ocurrences( + gdf: GeoDataFrame, column: str, column_entries: list = None +) -> dict: """ Counts the number of occurrences per entry in a DataFrame object's column. - - If a list is provided, only the entries that match those in the list are + + If a list is provided, only the entries that match those in the list are counted. If no list is provided, all unique entries are counted. Parameters @@ -309,7 +316,7 @@ def count_ocurrences(gdf: GeoDataFrame, A dictionary with the counts whose keys are the values counted. """ - + if type(column_entries) == list: # find entries also present in the dict # initialise dict @@ -317,12 +324,12 @@ def count_ocurrences(gdf: GeoDataFrame, # for each key in the dict for key in column_entries: # # store the number of rows - # count_dict[key] = gdf[gdf[column]==key].shape[0] + # count_dict[key] = gdf[gdf[column]==key].shape[0] # count the number of rows with this key if isna(key): - count_dict[key] = gdf[gdf[column].isnull()].shape[0] + count_dict[key] = gdf[gdf[column].isnull()].shape[0] else: - count_dict[key] = gdf[gdf[column]==key].shape[0] + count_dict[key] = gdf[gdf[column] == key].shape[0] else: # find all unique entries # initialise dict @@ -333,12 +340,13 @@ def count_ocurrences(gdf: GeoDataFrame, # it is, skip continue # it is not, count and store the number of rows with said entry - if isna(entry): #type(entry) == type(None): - count_dict[entry] = gdf[gdf[column].isnull()].shape[0] + if isna(entry): # type(entry) == type(None): + count_dict[entry] = gdf[gdf[column].isnull()].shape[0] else: - count_dict[entry] = gdf[gdf[column]==entry].shape[0] + count_dict[entry] = gdf[gdf[column] == entry].shape[0] # return statement return count_dict + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/gis/identify.py b/src/topupopt/data/gis/identify.py index 97beec8..ee69c61 100644 --- a/src/topupopt/data/gis/identify.py +++ b/src/topupopt/data/gis/identify.py @@ -20,11 +20,12 @@ from ..gis import osm # ***************************************************************************** # ***************************************************************************** + def is_edge_consistent_with_geometry(network: nx.MultiDiGraph, edge_key): """ Returns True if a given edge in an OSMnx-formatted graph is declared in the same order as its geometry. That is, if the source node corresponds to the - first position in the geometry attribute and so forth. + first position in the geometry attribute and so forth. 
Parameters ---------- @@ -51,23 +52,24 @@ def is_edge_consistent_with_geometry(network: nx.MultiDiGraph, edge_key): if not network.has_edge(*edge_key): # the edge does not exist raise ValueError( - 'No edge was found matching the key provided: '+str(edge_key) - ) + "No edge was found matching the key provided: " + str(edge_key) + ) elif osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: # edge exists and has a geometry attribute: check the geometry # check if the first point on the geometry matches the first node - return ( - tuple(network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY].coords)[0] == - (network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - network.nodes[edge_key[0]][osm.KEY_OSMNX_Y]) - ) + return tuple(network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY].coords)[0] == ( + network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + ) else: # edge exists but has no geometry attribute: it is consistent return True + # ***************************************************************************** # ***************************************************************************** + def find_edges_in_reverse(network: nx.MultiDiGraph) -> dict: """ Finds edges in reverse within an OSMnx-formatted MultiDiGraph object. @@ -87,30 +89,32 @@ def find_edges_in_reverse(network: nx.MultiDiGraph) -> dict: edge_key: [ other_edge_key for other_edge_key in get_edges_from_a_to_b( - network, - node_start=edge_key[1], - node_end=edge_key[0] - ) + network, node_start=edge_key[1], node_end=edge_key[0] + ) if edges_are_in_reverse(network, edge_key, other_edge_key) - ] + ] for edge_key in network.edges(keys=True) - } + } + # ***************************************************************************** # ***************************************************************************** - + + def is_edge_osmnx_compliant(network: nx.MultiDiGraph, edge_key) -> bool: """Returns True if a given edge is osmnx compliant and False otherwise.""" - + # check if the edge exists if not network.has_edge(*edge_key): - raise ValueError('Edge not found.') + raise ValueError("Edge not found.") # check compatibility with osmnx return is_edge_data_osmnx_compliant(network.get_edge_data(*edge_key)) - + + # ***************************************************************************** # ***************************************************************************** - + + def is_edge_data_osmnx_compliant(edge_data: dict): """Returns True if a given MultiDiGraph edge data dict is OSMnx-compliant.""" # check if all the essential attributes are in @@ -156,26 +160,28 @@ def is_edge_data_osmnx_compliant(edge_data: dict): return False # else: # no action return True - + + # ***************************************************************************** # ***************************************************************************** + def edges_are_in_reverse( - network: nx.MultiDiGraph, - edge_a: tuple, - edge_b: tuple, - tolerance: float or int = 1e-3 - ) -> bool: + network: nx.MultiDiGraph, + edge_a: tuple, + edge_b: tuple, + tolerance: float or int = 1e-3, +) -> bool: """ Returns True if two edges in a graph represent the same one but in reverse. - + The conditions the two edges must observe are the following: - the set of each attribute appearing as a list must match; - the geometries must be identical but in reverse; - the lengths must be within a given tolerance of one another; - the reversed flags must be opposite, unless they are a list; - every other attribute must be identical. 
- + The graph should be formatted by OSMnx standards. Parameters @@ -200,111 +206,134 @@ def edges_are_in_reverse( If True, the edges are in reverse. """ - + # the edges are the same but in reverse: - # - all attributes have to be the same or lists with - # the same content, as in a set, except for + # - all attributes have to be the same or lists with + # the same content, as in a set, except for # the geometry and reversed attributes - + # must involve the same nodes in reverse order if edge_a[0] != edge_b[1] or edge_a[1] != edge_b[0]: # the nodes do not match return False # make sure both edges exist and comply with osmnx - if (not is_edge_osmnx_compliant(network, edge_a) or - not is_edge_osmnx_compliant(network, edge_b)): - raise ValueError('One or more of the edges is not OSMnx-compliant.') - + if not is_edge_osmnx_compliant(network, edge_a) or not is_edge_osmnx_compliant( + network, edge_b + ): + raise ValueError("One or more of the edges is not OSMnx-compliant.") + fw_dict = network.get_edge_data(*edge_a) rv_dict = network.get_edge_data(*edge_b) - + # check if any of the non-mandatory attributes exist in one dict but not # the other key_attr = set([osm.KEY_OSMNX_GEOMETRY, osm.KEY_OSMNX_REVERSED]) for _attr in key_attr: - if ((_attr in fw_dict.keys() and _attr not in rv_dict.keys()) or - (_attr not in fw_dict.keys() and _attr in rv_dict.keys())): + if (_attr in fw_dict.keys() and _attr not in rv_dict.keys()) or ( + _attr not in fw_dict.keys() and _attr in rv_dict.keys() + ): # incoherent inputs return False - + # for each key, value pair in the forward edge's dict for attr_key, attr_value in fw_dict.items(): - if (type(attr_value) == list and - ((type(rv_dict[attr_key]) == list and - set(attr_value) != set(rv_dict[attr_key])) or - type(rv_dict[attr_key]) != list)): + if type(attr_value) == list and ( + ( + type(rv_dict[attr_key]) == list + and set(attr_value) != set(rv_dict[attr_key]) + ) + or type(rv_dict[attr_key]) != list + ): # the sets of list arguments do not match # or, the arguments are not equivalent return False - elif (type(attr_value) == list and - type(rv_dict[attr_key]) == list and - set(attr_value) == set(rv_dict[attr_key])): + elif ( + type(attr_value) == list + and type(rv_dict[attr_key]) == list + and set(attr_value) == set(rv_dict[attr_key]) + ): # the sets of list arguments match continue - elif (attr_key == osm.KEY_OSMNX_GEOMETRY and - ((type(rv_dict[attr_key]) == LineString and - tuple(attr_value.coords) != - tuple(rv_dict[attr_key].reverse().coords)) or - attr_key not in rv_dict or - type(rv_dict[attr_key]) != LineString)): + elif attr_key == osm.KEY_OSMNX_GEOMETRY and ( + ( + type(rv_dict[attr_key]) == LineString + and tuple(attr_value.coords) + != tuple(rv_dict[attr_key].reverse().coords) + ) + or attr_key not in rv_dict + or type(rv_dict[attr_key]) != LineString + ): # either the geometries are not reversed # or, there is no geometry attribute in the reverse dict # or, the geometry in the reverse edge is not for a LineString return False - elif (attr_key == osm.KEY_OSMNX_GEOMETRY and - type(rv_dict[attr_key]) == LineString and - tuple(attr_value.coords) == - tuple(rv_dict[attr_key].reverse().coords)): + elif ( + attr_key == osm.KEY_OSMNX_GEOMETRY + and type(rv_dict[attr_key]) == LineString + and tuple(attr_value.coords) == tuple(rv_dict[attr_key].reverse().coords) + ): # the geometries are reversed continue - elif (attr_key == osm.KEY_OSMNX_REVERSED and - ((attr_key in rv_dict and - attr_value == rv_dict[attr_key]) or - attr_key not in rv_dict or - 
type(rv_dict[attr_key]) != bool)): + elif attr_key == osm.KEY_OSMNX_REVERSED and ( + (attr_key in rv_dict and attr_value == rv_dict[attr_key]) + or attr_key not in rv_dict + or type(rv_dict[attr_key]) != bool + ): # either the reversed flags match # or, there is no reversed flag in the reverse dict return False - elif (attr_key == osm.KEY_OSMNX_REVERSED and - attr_key in rv_dict and - not attr_value == rv_dict[attr_key]): + elif ( + attr_key == osm.KEY_OSMNX_REVERSED + and attr_key in rv_dict + and not attr_value == rv_dict[attr_key] + ): # the reversed flags are logical opposites continue - elif (attr_key == osm.KEY_OSMNX_LENGTH and - ((attr_key in rv_dict and - #isinstance(rv_dict[attr_key], Real) and - abs(attr_value-rv_dict[attr_key]) > tolerance) or - attr_key not in rv_dict)): + elif attr_key == osm.KEY_OSMNX_LENGTH and ( + ( + attr_key in rv_dict + and + # isinstance(rv_dict[attr_key], Real) and + abs(attr_value - rv_dict[attr_key]) > tolerance + ) + or attr_key not in rv_dict + ): # either the lengths differ too much # or, there is no length attribute in the reverse dict # or it is not a numeric type return False - elif (attr_key == osm.KEY_OSMNX_LENGTH and - attr_key in rv_dict and - #isinstance(rv_dict[attr_key], Real) and - abs(attr_value-rv_dict[attr_key]) <= tolerance): + elif ( + attr_key == osm.KEY_OSMNX_LENGTH + and attr_key in rv_dict + and + # isinstance(rv_dict[attr_key], Real) and + abs(attr_value - rv_dict[attr_key]) <= tolerance + ): # the lengths are within the tolerance continue elif attr_key in rv_dict and attr_value != rv_dict[attr_key]: # either the attributes do not match return False # else: # the argument does not exist - + # all other possibilities have been exhausted: return True return True + # ***************************************************************************** # ***************************************************************************** + def close_to_extremities( - line: LineString, - points: tuple, - tolerance: float = 7/3-4/3-1, - use_start_point_equidistant: bool = True, - return_distances: bool = False) -> tuple: + line: LineString, + points: tuple, + tolerance: float = 7 / 3 - 4 / 3 - 1, + use_start_point_equidistant: bool = True, + return_distances: bool = False, +) -> tuple: """ Determines which points are close to a line\'s start and end points. - + Closeness between points is defined by being within a given tolerance of each other. @@ -334,31 +363,32 @@ def close_to_extremities( points that are closest to the end point. 
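# Illustrative note, with assumed values: for a line from (0, 0) to (10, 0)
# and the points ((0, 1), (10, 1)), the first point is within tolerance of
# the start and the second of the end, so index 0 lands in the start group
# and index 1 in the end group.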
""" - + # calculate the distances to the line line_distances = line.distance(points) - + # identify the start and end points start_point = Point(line.coords[0]) end_point = Point(line.coords[-1]) - + # calculate the distances to the start and end points start_distances = start_point.distance(points) end_distances = end_point.distance(points) - + # for each point _start = [] _end = [] - for i, (line_distance, start_distance, end_distance) in enumerate( - zip(line_distances, start_distances, end_distances)): + for i, (line_distance, start_distance, end_distance) in enumerate( + zip(line_distances, start_distances, end_distances) + ): if start_distance < end_distance: # the point is closer to the start point than to the end point - if abs(start_distance-line_distance) <= tolerance: + if abs(start_distance - line_distance) <= tolerance: # the point is within range of the start point _start.append(i) elif start_distance > end_distance: # the point is closer to the end point than to the start point - if abs(end_distance-line_distance) <= tolerance: + if abs(end_distance - line_distance) <= tolerance: # the point is within range of the end point _end.append(i) else: @@ -367,44 +397,48 @@ def close_to_extremities( # the point is closer to the line than to the start/end points continue # reach these statements - if use_start_point_equidistant: + if use_start_point_equidistant: _start.append(i) else: _end.append(i) - + # return statement if return_distances: return _start, _end, line_distances, start_distances, end_distances else: return _start, _end + # ***************************************************************************** # ***************************************************************************** -def find_roundabouts(network: nx.MultiDiGraph, - maximum_perimeter: float = None, - minimum_perimeter: float = None, - maximum_number_nodes: int = None, - minimum_number_nodes: int = None) -> list: + +def find_roundabouts( + network: nx.MultiDiGraph, + maximum_perimeter: float = None, + minimum_perimeter: float = None, + maximum_number_nodes: int = None, + minimum_number_nodes: int = None, +) -> list: """ Finds sequences of nodes in a network that constitute roundabouts. A roundabout is defined as a sequence of nodes connected through one-way edges to form an endless loop. One-way edges are identified by the presence - of the 'oneway' attribute equal to True in the edge dictionary. The minimum + of the 'oneway' attribute equal to True in the edge dictionary. The minimum and maximum roundabout perimeter can be used to filter out roundabouts that are too big or too small, as can minimum and maximum number of nodes. - + Parameters ---------- network : nx.MultiDiGraph The object describing the network. maximum_perimeter : float, optional - The maximum perimeter for an ordered sequence of nodes to qualify as a + The maximum perimeter for an ordered sequence of nodes to qualify as a roundabout. The units are those of OSMnx: meters. The default is None, which leads to the maximum perimeter being ignored. minimum_perimeter : float, optional - The minimum perimeter for an ordered sequence of nodes to qualify as a + The minimum perimeter for an ordered sequence of nodes to qualify as a roundabout. The units are those of OSMNX: meters. The default is None, which leads to the minimum perimeter being ignored. maximum_number_nodes : int, optional @@ -422,33 +456,29 @@ def find_roundabouts(network: nx.MultiDiGraph, A list of lists with node keys representing a roundabout's nodes. 
The nodes are ordered in the manner needed to go around the roundabout. """ - + # copy the network object - + new_network = network.copy() - - # remove edges that do not qualify: + + # remove edges that do not qualify: # 1) self-loops # 2) edges that are not oneway # 3) edges that do not have the necessary attributes - + # node number limits - there_are_upper_node_number_limits = ( - True if maximum_number_nodes != None else False) - there_are_lower_node_number_limits = ( - True if minimum_number_nodes != None else False) - there_are_node_number_limits = ( - there_are_upper_node_number_limits or - there_are_lower_node_number_limits) - + there_are_upper_node_number_limits = True if maximum_number_nodes != None else False + there_are_lower_node_number_limits = True if minimum_number_nodes != None else False + there_are_node_number_limits = ( + there_are_upper_node_number_limits or there_are_lower_node_number_limits + ) + # perimeter limits - there_are_upper_perimeter_limits = ( - True if maximum_perimeter != None else False) - there_are_lower_perimeter_limits = ( - True if minimum_perimeter != None else False) - there_are_perimeter_limits = ( - there_are_upper_perimeter_limits or - there_are_lower_perimeter_limits) + there_are_upper_perimeter_limits = True if maximum_perimeter != None else False + there_are_lower_perimeter_limits = True if minimum_perimeter != None else False + there_are_perimeter_limits = ( + there_are_upper_perimeter_limits or there_are_lower_perimeter_limits + ) # find edges that are not one way list_removable_edges = [] @@ -482,30 +512,28 @@ def find_roundabouts(network: nx.MultiDiGraph, for node_key in list_selflooping_nodes: while new_network.has_edge(u=node_key, v=node_key): new_network.remove_edge(u=node_key, v=node_key) - + # ************************************************************************* # ************************************************************************* - + # find loops node_paths = nx.simple_cycles(new_network) - + # ************************************************************************* # ************************************************************************* - + # exclude paths based on the perimeter and the number of nodes, if set if not there_are_node_number_limits and not there_are_perimeter_limits: return list(node_paths) - else: # each potential candidate needs to be checked + else: # each potential candidate needs to be checked final_node_paths = [] # for each node group for node_path in node_paths: if there_are_perimeter_limits: # compute the total length for each node total_length = node_path_length( - network, - node_path, - return_minimum_length_only=True - ) + network, node_path, return_minimum_length_only=True + ) if there_are_lower_perimeter_limits: if total_length < minimum_perimeter: continue @@ -524,19 +552,21 @@ def find_roundabouts(network: nx.MultiDiGraph, final_node_paths.append(node_path) # return the final list return final_node_paths - + # ********************************************************************* # ********************************************************************* - + + # ***************************************************************************** # ***************************************************************************** -def is_roundabout(network: nx.MultiDiGraph, - path: list, - path_as_node_keys: bool = True) -> bool: + +def is_roundabout( + network: nx.MultiDiGraph, path: list, path_as_node_keys: bool = True +) -> bool: """ Returns True if a given path constitutes a roundabout in a directed 
graph. - + A roundabout is defined as a sequence of nodes connected through one-way edges to form an endless loop. One-way edges are identified by the presence of the 'oneway' attribute equal to True in the edge dictionary. @@ -556,123 +586,106 @@ def is_roundabout(network: nx.MultiDiGraph, True, if the path constitutes a roundabout and False otherwise. """ - + # paths are given as node lists - + # roundabouts require at least two nodes - + if len(path) <= 1: - - raise ValueError('Node paths require at least two nodes.') - + raise ValueError("Node paths require at least two nodes.") + # for each node in path - + for node_key in path: - # check if it exists in the network - + if not network.has_node(node_key): - return False - + # there should be no repetitions - + if path.count(node_key) > 1: - return False - + # check if the last node connects to the first - - edge_keys = get_edges_from_a_to_b(network, - path[-1], - path[0]) - + + edge_keys = get_edges_from_a_to_b(network, path[-1], path[0]) + if len(edge_keys) == 0: - return False - + else: - # among the edges between them, find at least one compatible - + compatible_edge_exists = False - - for edge_key in edge_keys: - + + for edge_key in edge_keys: # get its data - - edge_data_dict = network.get_edge_data(u=edge_key[0], - v=edge_key[1], - key=edge_key[2]) - + + edge_data_dict = network.get_edge_data( + u=edge_key[0], v=edge_key[1], key=edge_key[2] + ) + # ensure that this edge has the oneway attribute - + if osm.KEY_OSMNX_ONEWAY in edge_data_dict: - # ensure that it is true - + if edge_data_dict[osm.KEY_OSMNX_ONEWAY]: - compatible_edge_exists = True - + break - + # check for compatible edges - + if not compatible_edge_exists: - # no compatible edges exist between these two nodes - + return False - + # for each other node pair - for node_pair in range(len(path)-1): - + for node_pair in range(len(path) - 1): # for each edge between them, find at least one compatible edge - + compatible_edge_exists = False - - for edge_key in get_edges_from_a_to_b(network, - path[node_pair], - path[node_pair+1]): - + + for edge_key in get_edges_from_a_to_b( + network, path[node_pair], path[node_pair + 1] + ): # get its data - - edge_data_dict = network.get_edge_data(u=edge_key[0], - v=edge_key[1], - key=edge_key[2]) - + + edge_data_dict = network.get_edge_data( + u=edge_key[0], v=edge_key[1], key=edge_key[2] + ) + # ensure that this edge has the oneway attribute - + if osm.KEY_OSMNX_ONEWAY in edge_data_dict: - # ensure that it is true - + if edge_data_dict[osm.KEY_OSMNX_ONEWAY]: - compatible_edge_exists = True - + break - + # check for compatible edges - + if not compatible_edge_exists: - # no compatible edges exist between these two nodes - + return False - + # otherwise, it is a roundabout - + return True - + + # ***************************************************************************** # ***************************************************************************** -def get_edges_from_a_to_b(network: nx.MultiDiGraph, - node_start, - node_end) -> list: + +def get_edges_from_a_to_b(network: nx.MultiDiGraph, node_start, node_end) -> list: """ Retrieve the keys for edges from one node to another. 
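# Usage sketch; the graph and node keys are illustrative. Parallel edges
# may connect the same node pair in a MultiDiGraph, so a list of edge keys
# is returned rather than a single edge.
# >>> import networkx as nx
# >>> G = nx.MultiDiGraph()
# >>> G.add_edge(1, 2); G.add_edge(1, 2)
# >>> get_edges_from_a_to_b(G, 1, 2)  # [(1, 2, 0), (1, 2, 1)]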
@@ -692,18 +705,21 @@ def get_edges_from_a_to_b(network: nx.MultiDiGraph, """ if network.has_edge(u=node_start, v=node_end): - return [(node_start, node_end, key) - for key in network._adj[node_start][node_end]] + return [ + (node_start, node_end, key) for key in network._adj[node_start][node_end] + ] else: return [] + # ***************************************************************************** # ***************************************************************************** + def get_edges_between_two_nodes(network: nx.MultiDiGraph, u, v) -> list: """ Retrieve the keys for all edges involving two specific nodes. - + The keys concern edges in both directions. For a single direction, consider using the method get_edges_from_a_to_b instead. @@ -722,13 +738,13 @@ def get_edges_between_two_nodes(network: nx.MultiDiGraph, u, v) -> list: A list of edge keys involving both nodes, in both directions. """ - + if network.has_edge(u, v): # edges exist from u to v - _out = [(u,v,k) for k in network._adj[u][v]] + _out = [(u, v, k) for k in network._adj[u][v]] try: # try finding out if edges exist from v to u - _out.extend([(v,u,k) for k in network._adj[v][u]]) + _out.extend([(v, u, k) for k in network._adj[v][u]]) except KeyError: # edges do not exist from v to u pass @@ -736,22 +752,26 @@ def get_edges_between_two_nodes(network: nx.MultiDiGraph, u, v) -> list: return _out elif network.has_edge(v, u): # edges do not exist from u to v but exist from v to u - return [(v,u,k) for k in network._adj[v][u]] + return [(v, u, k) for k in network._adj[v][u]] else: # no edges found return [] + # ***************************************************************************** # ***************************************************************************** -def get_edges_involving_node(network: nx.MultiDiGraph, - node_key, - include_outgoing_edges: bool = True, - include_incoming_edges: bool = True, - include_self_loops: bool = True) -> list: + +def get_edges_involving_node( + network: nx.MultiDiGraph, + node_key, + include_outgoing_edges: bool = True, + include_incoming_edges: bool = True, + include_self_loops: bool = True, +) -> list: """ Retrieve the keys for all edges involving a specific node. - + The keys concern incoming and outgoing edges. Optionally, the keys retrieved can concern only incoming or outgoing edges, self-loops included or not. @@ -774,31 +794,39 @@ def get_edges_involving_node(network: nx.MultiDiGraph, A list of edge keys involving the specified node. 
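    Examples
    --------
    An illustrative sketch (not from the original test suite; assumes the
    package is importable under its src layout):

    >>> import networkx as nx
    >>> from topupopt.data.gis.identify import get_edges_involving_node
    >>> G = nx.MultiDiGraph()
    >>> k = G.add_edge(0, 1)
    >>> k = G.add_edge(1, 0)
    >>> k = G.add_edge(0, 0)
    >>> sorted(get_edges_involving_node(G, 0))
    [(0, 0, 0), (0, 1, 0), (1, 0, 0)]
    >>> sorted(get_edges_involving_node(G, 0, include_self_loops=False))
    [(0, 1, 0), (1, 0, 0)]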
""" - + return [ edge_key for edge_key in network.edges(keys=True) if node_key in edge_key[0:2] # outgoing edges - if ((node_key != edge_key[0] and not include_outgoing_edges) or - include_outgoing_edges) + if ( + (node_key != edge_key[0] and not include_outgoing_edges) + or include_outgoing_edges + ) # incoming edges - if ((node_key != edge_key[1] and not include_incoming_edges) or - include_incoming_edges) + if ( + (node_key != edge_key[1] and not include_incoming_edges) + or include_incoming_edges + ) # self-loops - if ((edge_key[0] != edge_key[1] and not include_self_loops) or - include_self_loops) - ] + if ( + (edge_key[0] != edge_key[1] and not include_self_loops) + or include_self_loops + ) + ] + # ***************************************************************************** # ***************************************************************************** -def neighbours(network: nx.MultiDiGraph or nx.MultiGraph, - node_key, - ignore_self_loops: bool = True): + +def neighbours( + network: nx.MultiDiGraph or nx.MultiGraph, node_key, ignore_self_loops: bool = True +): """ Return a given node\'s neighbours. - + This method relies on networkx\'s neighbours method but adds the option to ignore self-loops. @@ -819,29 +847,28 @@ def neighbours(network: nx.MultiDiGraph or nx.MultiGraph, """ if network.has_edge(node_key, node_key) and ignore_self_loops: - return ( - _node_key + _node_key for _node_key in nx.all_neighbors(network, node_key) if _node_key != node_key - ) - + ) + else: - return nx.all_neighbors(network, node_key) + # ***************************************************************************** # ***************************************************************************** + def is_node_path( - network: nx.MultiDiGraph, - path: list, - consider_reversed_edges: bool = False) -> bool: + network: nx.MultiDiGraph, path: list, consider_reversed_edges: bool = False +) -> bool: """ Indicates if a given path qualifies as a node path in a directed network. - + A node path consists of a sequence of nodes connected by directed edges. - + The sequence must include at least two nodes. Parameters @@ -875,15 +902,19 @@ def is_node_path( return False return True else: - return nx.is_path(network, path) + return nx.is_path(network, path) + # ***************************************************************************** # ***************************************************************************** -def is_edge_path(network: nx.MultiDiGraph, - path: list, - ignore_edge_direction: bool = False, - allow_multiple_formats: bool = False) -> bool: + +def is_edge_path( + network: nx.MultiDiGraph, + path: list, + ignore_edge_direction: bool = False, + allow_multiple_formats: bool = False, +) -> bool: """ Indicates if a given path qualifies as an edge path in a directed network. @@ -906,106 +937,104 @@ def is_edge_path(network: nx.MultiDiGraph, Returns True if the path qualifies as an edge path and False otherwise. """ - + if len(path) == 0: - # empty path - + return False - + else: - # all the edges have to exist - + previous_edge_key_length = len(path[0]) - + for edge_i, tentative_edge_key in enumerate(path): - edge_key_length = len(tentative_edge_key) - + if not allow_multiple_formats: - if previous_edge_key_length != edge_key_length: - # the edge key format changes: not a path - + raise ValueError( - 'The path must be provided using only one edge format.' - ) - + "The path must be provided using only one edge format." 
+ ) + # find out if the edge exists - + if edge_key_length == 3: - # 3-tuple format - - if not network.has_edge(u=tentative_edge_key[0], - v=tentative_edge_key[1], - key=tentative_edge_key[2]): - + + if not network.has_edge( + u=tentative_edge_key[0], + v=tentative_edge_key[1], + key=tentative_edge_key[2], + ): # the edge does not exist as specified - + return False - + elif edge_key_length == 2: - # 2-tuple format - - if not network.has_edge(u=tentative_edge_key[0], - v=tentative_edge_key[1]): - + + if not network.has_edge( + u=tentative_edge_key[0], v=tentative_edge_key[1] + ): # the edge does not exist as specified - + return False - + else: - # unknown format - + return False - + # the edge exists: check if it forms a sequence - - if edge_i != 0: # skip the first iteration - + + if edge_i != 0: # skip the first iteration # if none of the current edge's nodes is mentioned in the # previous edge, then no sequence is formed - - if (tentative_edge_key[0] not in path[edge_i-1][0:2] and - tentative_edge_key[1] not in path[edge_i-1][0:2] and - ignore_edge_direction): - + + if ( + tentative_edge_key[0] not in path[edge_i - 1][0:2] + and tentative_edge_key[1] not in path[edge_i - 1][0:2] + and ignore_edge_direction + ): return False - + # if the previous edge's end node is not the current edge's # start node, then it is not a valid edge path - - if (path[edge_i-1][1] != tentative_edge_key[0] and - not ignore_edge_direction): - + + if ( + path[edge_i - 1][1] != tentative_edge_key[0] + and not ignore_edge_direction + ): return False - + # # check the formats, if necessary - + # if (not allow_multiple_formats and # len(path[edge_i-1]) != len(tentative_edge_key)): - + # return False - + previous_edge_key_length = edge_key_length - + return True + # ***************************************************************************** # ***************************************************************************** -def is_path_straight(network: nx.MultiDiGraph, - path: list, - consider_reversed_edges: bool = False, - ignore_self_loops: bool = False) -> bool: + +def is_path_straight( + network: nx.MultiDiGraph, + path: list, + consider_reversed_edges: bool = False, + ignore_self_loops: bool = False, +) -> bool: """ Returns True if the path is straight and False otherwise. - + A path is defined to be straight if it presents no options along it. 
Parameters @@ -1033,33 +1062,42 @@ def is_path_straight(network: nx.MultiDiGraph, # a straight path requires at least two nodes path_length = len(path) if path_length == 2: - return True # path with two nodes is always straight - + return True # path with two nodes is always straight + # check if the intermediate nodes have the right number of neighbours for intermediate_node in path[1:-1]: - if len(set(neighbours( - network, - intermediate_node, - ignore_self_loops=ignore_self_loops)) - ) != 2: - # the path is not straight if the intermediate nodes do not have + if ( + len( + set( + neighbours( + network, intermediate_node, ignore_self_loops=ignore_self_loops + ) + ) + ) + != 2 + ): + # the path is not straight if the intermediate nodes do not have # two distinct neighbours - return False - + return False + # if all intermediate nodes have two neighbours, return True return True + # ***************************************************************************** # ***************************************************************************** -def find_simplifiable_paths(network: nx.MultiDiGraph, - excluded_nodes: list, - ignore_self_loops: bool = False, - consider_reversed_edges: bool = False, - include_both_directions: bool = False) -> list: + +def find_simplifiable_paths( + network: nx.MultiDiGraph, + excluded_nodes: list, + ignore_self_loops: bool = False, + consider_reversed_edges: bool = False, + include_both_directions: bool = False, +) -> list: """ Enumerates the simplifiable paths found in a given graph. - + A path is defined to be simplifiable if it presents no options along it and involves at least three different nodes: two-node paths are straight but are not simplifiable, with or without reversed edges. @@ -1075,7 +1113,7 @@ def find_simplifiable_paths(network: nx.MultiDiGraph, paths including self-loops cannot be straight. The default is False. consider_reversed_edges : bool, optional If True, a straight path can include nodes connected in reverse. If - False, only edges in the stated direction will be considered. The + False, only edges in the stated direction will be considered. The default is False. include_both_directions : bool, optional If True, and if reverse edges are allowed, simplifiable paths will @@ -1087,107 +1125,102 @@ def find_simplifiable_paths(network: nx.MultiDiGraph, A list of the straight paths in the graph. 
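    Examples
    --------
    An illustrative sketch (not from the original test suite; assumes the
    package is importable under its src layout). In a directed chain, node 1
    is the only intermediate node, so the whole chain is simplifiable:

    >>> import networkx as nx
    >>> from topupopt.data.gis.identify import find_simplifiable_paths
    >>> G = nx.MultiDiGraph()
    >>> k = G.add_edge(0, 1)
    >>> k = G.add_edge(1, 2)
    >>> find_simplifiable_paths(G, excluded_nodes=[])
    [[0, 1, 2]]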
""" - + # a straight path is a path where the intermediate nodes (all excluding the # first and the last) have to exist and have exactly 2 distinct neighbours - + # ************************************************************************* # ************************************************************************* - + # locate all the non-excluded nodes that can form straight paths - - intermediate_candidate_nodes = set([ - node_key - for node_key in network.nodes() - # the node cannot be among those excluded - if node_key not in excluded_nodes - # the node has to be linked to two other nodes other than itself - if len(set( - neighbours( - network, - node_key, - ignore_self_loops=True - ) - )) == 2 - # exclude nodes with self-loops if desired: - # 1) self-loops are tolerated (no need to check) - # 2) self-loops are not tolerated and they do not exist - if (ignore_self_loops or - (not ignore_self_loops and - not network.has_edge(node_key, node_key))) - ]) - + + intermediate_candidate_nodes = set( + [ + node_key + for node_key in network.nodes() + # the node cannot be among those excluded + if node_key not in excluded_nodes + # the node has to be linked to two other nodes other than itself + if len(set(neighbours(network, node_key, ignore_self_loops=True))) == 2 + # exclude nodes with self-loops if desired: + # 1) self-loops are tolerated (no need to check) + # 2) self-loops are not tolerated and they do not exist + if ( + ignore_self_loops + or (not ignore_self_loops and not network.has_edge(node_key, node_key)) + ) + ] + ) + # ************************************************************************* - + # find out paths around the nodes identified - + list_paths = [] list_paths_nodes = [] list_nodes_joined = set([]) - - # try to form paths around the candidate nodes + + # try to form paths around the candidate nodes for candidate_node in intermediate_candidate_nodes: # skip if the node is already in a path if candidate_node in list_nodes_joined: continue - + # select method if consider_reversed_edges: # reversed edges are accepted new_sequence = _find_path_direction_insensitive( network, - list_valid_nodes=intermediate_candidate_nodes-list_nodes_joined, + list_valid_nodes=intermediate_candidate_nodes - list_nodes_joined, start_node=candidate_node, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) else: # reversed edges are not accepted new_sequence = _find_path_direction_sensitive( network, - list_valid_nodes=intermediate_candidate_nodes-list_nodes_joined, + list_valid_nodes=intermediate_candidate_nodes - list_nodes_joined, start_node=candidate_node, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) # make sure the sequence is not redundant - if (len(new_sequence) <= 2 or - new_sequence in list_paths): + if len(new_sequence) <= 2 or new_sequence in list_paths: # path is just one edge or has already been included continue - - # add the path + + # add the path list_paths.append(new_sequence) list_paths_nodes.append(set(new_sequence)) if consider_reversed_edges and include_both_directions: - # directions do not matter: + # directions do not matter: list_paths.append(new_sequence[::-1]) - # update the list of intermediate nodes already on paths + # update the list of intermediate nodes already on paths list_nodes_joined.update(set(new_sequence[1:-1])) - + # ************************************************************************* # ************************************************************************* - + return list_paths 
# ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** - + + def _find_path_direction_sensitive( - network: nx.MultiDiGraph, - list_valid_nodes: list, - start_node, - ignore_self_loops: bool - ) -> list: - - def find_path_forward(network: nx.MultiDiGraph, - current_node, - path: list): + network: nx.MultiDiGraph, + list_valid_nodes: list, + start_node, + ignore_self_loops: bool, +) -> list: + def find_path_forward(network: nx.MultiDiGraph, current_node, path: list): # identify the last node's neighbours current_neighbours = set( neighbours(network, current_node, ignore_self_loops=True) - ) + ) # check each neighbour for a_neighbour in current_neighbours: # check the direction of edge towards the neighbour @@ -1206,12 +1239,8 @@ def _find_path_direction_sensitive( # add the neighbour to the end of the path path.append(a_neighbour) # recursive call with extended path - return find_path_forward( - network, - path[-1], - path - ) - else: # is not a valid node: path ends + return find_path_forward(network, path[-1], path) + else: # is not a valid node: path ends # add the neighbour to the end of the path: path.append(a_neighbour) # return the path @@ -1221,32 +1250,38 @@ def _find_path_direction_sensitive( # neighbour is already on the path, matches the start, # and has two neighbours other than itself: # close the loop and return the path - if (len(set(neighbours( - network, - a_neighbour, - ignore_self_loops=ignore_self_loops))) == 2): + if ( + len( + set( + neighbours( + network, + a_neighbour, + ignore_self_loops=ignore_self_loops, + ) + ) + ) + == 2 + ): # add the neighbour to the end of the path: path.append(a_neighbour) # return the path return path # all neighbours have been visited: return the current path return path - - def find_path_backward(network: nx.MultiDiGraph, - current_node, - path: list): + + def find_path_backward(network: nx.MultiDiGraph, current_node, path: list): # identify the last node's neighbours current_neighbours = set( neighbours(network, current_node, ignore_self_loops=True) - ) + ) # check each neighbour - # 1) if the neighbour is ahead and is a valid node: + # 1) if the neighbour is ahead and is a valid node: # >> recursion w/ neighbour and then return the final path - # 2) if the neighbour is ahead and is not a valid node: + # 2) if the neighbour is ahead and is not a valid node: # >> add it to the path and then return it - # 3) if the neighbour is not ahead and is on the path: + # 3) if the neighbour is not ahead and is on the path: # >> continue, namely to check the other neighbour - # 4) if the neighbour is not ahead and is not on the path: + # 4) if the neighbour is not ahead and is not on the path: # >> add it to the beginning of the path and continue # check each neighbour for a_neighbour in current_neighbours: @@ -1266,12 +1301,8 @@ def _find_path_direction_sensitive( # add the neighbour to the start of the path path.insert(0, a_neighbour) # recursive call with extended path - return find_path_backward( - network, - path[0], - path - ) - else: # is not a valid node: path ends + return find_path_backward(network, path[0], path) + else: # is not a valid node: path ends # add the neighbour to the start of the path path.insert(0, a_neighbour) # return the path @@ -1282,8 +1313,8 @@ def 
_find_path_direction_sensitive( # and has two neighbours other than itself: # close the loop and return the path # if (len(set(neighbours( - # network, - # a_neighbour, + # network, + # a_neighbour, # ignore_self_loops=ignore_self_loops))) == 2): # # add the neighbour to the start of the path # path.insert(0, a_neighbour) @@ -1294,47 +1325,37 @@ def _find_path_direction_sensitive( return path # all neighbours have been visited: return the current path return path - + # ************************************************************************* - + # find the path forward, check for cycles and then find the path backwards # find the forward path segment - path = find_path_forward( - network, - start_node, - [start_node] - ) + path = find_path_forward(network, start_node, [start_node]) # cycles have to be detected on the first try - if len(path) >= 3 and path[0] == path[-1]: + if len(path) >= 3 and path[0] == path[-1]: # it is a cycle: no need to search backwards return path # find the backward path segment - return find_path_backward( - network, - path[0], - path - ) - + return find_path_backward(network, path[0], path) + # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** - + + def _find_path_direction_insensitive( - network: nx.MultiDiGraph, - list_valid_nodes: list, - start_node, - ignore_self_loops: bool - ) -> list: - - def find_path_forward(network: nx.MultiDiGraph, - current_node, - path: list): - + network: nx.MultiDiGraph, + list_valid_nodes: list, + start_node, + ignore_self_loops: bool, +) -> list: + def find_path_forward(network: nx.MultiDiGraph, current_node, path: list): # identify the last node's neighbours current_neighbours = set( neighbours(network, current_node, ignore_self_loops=True) - ) + ) # check each neighbour for a_neighbour in current_neighbours: # check the direction of edge towards the neighbour: @@ -1351,12 +1372,8 @@ def _find_path_direction_insensitive( # add the neighbour to the end of the path path.append(a_neighbour) # recursive call with extended path - return find_path_forward( - network, - path[-1], - path - ) - else: # is not a valid node: path ends + return find_path_forward(network, path[-1], path) + else: # is not a valid node: path ends # add the neighbour to the end of the path: path.append(a_neighbour) # return the path @@ -1366,35 +1383,40 @@ def _find_path_direction_insensitive( # neighbour is already on the path, matches the start, # and has two neighbours other than itself: # close the loop and return the path - if (len(set(neighbours( - network, - a_neighbour, - ignore_self_loops=ignore_self_loops))) == 2): + if ( + len( + set( + neighbours( + network, + a_neighbour, + ignore_self_loops=ignore_self_loops, + ) + ) + ) + == 2 + ): # add the neighbour to the end of the path: path.append(a_neighbour) # return the path return path # all neighbours have been visited: return the current path return path - - def find_path_backward(network: nx.MultiDiGraph, - current_node, - path: list): - + + def find_path_backward(network: nx.MultiDiGraph, current_node, path: list): # identify the last node's neighbours current_neighbours = set( neighbours(network, current_node, ignore_self_loops=True) - ) + ) # check each neighbour - # 1) if the neighbour is ahead and is a valid node: + # 1) if the neighbour is ahead and is a valid node: # >> recursion w/ neighbour and then return 
the final path - # 2) if the neighbour is ahead and is not a valid node: + # 2) if the neighbour is ahead and is not a valid node: # >> add it to the path and then return it - # 3) if the neighbour is not ahead and is on the path: + # 3) if the neighbour is not ahead and is on the path: # >> continue, namely to check the other neighbour - # 4) if the neighbour is not ahead and is not on the path: + # 4) if the neighbour is not ahead and is not on the path: # >> add it to the beginning of the path and continue - + # check each neighbour for a_neighbour in current_neighbours: # check the direction of edge towards the neighbour @@ -1411,12 +1433,8 @@ def _find_path_direction_insensitive( # add the neighbour to the start of the path path.insert(0, a_neighbour) # recursive call with extended path - return find_path_backward( - network, - path[0], - path - ) - else: # is not a valid node: path ends + return find_path_backward(network, path[0], path) + else: # is not a valid node: path ends # add the neighbour to the start of the path path.insert(0, a_neighbour) # return the path @@ -1427,8 +1445,8 @@ def _find_path_direction_insensitive( # and has two neighbours other than itself: # close the loop and return the path # if (len(set(neighbours( - # network, - # a_neighbour, + # network, + # a_neighbour, # ignore_self_loops=ignore_self_loops))) == 2): # # add the neighbour to the start of the path # path.insert(0, a_neighbour) @@ -1440,33 +1458,27 @@ def _find_path_direction_insensitive( return path # all neighbours have been visited: return the current path return path - + # ************************************************************************* - + # find the path forward, check for cycles and then find the path backwards # explore paths in the forward sense - path = find_path_forward( - network, - start_node, - [start_node] - ) + path = find_path_forward(network, start_node, [start_node]) # check for cycles - if len(path) >= 3 and path[0] == path[-1]: + if len(path) >= 3 and path[0] == path[-1]: # it is a cycle: no need to search backwards return path # explore paths in the backward sense and return the path - return find_path_backward( - network, - path[0], - path - ) - + return find_path_backward(network, path[0], path) + # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** + def find_self_loops(network: nx.MultiDiGraph) -> list: """ Returns a list with the nodes that connect to themselves. @@ -1484,18 +1496,20 @@ def find_self_loops(network: nx.MultiDiGraph) -> list: """ return ( - node_key + node_key for node_key in network.nodes() if network.has_edge(u=node_key, v=node_key) - ) + ) + # ***************************************************************************** # ***************************************************************************** + def find_unconnected_nodes(network: nx.MultiDiGraph) -> list: """ Returns a list with the nodes that are not connected whilst in the network. - + The method is meant to be used with MultiDiGraph objects. Parameters @@ -1509,22 +1523,24 @@ def find_unconnected_nodes(network: nx.MultiDiGraph) -> list: The list of unconnected nodes. 
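    Examples
    --------
    An illustrative sketch (not from the original test suite; assumes the
    package is importable under its src layout):

    >>> import networkx as nx
    >>> from topupopt.data.gis.identify import find_unconnected_nodes
    >>> G = nx.MultiDiGraph()
    >>> G.add_node('isolated')
    >>> k = G.add_edge('a', 'b')
    >>> k = G.add_edge('b', 'a')
    >>> find_unconnected_nodes(G)
    ['isolated']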
""" - + return [ - node_key + node_key for node_key in network.nodes() if len(tuple(network.neighbors(node_key))) == 0 - ] + ] + # ***************************************************************************** # ***************************************************************************** -def nearest_nodes_other_than_themselves(network: nx.MultiDiGraph, - node_keys: list, - return_dist: bool = False) -> list: + +def nearest_nodes_other_than_themselves( + network: nx.MultiDiGraph, node_keys: list, return_dist: bool = False +) -> list: """ Returns a list with the keys of nodes closest to another set of nodes. - + The method relies on osmnx\'s nearest_nodes method. Parameters @@ -1540,38 +1556,38 @@ def nearest_nodes_other_than_themselves(network: nx.MultiDiGraph, Returns ------- nn or (nn, dist) : int/list or tuple - nearest node IDs or optionally a tuple where dist contains distances + nearest node IDs or optionally a tuple where dist contains distances between the points and their nearest nodes (note: from nearest_nodes). """ - + # create a copy of the network - + network_copy = network.copy() - + # remove selected node keys from the copy - + network_copy.remove_nodes_from(node_keys) - + # use nearest_nodes from osmnx to find the nearest nodes in the copy - + nearest_node_keys = nearest_nodes( - network_copy, - [network.nodes[node_key]['x'] - for node_key in node_keys], - [network.nodes[node_key]['y'] - for node_key in node_keys], + network_copy, + [network.nodes[node_key]["x"] for node_key in node_keys], + [network.nodes[node_key]["y"] for node_key in node_keys], return_dist=return_dist, - ) - + ) + return nearest_node_keys + # ***************************************************************************** # ***************************************************************************** -def is_start_or_end_point_or_close(line: LineString, - point: Point, - tolerance: float = 1e-3) -> bool: + +def is_start_or_end_point_or_close( + line: LineString, point: Point, tolerance: float = 1e-3 +) -> bool: """ Returns True if a given point is near the start or end points of a line. @@ -1591,87 +1607,85 @@ def is_start_or_end_point_or_close(line: LineString, end point. """ - + # get the start and end points - + start_coords, *_, end_coords = line.coords - + # # compare the coordinates - - # if (tuple(point.coords)[0] == start_coords or + + # if (tuple(point.coords)[0] == start_coords or # tuple(point.coords)[0] == end_coords): - + # return True - + # compare with the start point - + start_point = Point(start_coords) - + if start_point.distance(point) <= tolerance: - return True - + # compare with the end point - + end_point = Point(end_coords) - + if end_point.distance(point) <= tolerance: - return True - + # return statement - + return False + # ***************************************************************************** # ***************************************************************************** -def is_start_or_end_point(line: LineString, - point: Point) -> bool: + +def is_start_or_end_point(line: LineString, point: Point) -> bool: """ Returns True if a given point is the start or end point of a line. - + Parameters ---------- line : LineString The object describing the line. point : Point The point under consideration. - + Returns ------- bool A boolean indicating whether the point is the start or end points. 
- + """ - + # get the start and end points - + start_coords, *_, end_coords = line.coords - + # compare the coordinates - - if (tuple(point.coords)[0] == start_coords or - tuple(point.coords)[0] == end_coords): - + + if tuple(point.coords)[0] == start_coords or tuple(point.coords)[0] == end_coords: return True - + # return statement - + return False + # ***************************************************************************** # ***************************************************************************** + def identify_edge_closest_to_node( - network: nx.MultiDiGraph, - node_keys: list, - crs: str = None) -> Tuple[list, nx.MultiDiGraph]: + network: nx.MultiDiGraph, node_keys: list, crs: str = None +) -> Tuple[list, nx.MultiDiGraph]: """ Identify the edges that are closest to a given set of nodes. - + The network object should formatted according to OSMnx standards. - + Distances are calculated using projected coordinates, unless a specific coordinate system is given. @@ -1682,7 +1696,7 @@ def identify_edge_closest_to_node( node_keys : list A list of keys corresponding to the nodes under consideration. crs : str, optional - The CRS under which the operation is to be done. The default is None. + The CRS under which the operation is to be done. The default is None. If None, the CRS is determined automatically via the OSMnx library. Returns @@ -1693,42 +1707,44 @@ def identify_edge_closest_to_node( The object for the projected network. """ - + # ************************************************************************* - - # 1) ensure that the network crs is correct and convert if not + + # 1) ensure that the network crs is correct and convert if not # 2) identify the edges that are nearest to the nodes # 3) revert network crs back to the original, if necessary # ************************************************************************* - + # if it is a geographic CRS, convert it to a projected CRS - - if not is_projected(network.graph['crs']) or type(crs) != type(None): - + + if not is_projected(network.graph["crs"]) or type(crs) != type(None): # convert to a projected CRS (including if crs=None) - + projected_network = project_graph(network, to_crs=crs) - + else: - projected_network = network # ************************************************************************* - + # 2) identify the edges that are nearest to the nodes - + nearest_edge_keys = nearest_edges( - projected_network, - X=[projected_network.nodes[node_key][osm.KEY_OSMNX_X] - for node_key in node_keys], - Y=[projected_network.nodes[node_key][osm.KEY_OSMNX_Y] - for node_key in node_keys], - return_dist=False) - + projected_network, + X=[ + projected_network.nodes[node_key][osm.KEY_OSMNX_X] for node_key in node_keys + ], + Y=[ + projected_network.nodes[node_key][osm.KEY_OSMNX_Y] for node_key in node_keys + ], + return_dist=False, + ) + # return statement - + return nearest_edge_keys, projected_network + # ***************************************************************************** # ***************************************************************************** diff --git a/src/topupopt/data/gis/modify.py b/src/topupopt/data/gis/modify.py index e8cad22..6e39c27 100644 --- a/src/topupopt/data/gis/modify.py +++ b/src/topupopt/data/gis/modify.py @@ -19,6 +19,7 @@ from ...problems.esipp.utils import unused_node_key from ..misc.utils import generate_pseudo_unique_key from ..gis import osm from ..gis import identify as gis_iden + # from ..gis import identify as gis_calc from .identify import close_to_extremities from .calculate import 
update_street_count, edge_lengths @@ -26,6 +27,7 @@ from .calculate import update_street_count, edge_lengths # ***************************************************************************** # ***************************************************************************** + def remove_self_loops(network: nx.MultiDiGraph): """ Removes self-loops from a directed graph defined in a MultiDiGraph object. @@ -41,22 +43,24 @@ def remove_self_loops(network: nx.MultiDiGraph): The keys to the nodes whose self-loops were removed. """ - + selflooping_nodes = list(gis_iden.find_self_loops(network)) for node in selflooping_nodes: while network.has_edge(u=node, v=node): network.remove_edge(u=node, v=node) return selflooping_nodes + # ***************************************************************************** # ***************************************************************************** + def transform_roundabouts_into_crossroads( - network: nx.MultiDiGraph, - roundabouts: list) -> list: + network: nx.MultiDiGraph, roundabouts: list +) -> list: """ Transform roundabouts into crossroads. - + If there are roundabouts that encompass other roundabouts, the latter will be ignored. @@ -73,10 +77,10 @@ def transform_roundabouts_into_crossroads( A list of the node keys for the intersections created. """ - + # declare the output list list_roundabout_centroids = [] - + # for each roundabout for roundabout in roundabouts: # make sure roundabout qualifies as a roundabout @@ -101,52 +105,53 @@ def transform_roundabouts_into_crossroads( # break out of the loop break if roundabout_overlaps: - # the roundabout overlaps with some other one, skip it - list_roundabout_centroids.append(None) - continue - + # the roundabout overlaps with some other one, skip it + list_roundabout_centroids.append(None) + continue + # ********************************************************************* # ********************************************************************* # create a new node whose location is the roundabout's centroid list_point_coordinates = [ - (network.nodes[node_key][osm.KEY_OSMNX_X], - network.nodes[node_key][osm.KEY_OSMNX_Y]) + ( + network.nodes[node_key][osm.KEY_OSMNX_X], + network.nodes[node_key][osm.KEY_OSMNX_Y], + ) for node_key in roundabout - ] + ] new_geo = LineString(list_point_coordinates) roundabout_centroid_key = generate_pseudo_unique_key(network) network.add_node( - roundabout_centroid_key, - **{osm.KEY_OSMNX_X: new_geo.centroid.coords.xy[0][0], - osm.KEY_OSMNX_Y: new_geo.centroid.coords.xy[1][0]} - ) + roundabout_centroid_key, + **{ + osm.KEY_OSMNX_X: new_geo.centroid.coords.xy[0][0], + osm.KEY_OSMNX_Y: new_geo.centroid.coords.xy[1][0], + } + ) list_roundabout_centroids.append(roundabout_centroid_key) # ********************************************************************* # ********************************************************************* - # create new edges to link each node leading to the roundabout to the + # create new edges to link each node leading to the roundabout to the # node just created (new_node_key) # find the edges leading to the roundabout list_edges_leading_to_roundabout = [ edge_key # for each node in the roundabout - for node_key in roundabout + for node_key in roundabout # for each neighbouring nodes - for other_node_key in gis_iden.neighbours(network, node_key) + for other_node_key in gis_iden.neighbours(network, node_key) # if it is not in the roundabout itself - if other_node_key not in roundabout + if other_node_key not in roundabout # for each edge between the two nodes for 
edge_key in gis_iden.get_edges_between_two_nodes( - network, - node_key, - other_node_key) - ] + network, node_key, other_node_key + ) + ] # for each edge leading to the roundabout for edge_key in list_edges_leading_to_roundabout: # replace it with a new edge to the new node # get edge dict - edge_dict = network.get_edge_data(edge_key[0], - edge_key[1], - edge_key[2]) + edge_dict = network.get_edge_data(edge_key[0], edge_key[1], edge_key[2]) if osm.KEY_OSMNX_GEOMETRY in edge_dict: # geometry exists old_geometry = edge_dict[osm.KEY_OSMNX_GEOMETRY] @@ -154,25 +159,31 @@ def transform_roundabouts_into_crossroads( # geometry does not exist # create it old_geometry = LineString( - [(network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - network.nodes[edge_key[0]][osm.KEY_OSMNX_Y]), - (network.nodes[edge_key[1]][osm.KEY_OSMNX_X], - network.nodes[edge_key[1]][osm.KEY_OSMNX_Y])] - ) + [ + ( + network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + ), + ( + network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], + ), + ] + ) # if osm.KEY_OSMNX_LENGTH in edge_dict: # # length exists # old_length = edge_dict[osm.KEY_OSMNX_LENGTH] # else: # # length does not exist # old_length = edge_lengths( - # network, + # network, # edge_keys=[edge_key])[edge_key] # the old length has to exist old_length = edge_dict[osm.KEY_OSMNX_LENGTH] - + # ***************************************************************** # ***************************************************************** - + # find closest point if edge_key[0] in roundabout: # this edge starts from the roundabout @@ -181,23 +192,27 @@ def transform_roundabouts_into_crossroads( # create geometry object between old roundabout point to the # roundabout's centroid extra_geometry = LineString( - [(network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_X], - network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_Y]), - (network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - network.nodes[edge_key[0]][osm.KEY_OSMNX_Y])] - ) - if is_projected(network.graph['crs']): + [ + ( + network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_X], + network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_Y], + ), + ( + network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + ), + ] + ) + if is_projected(network.graph["crs"]): # projected graph: use direct method extra_length = length(extra_geometry) - else: # unprojected graph: use great circle method + else: # unprojected graph: use great circle method extra_length = great_circle( - lat1=network.nodes[ - roundabout_centroid_key][osm.KEY_OSMNX_Y], - lon1=network.nodes[ - roundabout_centroid_key][osm.KEY_OSMNX_X], - lat2=network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], - lon2=network.nodes[edge_key[0]][osm.KEY_OSMNX_X] - ) + lat1=network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_Y], + lon1=network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_X], + lat2=network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + lon2=network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + ) elif edge_key[1] in roundabout: # this edge ends in the roundabout new_edge_start_node = edge_key[0] @@ -205,41 +220,43 @@ def transform_roundabouts_into_crossroads( # create geometry object between old roundabout point to the # roundabout's centroid extra_geometry = LineString( - [(network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_X], - network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_Y]), - (network.nodes[edge_key[1]][osm.KEY_OSMNX_X], - network.nodes[edge_key[1]][osm.KEY_OSMNX_Y])] - ) - if 
is_projected(network.graph['crs']): + [ + ( + network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_X], + network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_Y], + ), + ( + network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], + ), + ] + ) + if is_projected(network.graph["crs"]): # projected graph, use direct method extra_length = length(extra_geometry) else: # unprojected graph, use great circle method extra_length = great_circle( - lat1=network.nodes[ - roundabout_centroid_key][osm.KEY_OSMNX_Y], - lon1=network.nodes[ - roundabout_centroid_key][osm.KEY_OSMNX_X], - lat2=network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], - lon2=network.nodes[edge_key[1]][osm.KEY_OSMNX_X] - ) - + lat1=network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_Y], + lon1=network.nodes[roundabout_centroid_key][osm.KEY_OSMNX_X], + lat2=network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], + lon2=network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + ) + # ***************************************************************** # ***************************************************************** - + edge_dict[osm.KEY_OSMNX_GEOMETRY] = linemerge( - [old_geometry, - extra_geometry]) - edge_dict[osm.KEY_OSMNX_LENGTH] = old_length+extra_length - network.add_edge(new_edge_start_node, - new_edge_end_node, - **edge_dict) - + [old_geometry, extra_geometry] + ) + edge_dict[osm.KEY_OSMNX_LENGTH] = old_length + extra_length + network.add_edge(new_edge_start_node, new_edge_end_node, **edge_dict) + # ************************************************************************* # ************************************************************************* - + # remove the roundabout nodes - + for roundabout_index, roundabout in enumerate(roundabouts): # if the transformation of the roundabout was successful... if list_roundabout_centroids[roundabout_index] != None: @@ -247,21 +264,23 @@ def transform_roundabouts_into_crossroads( network.remove_nodes_from(roundabout) # return return list_roundabout_centroids - + # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** # TODO: develop algorithm to traverse the graph in search of dead ends -def remove_dead_ends(network: nx.MultiDiGraph, - keepers: tuple = None, - max_iterations: int = 1) -> list: + +def remove_dead_ends( + network: nx.MultiDiGraph, keepers: tuple = None, max_iterations: int = 1 +) -> list: """ Removes dead ends (non-cyclical branches) from a directed graph. - + The removal process is iterative and nodes that are initially not dead ends may be removed in subsequent iterations. @@ -281,10 +300,10 @@ def remove_dead_ends(network: nx.MultiDiGraph, A list of the keys for the nodes that were removed. 
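    Examples
    --------
    An illustrative sketch (not from the original test suite; assumes the
    package is importable under its src layout). A directed triangle with a
    dangling node: the dangler is removed and the cycle is kept:

    >>> import networkx as nx
    >>> from topupopt.data.gis.modify import remove_dead_ends
    >>> G = nx.MultiDiGraph()
    >>> for u, v in [(0, 1), (1, 2), (2, 0), (2, 3)]:
    ...     k = G.add_edge(u, v)
    >>> remove_dead_ends(G)
    [3]
    >>> sorted(G.nodes())
    [0, 1, 2]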
""" - + if type(keepers) == type(None): keepers = [] - + # while true nodes_removed = [] iteration_counter = 0 @@ -296,12 +315,9 @@ def remove_dead_ends(network: nx.MultiDiGraph, for node_key in network.nodes() if node_key not in keepers # if it has at most one neighbour other than itself - if len(set(gis_iden.neighbours( - network, - node_key, - ignore_self_loops=True) - )) <= 1 - ] + if len(set(gis_iden.neighbours(network, node_key, ignore_self_loops=True))) + <= 1 + ] # if there no nodes meeting those conditions, break out of loop if len(target_nodes) == 0: break @@ -312,27 +328,26 @@ def remove_dead_ends(network: nx.MultiDiGraph, iteration_counter += 1 # store the nodes removed nodes_removed.extend(target_nodes) - # return the list of nodes removed + # return the list of nodes removed return nodes_removed - + + # ***************************************************************************** # ***************************************************************************** -def replace_path( - network: nx.MultiDiGraph, - path: list - ) -> tuple: + +def replace_path(network: nx.MultiDiGraph, path: list) -> tuple: """ Replaces a simplifiable path with one equivalent edge linking both ends. - + If there are parallel or anti-parallel edges along the path, only one will be used to create the new edge. - + There should only be one edge between each of the nodes on the path, since only one will be used to create the new edge. By default, the edges between the nodes should be in the forward direction, but this restriction can be lifted. In that case, the edges between the nodes can be in any direction. - + The intermediate nodes on the path will be removed. Parameters @@ -353,69 +368,65 @@ def replace_path( The key for the new edge. """ - + # ************************************************************************* - + # make sure path it is a simplifiable path if not gis_iden.is_path_straight( - network, - path, - consider_reversed_edges=True, - ignore_self_loops=True - ): - raise ValueError('The path cannot be simplified.') - + network, path, consider_reversed_edges=True, ignore_self_loops=True + ): + raise ValueError("The path cannot be simplified.") + # ************************************************************************* - + # create the new edge - + # create the geometry/linestring list_oneway = [] list_reversed = [] list_osmid = [] list_geometries = [] - + edge_length = 0 - - for node_pair_index in range(len(path)-1): + + for node_pair_index in range(len(path) - 1): # get one edge for this node pair - edge_key = list(gis_iden.get_edges_between_two_nodes( - network, - path[node_pair_index], - path[node_pair_index+1] - )) + edge_key = list( + gis_iden.get_edges_between_two_nodes( + network, path[node_pair_index], path[node_pair_index + 1] + ) + ) edge_key = sorted( - (network.edges[_key][osm.KEY_OSMNX_LENGTH], _key) - for _key in edge_key - )[0][1] + (network.edges[_key][osm.KEY_OSMNX_LENGTH], _key) for _key in edge_key + )[0][1] if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: # geometry exists: possibly a composite geometry # check if the geometry is consistent with the edge declaration if gis_iden.is_edge_consistent_with_geometry(network, edge_key): # the geometry is not reversed - list_geometries.append( - network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY] - ) - else: # the geometry is reversed + list_geometries.append(network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY]) + else: # the geometry is reversed list_geometries.append( network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY].reverse() - 
) + ) else: # geometry does not exist: direct path # the edge is not reversed: use it as is list_geometries.append( LineString( - [(network.nodes[ - path[node_pair_index]][osm.KEY_OSMNX_X], - network.nodes[ - path[node_pair_index]][osm.KEY_OSMNX_Y]), - (network.nodes[ - path[node_pair_index+1]][osm.KEY_OSMNX_X], - network.nodes[ - path[node_pair_index+1]][osm.KEY_OSMNX_Y])] - ) + [ + ( + network.nodes[path[node_pair_index]][osm.KEY_OSMNX_X], + network.nodes[path[node_pair_index]][osm.KEY_OSMNX_Y], + ), + ( + network.nodes[path[node_pair_index + 1]][osm.KEY_OSMNX_X], + network.nodes[path[node_pair_index + 1]][osm.KEY_OSMNX_Y], + ), + ] ) - + ) + # osmid if type(network.edges[edge_key][osm.KEY_OSMNX_OSMID]) == list: list_osmid.extend(network.edges[edge_key][osm.KEY_OSMNX_OSMID]) @@ -423,29 +434,21 @@ def replace_path( list_osmid.append(network.edges[edge_key][osm.KEY_OSMNX_OSMID]) # reversed if type(network.edges[edge_key][osm.KEY_OSMNX_REVERSED]) == list: - list_reversed.extend( - network.edges[edge_key][osm.KEY_OSMNX_REVERSED] - ) + list_reversed.extend(network.edges[edge_key][osm.KEY_OSMNX_REVERSED]) else: - list_reversed.append( - network.edges[edge_key][osm.KEY_OSMNX_REVERSED] - ) + list_reversed.append(network.edges[edge_key][osm.KEY_OSMNX_REVERSED]) # oneway if type(network.edges[edge_key][osm.KEY_OSMNX_ONEWAY]) == list: - list_oneway.extend( - network.edges[edge_key][osm.KEY_OSMNX_ONEWAY] - ) + list_oneway.extend(network.edges[edge_key][osm.KEY_OSMNX_ONEWAY]) else: - list_oneway.append( - network.edges[edge_key][osm.KEY_OSMNX_ONEWAY] - ) - + list_oneway.append(network.edges[edge_key][osm.KEY_OSMNX_ONEWAY]) + # update the edge length edge_length += network.edges[edge_key][osm.KEY_OSMNX_LENGTH] - + # merge the geometries new_geo = linemerge(list_geometries) - + # verify that it led to the creation of a linestring object if type(new_geo) != LineString: # TODO: make sure this is still relevant and add tests @@ -454,62 +457,59 @@ def replace_path( list_geometries = [] # snap each consecutive geometry pair in the MultiLineString object # since linemerge separates linestrings that are not contiguous - for geo_pair_index in range(len(new_geo.geoms)-1): + for geo_pair_index in range(len(new_geo.geoms) - 1): list_geometries.append( snap( new_geo.geoms[geo_pair_index], - new_geo.geoms[geo_pair_index+1], - tolerance=1e-3 - ) + new_geo.geoms[geo_pair_index + 1], + tolerance=1e-3, ) + ) new_geo = linemerge(list_geometries) - + # prepare edge data dict list_osmid = list(set(list_osmid)) list_oneway = list(set(list_oneway)) list_reversed = list(set(list_reversed)) # create the dict edge_dict = { - osm.KEY_OSMNX_LENGTH: edge_length, + osm.KEY_OSMNX_LENGTH: edge_length, osm.KEY_OSMNX_GEOMETRY: new_geo, osm.KEY_OSMNX_ONEWAY: ( list_oneway if len(list_oneway) != 1 else list_oneway[0] - ), + ), osm.KEY_OSMNX_REVERSED: ( list_reversed if len(list_reversed) != 1 else list_reversed[0] - ), - osm.KEY_OSMNX_OSMID: ( - list_osmid if len(list_osmid) != 1 else list_osmid[0] - ) - } - + ), + osm.KEY_OSMNX_OSMID: (list_osmid if len(list_osmid) != 1 else list_osmid[0]), + } + # ************************************************************************* - + # add edges - start_node = path[0] + start_node = path[0] end_node = path[-1] - + # create the forward edge - for_k = network.add_edge( - start_node, - end_node, - **edge_dict - ) - + for_k = network.add_edge(start_node, end_node, **edge_dict) + # delete all intermediate nodes network.remove_nodes_from(path[1:-1]) - + # return the edge key return (start_node, 
end_node, for_k) + # ***************************************************************************** # ***************************************************************************** -def remove_longer_parallel_edges(network: nx.MultiDiGraph, - ignore_edge_directions: bool = False) -> list: + +def remove_longer_parallel_edges( + network: nx.MultiDiGraph, ignore_edge_directions: bool = False +) -> list: """ Removes longer parallel edges from the network. - + Parallel edges are those connecting the same nodes in the same direction. If there are parallel edges between any given pair of nodes, the longer ones will be removed. If desired, edge directions can be ignored. In that case, @@ -526,73 +526,69 @@ def remove_longer_parallel_edges(network: nx.MultiDiGraph, A list of the edges removed. """ - + # redundancy: having more than one edge between two nodes # solution: remove the longest one, leave the shortest one - + # for each node pair - + edges_removed = [] - for node_one in network.nodes(): + for node_one in network.nodes(): for node_two in network.nodes(): # skip self-loops - if node_one == node_two: + if node_one == node_two: continue # get the edges between the two nodes - if ignore_edge_directions: # both directions + if ignore_edge_directions: # both directions list_edges = gis_iden.get_edges_between_two_nodes( - network, - node_one, - node_two - ) - else: # one direction + network, node_one, node_two + ) + else: # one direction list_edges = gis_iden.get_edges_from_a_to_b( - network, - node_start=node_one, - node_end=node_two - ) - + network, node_start=node_one, node_end=node_two + ) + # if none exist, skip if len(list_edges) == 0: continue - + # otherwise, find out which is the shortest one sorted_edges = sorted( (network.edges[edge_key][osm.KEY_OSMNX_LENGTH], edge_key) for edge_key in list_edges - ) - - network.remove_edges_from( - edge_tuple[1] for edge_tuple in sorted_edges[1:] - ) - + ) + + network.remove_edges_from(edge_tuple[1] for edge_tuple in sorted_edges[1:]) + edges_removed.extend(edge_tuple[1] for edge_tuple in sorted_edges[1:]) - + return edges_removed - + + # ***************************************************************************** # ***************************************************************************** - + + def merge_points_into_linestring( - line: LineString, - points: tuple or list, - tolerance: float = 7./3-4./3-1, - fixed_extremities: bool = True, - use_start_point_equidistant: bool = True - ) -> LineString: + line: LineString, + points: tuple or list, + tolerance: float = 7.0 / 3 - 4.0 / 3 - 1, + fixed_extremities: bool = True, + use_start_point_equidistant: bool = True, +) -> LineString: """ Merge points into a line where they are closest to it. - - The points are merged in succession and the line keeps changing as the + + The points are merged in succession and the line keeps changing as the points are merged but the original points remain on it. - + The points added do not need to be close to the line. The tolerance parameter is only used to determine closeness to the points already on the line. - + If a point is closest to a point on the line shared by two line segments, then the former is placed after the latter, and not before. - + If a point is closest to multiple line segments, then the point is placed between the start and end points of the first among the closest segments. @@ -607,7 +603,7 @@ def merge_points_into_linestring( The default is 7/3-4/3-1 (2.220446049250313e-16). 
fixed_extremities : bool, optional If False, the line can be extended beyond the original start and end - points. If not, points closest to the extremities are not merged and + points. If not, points closest to the extremities are not merged and are instead linked to the closest extremities. The default is True. use_start_point_equidistant : bool, optional If True, the start point is adopted as the closest point when the start @@ -624,128 +620,127 @@ def merge_points_into_linestring( A list with the indices of the points closest to the end point. """ - + if fixed_extremities: - # the line cannot be extended - + # identify which points are close to the start and end points # note: these points will not be merged - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = close_to_extremities( - line, + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = close_to_extremities( + line, points, tolerance=tolerance, use_start_point_equidistant=use_start_point_equidistant, - return_distances=True - ) - + return_distances=True, + ) + # for each new point - + for i in range(len(points)): - if i in close_to_start or i in close_to_end: - # the point is close to the start or end nodes: skip iteration - + continue - + if points[i].coords[0] in line.coords: - # this point is already on the line: skip iteration - + continue - + # merge the points that are between the start and end points... - + # create line segments for each pair of points on the line - + line_segments = [ - LineString([line.coords[j],line.coords[j+1]]) - for j in range(len(line.coords)-1) - ] - + LineString([line.coords[j], line.coords[j + 1]]) + for j in range(len(line.coords) - 1) + ] + # calculate the distances between point i and each point on the line.
- + line_segment_distances = points[i].distance(line_segments) - + # locate the closest pair of points - + sorted_distances = sorted( - (line_segment_distance, j) - for j, line_segment_distance in enumerate( - line_segment_distances - ) - ) + (line_segment_distance, j) + for j, line_segment_distance in enumerate(line_segment_distances) + ) # prepare new line coordinates with the new point - - line_coords = list(line.coords) - - if (len(sorted_distances) >= 2 and - sorted_distances[0][0] == sorted_distances[1][0]): - + + line_coords = list(line.coords) + + if ( + len(sorted_distances) >= 2 + and sorted_distances[0][0] == sorted_distances[1][0] + ): # there are 2(+) segments that are equally close to the point - + # if the closest points are end/start points of a segment, then # place the point after the second point of the first segment - - if abs(Point( - line_segments[ - sorted_distances[0][1] - ].coords[-1] - ).distance(points[i])-line_distances[i]) <= tolerance: - + + if ( + abs( + Point( + line_segments[sorted_distances[0][1]].coords[-1] + ).distance(points[i]) + - line_distances[i] + ) + <= tolerance + ): line_coords.insert( - # sorted_distances[0][1]+1, - sorted_distances[0][1]+2, - tuple(points[i].coords[0]) - ) - + # sorted_distances[0][1]+1, + sorted_distances[0][1] + 2, + tuple(points[i].coords[0]), + ) + else: - line_coords.insert( - sorted_distances[0][1]+1, - # sorted_distances[0][1]+2, - tuple(points[i].coords[0]) - ) - + sorted_distances[0][1] + 1, + # sorted_distances[0][1]+2, + tuple(points[i].coords[0]), + ) + else: - # there is only segment with the minimum distance: # place the new point where the end point of the segment is - + line_coords.insert( - sorted_distances[0][1]+1, # i.e., the segment number + 1 - tuple(points[i].coords[0]) - ) - + sorted_distances[0][1] + 1, # i.e., the segment number + 1 + tuple(points[i].coords[0]), + ) + # create new line - + line = LineString(line_coords) - + else: - # the line can be extended - + raise NotImplementedError - + # return statement return line, close_to_start, close_to_end - + + # ***************************************************************************** # ***************************************************************************** - -def split_linestring(line: LineString, - points: list, - tolerance: float = 7./3-4./3-1): + + +def split_linestring( + line: LineString, points: list, tolerance: float = 7.0 / 3 - 4.0 / 3 - 1 +): """ Split a line into segments according to a set of cutting points. - + The cutting points should be close to the original line, since they will be merged into it before the cutting takes place. If they are not close to the line, the resulting segments will not resemble the original line. @@ -770,21 +765,18 @@ def split_linestring(line: LineString, A list with the indices of the points closest to the end point. 
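    Examples
    --------
    An illustrative sketch (not from the original test suite): one interior
    cutting point yields two contiguous segments:

    >>> from shapely.geometry import LineString, Point
    >>> from topupopt.data.gis.modify import split_linestring
    >>> line = LineString([(0.0, 0.0), (2.0, 0.0)])
    >>> segments, close_to_start, close_to_end = split_linestring(
    ...     line, [Point(1.0, 0.0)]
    ... )
    >>> [list(segment.coords) for segment in segments]
    [[(0.0, 0.0), (1.0, 0.0)], [(1.0, 0.0), (2.0, 0.0)]]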
""" - + # add the points to the linestring new_line, close_to_start, close_to_end = merge_points_into_linestring( - line=line, - points=points, - tolerance=tolerance, - fixed_extremities=True - ) - - if len(close_to_end)+len(close_to_start) == len(points): + line=line, points=points, tolerance=tolerance, fixed_extremities=True + ) + + if len(close_to_end) + len(close_to_start) == len(points): # no changes to the line (assuming no swaps) return [], close_to_start, close_to_end - - # split the linestring object (new_line) - line_segments = [] + + # split the linestring object (new_line) + line_segments = [] previous_split_index = 0 # for each point on the new line (they should be ordered) for coords_index, coords in enumerate(new_line.coords): @@ -795,57 +787,53 @@ def split_linestring(line: LineString, # it is a start or end point: skip the iteration # line_segments.append(None) continue - + # if it is not a start nor an end point, and it is on the original - # line, then - # if not a start nor an end point, build the segment between the + # line, then + # if not a start nor an end point, build the segment between the # previous split point and the current input point line_segments.append( - LineString( - new_line.coords[previous_split_index:coords_index+1] - ) - ) - + LineString(new_line.coords[previous_split_index : coords_index + 1]) + ) + # store new end/start point previous_split_index = coords_index - + # else: - + # # the point is not on the original line: split point - + # pass - + # next iteration - + # add the last segment - line_segments.append( - LineString( - new_line.coords[previous_split_index:] - ) - ) - + line_segments.append(LineString(new_line.coords[previous_split_index:])) + # return the geometries for each segment and the relevant points by order return line_segments, close_to_start, close_to_end - + # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** -def recreate_edges(network: nx.MultiDiGraph, - points: dict, - tolerance: float = 7./3-4./3-1) -> tuple: + +def recreate_edges( + network: nx.MultiDiGraph, points: dict, tolerance: float = 7.0 / 3 - 4.0 / 3 - 1 +) -> tuple: """ Recreates OSMnx-type edges by splitting them into multiple smaller edges, which are defined by points along the original edge. - + If the points are closest to the extremities than other parts of an edge, no changes are introduced and the points become synonymous with the closest extremity (i.e., the start or end points). - + If the points are closest to other parts of an edge, the edge is split there with new nodes and edges being created to replace the original edge. - + Parameters ---------- network : nx.MultiDiGraph @@ -864,19 +852,18 @@ def recreate_edges(network: nx.MultiDiGraph, A dictionary keyed by edge and holding the keys for the edges that were created to replace the original edge. If a given edge was not recreated, its key does not appear in the dictionary. 
- + """ - + # declare outputs connection_node_keys_per_edge = {} - + recreated_edges = {} - + # for each edge that is to be split - + for edge_key, points_in_edge in points.items(): - # check if there is a geometry already if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: # get the geometry @@ -888,26 +875,31 @@ def recreate_edges(network: nx.MultiDiGraph, else: # there is not geometry, create it line = LineString( - [(network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - network.nodes[edge_key[0]][osm.KEY_OSMNX_Y]), - (network.nodes[edge_key[1]][osm.KEY_OSMNX_X], - network.nodes[edge_key[1]][osm.KEY_OSMNX_Y])] - ) - + [ + ( + network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + ), + ( + network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], + ), + ] + ) + # split the line into segments using the intermediate points # note: points that are close to the start and end points are handled # separately - + line_segments, close_to_start, close_to_end = split_linestring( - line=line, - points=points_in_edge - ) - + line=line, points=points_in_edge + ) + # link each point to a node key: # those closer to the start node: edge_key[0] # those closer to the end node: edge_key[1] # intermediate points: new node key via unused_node_key - + # _node_keys_by_point = { # points_in_edge[i]: ( # edge_key[0] if i in close_to_start else edge_key[1] @@ -916,136 +908,123 @@ def recreate_edges(network: nx.MultiDiGraph, # ) else unused_node_key(network) # for i in range(len(points_in_edge)) # } - + _node_keys_by_point = {} - + for i in range(len(points_in_edge)): if i in close_to_start or i in close_to_end: # point i is close to the extremities: use start/end node key _node_keys_by_point[points_in_edge[i]] = ( edge_key[0] if i in close_to_start else edge_key[1] - ) + ) else: # point i is not close to the extremities: new node key - _node_keys_by_point[ - points_in_edge[i] - ] = unused_node_key(network) + _node_keys_by_point[points_in_edge[i]] = unused_node_key(network) network.add_node(_node_keys_by_point[points_in_edge[i]]) - + # _node_keys = [ # (edge_key[0] if i in close_to_start else edge_key[1]) if ( # i in close_to_start or i in close_to_end # ) else unused_node_key(network) # for i in range(len(points_in_edge)) # ] - + # should be the same order as in the inputs - - _node_keys = [ - _node_keys_by_point[point] - for point in points_in_edge - ] - + + _node_keys = [_node_keys_by_point[point] for point in points_in_edge] + # ********************************************************************* - + # create new edges between the points to rebuild the edge - + segment_keys = [] - + edge_dict = dict(network.get_edge_data(*edge_key)) - + for line_index, line_segment in enumerate(line_segments): - edge_dict[osm.KEY_OSMNX_GEOMETRY] = line_segment - + if line_index == 0: - # initial segment - - v_key = _node_keys_by_point[ - Point(line_segment.coords[-1]) - ] - + + v_key = _node_keys_by_point[Point(line_segment.coords[-1])] + k_key = network.add_edge( - u_for_edge=edge_key[0], - v_for_edge=v_key, - **edge_dict - ) - + u_for_edge=edge_key[0], v_for_edge=v_key, **edge_dict + ) + network.add_node( - v_key, - **{osm.KEY_OSMNX_X: line_segment.coords[-1][0], - osm.KEY_OSMNX_Y: line_segment.coords[-1][1]} - ) - - segment_keys.append((edge_key[0],v_key,k_key)) - - elif line_index == len(line_segments)-1: - + v_key, + **{ + osm.KEY_OSMNX_X: line_segment.coords[-1][0], + osm.KEY_OSMNX_Y: line_segment.coords[-1][1], + } + ) + + 
segment_keys.append((edge_key[0], v_key, k_key)) + + elif line_index == len(line_segments) - 1: # final segment - - u_key = _node_keys_by_point[ - Point(line_segment.coords[0]) - ] - + + u_key = _node_keys_by_point[Point(line_segment.coords[0])] + k_key = network.add_edge( - u_for_edge=u_key, - v_for_edge=edge_key[1], - **edge_dict - ) - + u_for_edge=u_key, v_for_edge=edge_key[1], **edge_dict + ) + network.add_node( - u_key, - **{osm.KEY_OSMNX_X: line_segment.coords[0][0], - osm.KEY_OSMNX_Y: line_segment.coords[0][1]} - ) - - segment_keys.append((u_key,edge_key[1],k_key)) - - else: # intermediate segment - - u_key = _node_keys_by_point[ - Point(line_segment.coords[0]) - ] - - v_key = _node_keys_by_point[ - Point(line_segment.coords[-1]) - ] - + u_key, + **{ + osm.KEY_OSMNX_X: line_segment.coords[0][0], + osm.KEY_OSMNX_Y: line_segment.coords[0][1], + } + ) + + segment_keys.append((u_key, edge_key[1], k_key)) + + else: # intermediate segment + u_key = _node_keys_by_point[Point(line_segment.coords[0])] + + v_key = _node_keys_by_point[Point(line_segment.coords[-1])] + k_key = network.add_edge( - u_for_edge=u_key, - v_for_edge=v_key, - **edge_dict - ) - + u_for_edge=u_key, v_for_edge=v_key, **edge_dict + ) + network.add_node( - u_key, - **{osm.KEY_OSMNX_X: line_segment.coords[0][0], - osm.KEY_OSMNX_Y: line_segment.coords[0][1]} - ) - + u_key, + **{ + osm.KEY_OSMNX_X: line_segment.coords[0][0], + osm.KEY_OSMNX_Y: line_segment.coords[0][1], + } + ) + network.add_node( - v_key, - **{osm.KEY_OSMNX_X: line_segment.coords[-1][0], - osm.KEY_OSMNX_Y: line_segment.coords[-1][1]} - ) - - segment_keys.append((u_key,v_key,k_key)) - + v_key, + **{ + osm.KEY_OSMNX_X: line_segment.coords[-1][0], + osm.KEY_OSMNX_Y: line_segment.coords[-1][1], + } + ) + + segment_keys.append((u_key, v_key, k_key)) + # TODO: use network.add_edges_from() for performance? - + # TODO: try to create all the edges (with lengths included) in one go - + # calculate the lengths edge_lengths_by_dict = edge_lengths(network, edge_keys=segment_keys) network.add_edges_from( tuple( - (*segment_key, - {osm.KEY_OSMNX_LENGTH: edge_lengths_by_dict[segment_key]}) - for segment_key in segment_keys + ( + *segment_key, + {osm.KEY_OSMNX_LENGTH: edge_lengths_by_dict[segment_key]}, ) + for segment_key in segment_keys ) - + ) + # update the outputs if len(line_segments) > 0: recreated_edges[edge_key] = segment_keys @@ -1053,19 +1032,21 @@ def recreate_edges(network: nx.MultiDiGraph, # return statement return connection_node_keys_per_edge, recreated_edges + # ***************************************************************************** # ***************************************************************************** + def connect_nodes_to_edges( - network: nx.MultiDiGraph, - node_keys: list, - edge_keys: list, - store_unsimplified_geometries: bool = False, - use_one_edge_per_direction: bool = False - ) -> tuple: + network: nx.MultiDiGraph, + node_keys: list, + edge_keys: list, + store_unsimplified_geometries: bool = False, + use_one_edge_per_direction: bool = False, +) -> tuple: """ Connects nodes to edges using additional edges in an OSMnx-formatted graph. - + Parameters ---------- network : nx.MultiDiGraph @@ -1079,7 +1060,7 @@ def connect_nodes_to_edges( If True, straight line geometries that are created are also preserved. If False, they are not preserved. The default is False. use_one_edge_per_direction : bool, optional - If True, two edges are used for each new edge created to connect a node. 
+ If True, two edges are used for each new edge created to connect a node. If False, only one (forward) edge will be created. The default is False. Returns @@ -1088,7 +1069,7 @@ def connect_nodes_to_edges( A network graph object where the node and edge pairs are connected. new_edge_keys : list An ordered list containing the keys for the new edges created to connect - each node. + each node. connection_node_keys_per_edge : dict A dictionary keyed by edge and holding the node keys for the points that were provided initially to split the edge. These node keys are for nodes @@ -1099,39 +1080,38 @@ def connect_nodes_to_edges( its key does not appear in the dictionary. """ - + # ************************************************************************* - + # 1) group nodes by the edge they are closest to - # 2) for each edge, and node that is to be connected to it, find its closest + # 2) for each edge, and node that is to be connected to it, find its closest # point on the edge # 3) recreate each edge after dividing it at the specified points # 4) connect the nodes to the edges # 5) delete the original edges, if they have been split # 6) calculate or update the edge attributes - + # ************************************************************************* # ************************************************************************* - + # 1) group nodes by the edge they are closest to - + nodes_to_connect_to_edge = { edge_key: tuple( node_key for other_edge_key, node_key in zip(edge_keys, node_keys) if other_edge_key == edge_key - ) + ) for edge_key in set(edge_keys) - } + } # ************************************************************************* # ************************************************************************* - - # 2) for each edge, and node that is to be connected to it, find its closest + + # 2) for each edge, and node that is to be connected to it, find its closest # point on the edge points_per_edge = {} for edge_key, _node_keys in nodes_to_connect_to_edge.items(): - # check if the geometry exists if osm.KEY_OSMNX_GEOMETRY in network.edges[edge_key]: # the geometry object exists, get it @@ -1139,58 +1119,66 @@ def connect_nodes_to_edges( else: # the geometry object does not exist, make it edge_geo = LineString( - [(network.nodes[edge_key[0]][osm.KEY_OSMNX_X], - network.nodes[edge_key[0]][osm.KEY_OSMNX_Y]), - (network.nodes[edge_key[1]][osm.KEY_OSMNX_X], - network.nodes[edge_key[1]][osm.KEY_OSMNX_Y])] - ) + [ + ( + network.nodes[edge_key[0]][osm.KEY_OSMNX_X], + network.nodes[edge_key[0]][osm.KEY_OSMNX_Y], + ), + ( + network.nodes[edge_key[1]][osm.KEY_OSMNX_X], + network.nodes[edge_key[1]][osm.KEY_OSMNX_Y], + ), + ] + ) # store the geometry if store_unsimplified_geometries: # update the edge - network.add_edge(*edge_key, - **{osm.KEY_OSMNX_GEOMETRY: edge_geo}) + network.add_edge(*edge_key, **{osm.KEY_OSMNX_GEOMETRY: edge_geo}) # use nearest_points to locate the closest points on the edge points_per_edge[edge_key] = [ nearest_points( - edge_geo, - Point(network.nodes[node_key][osm.KEY_OSMNX_X], - network.nodes[node_key][osm.KEY_OSMNX_Y]) - )[0] # [0] to get the point on the edge + edge_geo, + Point( + network.nodes[node_key][osm.KEY_OSMNX_X], + network.nodes[node_key][osm.KEY_OSMNX_Y], + ), + )[ + 0 + ] # [0] to get the point on the edge for node_key in _node_keys - ] + ] # TIP: exclude the points that can be considered close to the start or end nodes # TIP: use the shortest line method to obtain the line geometry - + # 
************************************************************************* # ************************************************************************* - + # 3) recreate each edge after dividing it at the specified points - + connection_node_keys_per_edge, recreated_edges = recreate_edges( - network, - points=points_per_edge - ) - + network, points=points_per_edge + ) + # put the keys for the connection nodes - + connection_node_keys = [ connection_node_keys_per_edge[edge_key][ nodes_to_connect_to_edge[edge_key].index(node_key) - ] - for node_key, edge_key in zip(node_keys, edge_keys) ] - + for node_key, edge_key in zip(node_keys, edge_keys) + ] + # delete the original edges, if they have been split - + network.remove_edges_from(recreated_edges) - + # ************************************************************************* # ************************************************************************* - + # 4) connect the nodes to the edges - + connection_edge_containers = [] - + for node_key, connection_node_key in zip(node_keys, connection_node_keys): # skip self-loops if node_key == connection_node_key: @@ -1198,73 +1186,65 @@ def connect_nodes_to_edges( # proceed with other types of edges if use_one_edge_per_direction: # add one directed edge per direction - connection_edge_containers.append( - (node_key, connection_node_key) - ) - connection_edge_containers.append( - (connection_node_key, node_key) - ) + connection_edge_containers.append((node_key, connection_node_key)) + connection_edge_containers.append((connection_node_key, node_key)) else: # add one directed edge starting from the edge and ending in the node - connection_edge_containers.append( - (connection_node_key, node_key) - ) + connection_edge_containers.append((connection_node_key, node_key)) edge_keys = network.add_edges_from(connection_edge_containers) - + # ************************************************************************* # ************************************************************************* - + # 5) calculate or update the edge attributes - + # calculate edge lengths and street counts for the new edges if len(edge_keys) != 0: # there are new edges: calculate the lengths and add them new_edge_keys = [ - (*edge_tuple[0:2], edge_key) # apply it only to specific edges - for edge_tuple, edge_key in zip( - connection_edge_containers, edge_keys) - ] - - if is_projected(network.graph['crs']): + (*edge_tuple[0:2], edge_key) # apply it only to specific edges + for edge_tuple, edge_key in zip(connection_edge_containers, edge_keys) + ] + + if is_projected(network.graph["crs"]): # projected crs: use own method - lengths_dict = edge_lengths( - network, - edge_keys=new_edge_keys - ) - + lengths_dict = edge_lengths(network, edge_keys=new_edge_keys) + network.add_edges_from( tuple( - (*edge_key, - {osm.KEY_OSMNX_LENGTH: lengths_dict[edge_key], - osm.KEY_OSMNX_ONEWAY: False, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_OSMID: None}) - for edge_key in new_edge_keys + ( + *edge_key, + { + osm.KEY_OSMNX_LENGTH: lengths_dict[edge_key], + osm.KEY_OSMNX_ONEWAY: False, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_OSMID: None, + }, ) + for edge_key in new_edge_keys ) - + ) + else: # unprojected crs: use the osmnx method network = add_edge_lengths(network, edges=new_edge_keys) - + # update the street count update_street_count(network) else: - new_edge_keys = [] - + # ************************************************************************* # ************************************************************************* - + return 
network, new_edge_keys, connection_node_keys_per_edge, recreated_edges - + + # ***************************************************************************** # ***************************************************************************** -def remove_reversed_edges( - network: nx.MultiDiGraph, - reversed_attr: bool = True - ) -> list: + +def remove_reversed_edges(network: nx.MultiDiGraph, reversed_attr: bool = True) -> list: """ Removes reversed edges from an OSMnx-formatted directed multigraph. @@ -1291,11 +1271,13 @@ edge_key for edge_key, reverse_edges in reversed_edges.items() # at least one reversed edge - if len(reverse_edges) >= 1 + if len(reverse_edges) >= 1 # must satisfy the reversed attribute value - if (network.edges[edge_key][osm.KEY_OSMNX_REVERSED] == reversed_attr - or type(network.edges[edge_key][osm.KEY_OSMNX_REVERSED]) == list) - ] + if ( + network.edges[edge_key][osm.KEY_OSMNX_REVERSED] == reversed_attr + or type(network.edges[edge_key][osm.KEY_OSMNX_REVERSED]) == list + ) + ] # filter for edge_key in edges_removed: for other_edge_key in reversed_edges[edge_key]: @@ -1305,17 +1287,19 @@ network.remove_edges_from(edges_removed) # return return edges_removed - + + # ***************************************************************************** # ***************************************************************************** + def create_reverse_edges( - network: nx.MultiDiGraph, - edge_keys: list = None, - ) -> list: + network: nx.MultiDiGraph, + edge_keys: list = None, +) -> list: """ Creates reversed edges for all or select edges in a network. - + A reversed edge has the same attributes as the original except that it is declared in the opposite direction and has reversed geometry and related attributes. The edges created are compliant with OSMnx. @@ -1339,16 +1323,16 @@ A list with the keys for the new edges. """ - + edges_created = [] if type(edge_keys) == type(None): edge_keys = tuple(network.edges(keys=True)) - + for edge_key in edge_keys: # make sure the edge exists if not network.has_edge(*edge_key): - raise ValueError('Unknown edge: '+str(edge_key)) + raise ValueError("Unknown edge: " + str(edge_key)) # get its data edge_dict = network.get_edge_data(*edge_key) # create a dict for the reversed edge @@ -1356,34 +1340,33 @@ # check for the reversed keyword if type(edge_dict[osm.KEY_OSMNX_REVERSED]) == bool: # boolean: negate it - rev_edge_dict[osm.KEY_OSMNX_REVERSED] = ( - not edge_dict[osm.KEY_OSMNX_REVERSED] - ) + rev_edge_dict[osm.KEY_OSMNX_REVERSED] = not edge_dict[ + osm.KEY_OSMNX_REVERSED + ] - elif (type(edge_dict[osm.KEY_OSMNX_REVERSED]) == list and - len(edge_dict[osm.KEY_OSMNX_REVERSED]) == 2 and - len(set(edge_dict[osm.KEY_OSMNX_REVERSED])) == 2): - # list: + elif ( + type(edge_dict[osm.KEY_OSMNX_REVERSED]) == list + and len(edge_dict[osm.KEY_OSMNX_REVERSED]) == 2 + and len(set(edge_dict[osm.KEY_OSMNX_REVERSED])) == 2 + ): + # list: rev_edge_dict[osm.KEY_OSMNX_REVERSED] = [True, False] else: raise ValueError( - 'The edge '+str(edge_key)+'is not compliant with OSMnx.' - ) + "The edge " + str(edge_key) + " is not compliant with OSMnx."
+ ) # check for the geometry keyword if osm.KEY_OSMNX_GEOMETRY in edge_dict: # a geometry exists, reverse it for the reverse edge dict - rev_edge_dict[osm.KEY_OSMNX_GEOMETRY] = ( - edge_dict[osm.KEY_OSMNX_GEOMETRY].reverse() - ) + rev_edge_dict[osm.KEY_OSMNX_GEOMETRY] = edge_dict[ + osm.KEY_OSMNX_GEOMETRY + ].reverse() # add the edge - rev_k = network.add_edge( - edge_key[1], - edge_key[0], - **rev_edge_dict - ) - + rev_k = network.add_edge(edge_key[1], edge_key[0], **rev_edge_dict) + edges_created.append((edge_key[1], edge_key[0], rev_k)) # return the keys for the edges created return edges_created - + + # ***************************************************************************** # ***************************************************************************** diff --git a/src/topupopt/data/gis/osm.py b/src/topupopt/data/gis/osm.py index efc83bf..2df1edc 100644 --- a/src/topupopt/data/gis/osm.py +++ b/src/topupopt/data/gis/osm.py @@ -5,14 +5,14 @@ # general -KEY_OSM_CITY = 'addr:city' -KEY_OSM_COUNTRY = 'addr:country' -KEY_OSM_HOUSE_NUMBER = 'addr:housenumber' -KEY_OSM_MUNICIPALITY = 'addr:municipality' -KEY_OSM_PLACE = 'addr:place' -KEY_OSM_POSTCODE = 'addr:postcode' -KEY_OSM_STREET = 'addr:street' -KEY_OSM_SOURCE = 'source' +KEY_OSM_CITY = "addr:city" +KEY_OSM_COUNTRY = "addr:country" +KEY_OSM_HOUSE_NUMBER = "addr:housenumber" +KEY_OSM_MUNICIPALITY = "addr:municipality" +KEY_OSM_PLACE = "addr:place" +KEY_OSM_POSTCODE = "addr:postcode" +KEY_OSM_STREET = "addr:street" +KEY_OSM_SOURCE = "source" KEYS_OSM = [ KEY_OSM_CITY, @@ -22,41 +22,39 @@ KEYS_OSM = [ KEY_OSM_PLACE, KEY_OSM_POSTCODE, KEY_OSM_STREET, - KEY_OSM_SOURCE - ] + KEY_OSM_SOURCE, +] # country specific -KEY_COUNTRY_DK = 'dk' +KEY_COUNTRY_DK = "dk" -KEY_OSM_DK_BUILDING_ENTRANCE_ID = 'osak:identifier' +KEY_OSM_DK_BUILDING_ENTRANCE_ID = "osak:identifier" + +KEY_OSM_BUILDING_ENTRANCE_ID = {KEY_COUNTRY_DK: KEY_OSM_DK_BUILDING_ENTRANCE_ID} -KEY_OSM_BUILDING_ENTRANCE_ID = { - KEY_COUNTRY_DK: KEY_OSM_DK_BUILDING_ENTRANCE_ID - } - # ***************************************************************************** # osmnx -KEY_OSMNX_OSMID = 'osmid' -KEY_OSMNX_ELEMENT_TYPE = 'element_type' +KEY_OSMNX_OSMID = "osmid" +KEY_OSMNX_ELEMENT_TYPE = "element_type" -KEY_OSMNX_NAME = 'name' -KEY_OSMNX_GEOMETRY = 'geometry' -KEY_OSMNX_REVERSED = 'reversed' -KEY_OSMNX_LENGTH = 'length' -KEY_OSMNX_ONEWAY = 'oneway' -KEY_OSMNX_X = 'x' -KEY_OSMNX_Y = 'y' -KEY_OSMNX_LON = 'lon' -KEY_OSMNX_LAT = 'lat' -KEY_OSMNX_STREET_COUNT = 'street_count' +KEY_OSMNX_NAME = "name" +KEY_OSMNX_GEOMETRY = "geometry" +KEY_OSMNX_REVERSED = "reversed" +KEY_OSMNX_LENGTH = "length" +KEY_OSMNX_ONEWAY = "oneway" +KEY_OSMNX_X = "x" +KEY_OSMNX_Y = "y" +KEY_OSMNX_LON = "lon" +KEY_OSMNX_LAT = "lat" +KEY_OSMNX_STREET_COUNT = "street_count" KEYS_OSMNX = [ - KEY_OSMNX_OSMID, # one half of multi-index for geodataframes from osmnx - KEY_OSMNX_ELEMENT_TYPE, # the other half of the multi-index from osmnx + KEY_OSMNX_OSMID, # one half of multi-index for geodataframes from osmnx + KEY_OSMNX_ELEMENT_TYPE, # the other half of the multi-index from osmnx KEY_OSMNX_NAME, KEY_OSMNX_GEOMETRY, KEY_OSMNX_REVERSED, @@ -66,8 +64,8 @@ KEYS_OSMNX = [ KEY_OSMNX_Y, KEY_OSMNX_LON, KEY_OSMNX_LAT, - KEY_OSMNX_STREET_COUNT - ] + KEY_OSMNX_STREET_COUNT, +] KEYS_OSMNX_NODES = { KEY_OSMNX_OSMID, @@ -77,28 +75,24 @@ KEYS_OSMNX_NODES = { KEY_OSMNX_Y, KEY_OSMNX_LON, KEY_OSMNX_LAT, - KEY_OSMNX_STREET_COUNT - } + KEY_OSMNX_STREET_COUNT, +} -KEYS_OSMNX_NODES_ESSENTIAL = { - KEY_OSMNX_OSMID, - KEY_OSMNX_NAME, - 
KEY_OSMNX_STREET_COUNT - } +KEYS_OSMNX_NODES_ESSENTIAL = {KEY_OSMNX_OSMID, KEY_OSMNX_NAME, KEY_OSMNX_STREET_COUNT} KEYS_OSMNX_EDGES = { KEY_OSMNX_OSMID, KEY_OSMNX_LENGTH, KEY_OSMNX_ONEWAY, KEY_OSMNX_GEOMETRY, - KEY_OSMNX_REVERSED - } + KEY_OSMNX_REVERSED, +} KEYS_OSMNX_EDGES_ESSENTIAL = { KEY_OSMNX_OSMID, KEY_OSMNX_LENGTH, KEY_OSMNX_ONEWAY, - KEY_OSMNX_REVERSED - } + KEY_OSMNX_REVERSED, +} -# ***************************************************************************** \ No newline at end of file +# ***************************************************************************** diff --git a/src/topupopt/data/gis/utils.py b/src/topupopt/data/gis/utils.py index 6e7b42f..dbc5c1b 100644 --- a/src/topupopt/data/gis/utils.py +++ b/src/topupopt/data/gis/utils.py @@ -1,4 +1,3 @@ - # imports from ast import literal_eval @@ -27,109 +26,108 @@ from ..gis import calculate as gis_calc # constants -KEY_GPD_CRS = 'crs' -KEY_GPD_GEOMETRY = 'geometry' +KEY_GPD_CRS = "crs" +KEY_GPD_GEOMETRY = "geometry" -RKW_GPKG = 'packed' +RKW_GPKG = "packed" # ***************************************************************************** # ***************************************************************************** # TODO: complete method + def find_gpkg_packable_columns(gdf: GeoDataFrame) -> set: - # columns incompatible with GPKG format: # 1) columns with equivalent lowercase names # 2) columns of Nones (fiona 1.9.3; appears to work with fiona 1.8.x) # 3) columns with lists, dicts (keys become strings), tuples and sets # 4) columns with other types except 'geometry' types in the geometry col. # 5) columns with multiple types - + # packable columns: 1), 2) and 3) - + # ************************************************************************* - + # 1) columns with equivalent lowercase names - - lowercase_columns = tuple( - column.lower() - for column in gdf.columns - ) - + + lowercase_columns = tuple(column.lower() for column in gdf.columns) + set_columns = set( column for column, lccolumn in zip(gdf.columns, lowercase_columns) if lowercase_columns.count(lccolumn) >= 2 - ) - + ) + # ************************************************************************* - + # for each column - + for column in gdf.columns: - # if the column has already been identified, or if it is the geometry # one (identified via KEY_GPD_GEOMETRY), skip the current column - + if column == KEY_GPD_GEOMETRY or column in set_columns: - continue - + # 2) columns of Nones (fiona 1.9.3; appears to work with fiona 1.8.x) # 3) columns with lists, dicts (keys become strings), tuples and sets - + # identify the type of objects in each row - - set_types = set( - type(gdf.loc[(index,column)]) - for index in gdf.index - ) - + + set_types = set(type(gdf.loc[(index, column)]) for index in gdf.index) + # allowed types: int, float, numpy floats - - if (len(set_types) == 1 and - (str in set_types or float in set_types or int in set_types or - bool in set_types or float64 in set_types or int64 in set_types)): - - # if (len(set_types) == 1 and - # (str in set_types or float in set_types or int in set_types or - # bool in set_types or float64 in set_types or int64 in set_types or - # type(None) in set_types) - # ): - + + if len(set_types) == 1 and ( + str in set_types + or float in set_types + or int in set_types + or bool in set_types + or float64 in set_types + or int64 in set_types + ): + # if (len(set_types) == 1 and + # (str in set_types or float in set_types or int in set_types or + # bool in set_types or float64 in set_types or int64 in set_types or + # type(None) in 
set_types) + # ): + # these are allowed - + continue - + else: - # two or more different types are not allowed - + set_columns.add(column) - + # ************************************************************************* - + return set_columns + # ***************************************************************************** # ***************************************************************************** -def write_gdf_file(gdf: GeoDataFrame, - filename: str, - columns_to_pack: tuple = None, - preserve_original: bool = True, - **kwargs): + +def write_gdf_file( + gdf: GeoDataFrame, + filename: str, + columns_to_pack: tuple = None, + preserve_original: bool = True, + **kwargs +): """ Writes the contents of a GeoDataFrame object into a GIS-compatible file. - - The method differs from the GeoDataFrame.to_file() method by allowing + + The method differs from the GeoDataFrame.to_file() method by allowing objects with columns whose elements are containers to be written to a file. - For this, it relies on the repr() method. For correctly recognising these + For this, it relies on the repr() method. For correctly recognising these elements while reading the file, the literal_eval() method should be used. - Note that the literal_eval() is not completely safe (e.g., is vulnerable to + Note that the literal_eval() is not completely safe (e.g., is vulnerable to denial of service attacks) but does not allow for arbitrary code execution. - + Other format rules: - Missing values in object columns should be specified as None types @@ -159,84 +157,79 @@ def write_gdf_file(gdf: GeoDataFrame, ------- None. - """ + """ if preserve_original: - # copy the original (slower) - + new_gdf = gdf.copy() - + else: - # just point to the original (faster) - + new_gdf = gdf - + if type(columns_to_pack) != tuple: - # no columns identified, find the columns with containers # TODO: reach this statement columns_to_pack = tuple(find_gpkg_packable_columns(gdf)) - + else: - # focus on specific columns - + for column in columns_to_pack: - if column not in new_gdf.columns: # TODO: reach this statement - raise ValueError('Unknown column: '+str(column)) - + raise ValueError("Unknown column: " + str(column)) + # handle NaN and other values - - #new_gdf[column].fillna("NaN", inplace=True) - + + # new_gdf[column].fillna("NaN", inplace=True) + # containers have to be transformed into strings - + new_gdf[column] = new_gdf[column].apply(lambda x: repr(x)) - + # format specific limitations - + # GPKG: columns with the same lower case equivalent are not allowed - - if '.gpkg' in filename: # solution: use reserved words and numbers - - # identify incompatible columns - - lowercase_columns = tuple( - column.lower() - for column in gdf.columns - ) - + + if ".gpkg" in filename: # solution: use reserved words and numbers + # identify incompatible columns + + lowercase_columns = tuple(column.lower() for column in gdf.columns) + # place all their contents into one new column - + pack_columns( - gdf=new_gdf, + gdf=new_gdf, columns=list( column for column, lccolumn in zip(gdf.columns, lowercase_columns) if lowercase_columns.count(lccolumn) >= 2 - ) - ) - + ), + ) + # the GeoDataFrame object is ready: write it - + new_gdf.to_file(filename, **kwargs) + # ***************************************************************************** # ***************************************************************************** -def pack_columns(gdf: GeoDataFrame, - columns: list, - packed_column_name: str = RKW_GPKG, - convert_to_string: bool = True): + +def pack_columns( 
+ gdf: GeoDataFrame, + columns: list, + packed_column_name: str = RKW_GPKG, + convert_to_string: bool = True, +): """ Places the contents of multiple GeoDataFrame columns into a single one. - + This method is intended to prepare a GeoDataFrame object for I/O, since some file formats (e.g., GPKG) place restrictions on column names. By placing - the contents of various columns into a single one, these can be correctly + the contents of various columns into a single one, these can be correctly unpacked later, provided some conditions are met concerning the contents. Parameters @@ -261,54 +254,52 @@ """ # if only one or no columns are specified, change nothing - + if len(columns) <= 1: - return - + # if the new column name is pre-existing, raise error - + if packed_column_name in gdf.columns: # TODO: reach this statement - raise ValueError('The desired column name already exists.') - + raise ValueError("The desired column name already exists.") + # create a new data dict - + data_dict = { index: { - column: gdf.loc[(index,column)] # gdf[repeated_column].loc[index] + column: gdf.loc[(index, column)] # gdf[repeated_column].loc[index] # column: repr(gdf.loc[(index,column)]) for column in columns - } - for index in gdf.index } - + } + for index in gdf.index + } + # add a new column - + gdf[packed_column_name] = Series(data=data_dict, index=gdf.index) - + # convert it to a string, if needed - + if convert_to_string: - - gdf[packed_column_name] = gdf[packed_column_name].apply( - lambda x: repr(x) - ) - + gdf[packed_column_name] = gdf[packed_column_name].apply(lambda x: repr(x)) + # drop original columns - + gdf.drop(labels=columns, axis=1, inplace=True) + # ***************************************************************************** # ***************************************************************************** + def unpack_columns(gdf: GeoDataFrame, packed_column_name: str = RKW_GPKG): """ Unpacks a specific GeoDataFrame column into multiple columns. - + This method is intended to allow reading GeoDataFrame data from files, since the conventional formats (e.g., GPKG) introduce some restrictions.
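    (Editorial illustration, hypothetical: a row whose clashing columns "A"
    and "a" hold 1 and 2 is stored by pack_columns() as the string
    repr({"A": 1, "a": 2}); unpacking evaluates that string and restores
    the two original columns.)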
- + Parameters ---------- gdf : GeoDataFrame @@ -328,45 +319,45 @@ def unpack_columns(gdf: GeoDataFrame, packed_column_name: str = RKW_GPKG): """ if packed_column_name not in gdf.columns: # TODO: reach this statement - raise ValueError('The column specified does not exist.') - + raise ValueError("The column specified does not exist.") + # if there are no rows, there is nothing to unpack - + if len(gdf) != 0: - # the object is not empty - + # create a dict with one dict per merged column # each dict corresponds to one packed column, to be keyed with index - + column_content_dict = { merged_column: { - index: gdf.loc[(index,packed_column_name)][merged_column] - #index: gdf[packed_column_name].loc[index][merged_column] + index: gdf.loc[(index, packed_column_name)][merged_column] + # index: gdf[packed_column_name].loc[index][merged_column] for index in gdf.index - } + } for merged_column in gdf[packed_column_name].iloc[0] - } - - # create the columns - + } + + # create the columns + for name, content in column_content_dict.items(): - gdf[name] = Series(data=content, index=gdf.index) - + # delete the packed column - + gdf.drop(labels=packed_column_name, axis=1, inplace=True) + # ***************************************************************************** # ***************************************************************************** -def read_gdf_file(filename: str, - packed_columns: tuple = None, - index: str or list = None) -> GeoDataFrame: + +def read_gdf_file( + filename: str, packed_columns: tuple = None, index: str or list = None +) -> GeoDataFrame: """ Loads the contents of a file with GIS data into a GeoDataFrame object. - + The method differs from the geopandas read_file() function by recognising elements with container data. For this, it relies on the literal_eval() method, which is not completely safe (e.g., is vulnerable to denial of service @@ -396,257 +387,236 @@ def read_gdf_file(filename: str, GeoDataFrame The GeoDataFrame object with the data loaded from the file. - """ + """ gdf = read_file(filename) - + # unpack special columns - - if '.gpkg' in filename and RKW_GPKG in gdf.columns: - + + if ".gpkg" in filename and RKW_GPKG in gdf.columns: # packed column appears to exist: decode column contents - - gdf[RKW_GPKG] = gdf[RKW_GPKG].apply( - lambda x: literal_eval(x) - ) - + + gdf[RKW_GPKG] = gdf[RKW_GPKG].apply(lambda x: literal_eval(x)) + # unpack it - + unpack_columns(gdf=gdf, packed_column_name=RKW_GPKG) - + # handle types - + if type(index) != type(None): - # a specific index is required, replace existing one - + gdf.set_index(index, drop=True, inplace=True) - + if type(packed_columns) != tuple: - # figure out which ones need it...
# TODO: reach this statement raise NotImplementedError - - #packed_columns = tuple(find_gpkg_packable_columns(gdf)) - + + # packed_columns = tuple(find_gpkg_packable_columns(gdf)) + # focus on specific columns - + for column in packed_columns: - if column not in gdf.columns: # TODO: reach this statement - raise ValueError('Unknown column: '+str(column)) - - gdf[column] = gdf[column].apply( - lambda x: literal_eval(x) - ) - + raise ValueError("Unknown column: " + str(column)) + + gdf[column] = gdf[column].apply(lambda x: literal_eval(x)) + return gdf + # ***************************************************************************** # ***************************************************************************** # create osmnx-like geodataframes for nodes -def create_node_geodataframe(longitudes: tuple or list, - latitudes: tuple or list, - osmids: tuple or list = None, - crs: str = "EPSG:4326", - **kwargs) -> GeoDataFrame: - + +def create_node_geodataframe( + longitudes: tuple or list, + latitudes: tuple or list, + osmids: tuple or list = None, + crs: str = "EPSG:4326", + **kwargs +) -> GeoDataFrame: if len(longitudes) != len(latitudes): - - raise ValueError('The input parameters have mismatched sizes.') - + raise ValueError("The input parameters have mismatched sizes.") + if type(osmids) != type(None): - # check sizes - + if len(longitudes) != len(osmids): - - raise ValueError('The input parameters have mismatched sizes.') - + raise ValueError("The input parameters have mismatched sizes.") + else: - - # generate node keys - + # generate node keys + osmids = (str(uuid4()) for i in range(len(longitudes))) - + data_dict = { osm.KEY_OSMNX_GEOMETRY: [ Point(longitude, latitude) for longitude, latitude in zip(longitudes, latitudes) - ], - } - + ], + } + for kwarg in kwargs: - data_dict[kwarg] = kwargs[kwarg] - + return GeoDataFrame( data_dict, index=MultiIndex.from_tuples( - [('node', osmid) for osmid in osmids], - names=[osm.KEY_OSMNX_ELEMENT_TYPE, - osm.KEY_OSMNX_OSMID] - ), - crs=crs - ) + [("node", osmid) for osmid in osmids], + names=[osm.KEY_OSMNX_ELEMENT_TYPE, osm.KEY_OSMNX_OSMID], + ), + crs=crs, + ) + # ***************************************************************************** # ***************************************************************************** + def prepare_node_data_from_geodataframe( - gdf: GeoDataFrame, - node_key_column: str = None, - include_columns: list = None, - include_geometry: bool = False) -> tuple: + gdf: GeoDataFrame, + node_key_column: str = None, + include_columns: list = None, + include_geometry: bool = False, +) -> tuple: """Prepare a container with node data from a GeoDataFrame object.""" - + node_keys = [] node_data_container = [] - + node_key_to_gdf_index_dict = {} - + # check if the GeoDataFrame has the right type of index - + if gdf.index.names != [osm.KEY_OSMNX_ELEMENT_TYPE, osm.KEY_OSMNX_OSMID]: - - raise ValueError( - 'The GeoDataFrame object does not have the right index.') + raise ValueError("The GeoDataFrame object does not have the right index.") # for entry in the gdf object for gdf_entry in range(len(gdf)): - # select key - + if type(node_key_column) == str: - # the node_key_column has been specified: use a specific column as key - + node_key = gdf.iloc[gdf_entry][node_key_column] - - else: # default case: the key is the OSM identifier (should be unique) - + + else: # default case: the key is the OSM identifier (should be unique) # use the OSMID as the node key - + node_key = gdf.index[gdf_entry][1] - + # select node data - + geo = 
gdf.iloc[gdf_entry][KEY_GPD_GEOMETRY] - - node_dict = { - osm.KEY_OSMNX_X: geo.x, - osm.KEY_OSMNX_Y: geo.y - } - - # add geometry - + + node_dict = {osm.KEY_OSMNX_X: geo.x, osm.KEY_OSMNX_Y: geo.y} + + # add geometry + if include_geometry: - node_dict[osm.KEY_OSMNX_GEOMETRY] = geo - + # add extra columns - + if type(include_columns) == list: - for other_column in include_columns: - node_dict[other_column] = gdf.iloc[gdf_entry][other_column] - + # create new entry in container - - node_data_container.append( - (node_key, - node_dict) - ) - + + node_data_container.append((node_key, node_dict)) + # store node key - + node_keys.append(node_key) - + # update the dict - + node_key_to_gdf_index_dict[node_key] = gdf.index[gdf_entry] - + # ************************************************************************* - + return node_keys, node_data_container, node_key_to_gdf_index_dict + # ***************************************************************************** # ***************************************************************************** # TODO: simplify the passing of options to the methods relied upon -def plot_discrete_attributes(gdf_buildings: GeoDataFrame, - column: str, - category_to_label: dict, - zoom_level: int = 15, - figsize: tuple = (25,25), - legend_title: str = None, - markersize: int = 50, - edgecolor: str = 'k', - linewidth: float = 0.5, - markeredgewidth: float = 0.5, - markeredgecolor: str = 'k', - include_basemap: bool = False): + +def plot_discrete_attributes( + gdf_buildings: GeoDataFrame, + column: str, + category_to_label: dict, + zoom_level: int = 15, + figsize: tuple = (25, 25), + legend_title: str = None, + markersize: int = 50, + edgecolor: str = "k", + linewidth: float = 0.5, + markeredgewidth: float = 0.5, + markeredgecolor: str = "k", + include_basemap: bool = False, +): """Plots a map with discrete attributes found in GeoDataFrame column.""" - + gdf_map = gdf_buildings.to_crs(epsg=3857) - - ax = gdf_map.plot(figsize=figsize, - legend=True, - categorical=True, - column=column, - markersize=markersize, - edgecolor=edgecolor, - linewidth=linewidth - ) - + + ax = gdf_map.plot( + figsize=figsize, + legend=True, + categorical=True, + column=column, + markersize=markersize, + edgecolor=edgecolor, + linewidth=linewidth, + ) + # adjust legend labels - + legend_handles = ax.legend_.legend_handles - - for legend_handle in legend_handles: + + for legend_handle in legend_handles: legend_handle.set_markeredgewidth(markeredgewidth) legend_handle.set_markeredgecolor(markeredgecolor) - + # convert keys to string (since that is what the method asks for) - - _category_to_label = { - str(key):value for key, value in category_to_label.items() - } - - legend_texts = [ - _category_to_label[text.get_text()] for text in ax.legend_.texts - ] - - ax.legend( - legend_handles, - legend_texts, - title=legend_title - ) - + + _category_to_label = {str(key): value for key, value in category_to_label.items()} + + legend_texts = [_category_to_label[text.get_text()] for text in ax.legend_.texts] + + ax.legend(legend_handles, legend_texts, title=legend_title) + # add base map if include_basemap: - cx.add_basemap(ax, - #crs="EPSG:4326", # switch to another crs - zoom=zoom_level, - source=cx.providers.OpenStreetMap.Mapnik) + cx.add_basemap( + ax, + # crs="EPSG:4326", # switch to another crs + zoom=zoom_level, + source=cx.providers.OpenStreetMap.Mapnik, + ) + # ***************************************************************************** # 
***************************************************************************** -def count_ocurrences(gdf: GeoDataFrame, - column: str, - column_entries: list = None) -> dict: + +def count_ocurrences( + gdf: GeoDataFrame, column: str, column_entries: list = None +) -> dict: """ Counts the number of occurrences per entry in a DataFrame object's column. - - If a list is provided, only the entries that match those in the list are + + If a list is provided, only the entries that match those in the list are counted. If no list is provided, all unique entries are counted. Parameters @@ -665,70 +635,64 @@ def count_ocurrences(gdf: GeoDataFrame, A dictionary with the counts whose keys are the values counted. """ - + if type(column_entries) == list: - # find entries also present in the dict - + # initialise dict - + count_dict = {} - + # for each key in the dict - + for key in column_entries: - # store the number of rows - - count_dict[key] = gdf[gdf[column]==key].shape[0] - + + count_dict[key] = gdf[gdf[column] == key].shape[0] + # count the number of rows with this key - + if type(key) == type(None): - - count_dict[key] = gdf[gdf[column].isnull()].shape[0] - + count_dict[key] = gdf[gdf[column].isnull()].shape[0] + else: - - count_dict[key] = gdf[gdf[column]==key].shape[0] - + count_dict[key] = gdf[gdf[column] == key].shape[0] + else: - # find all unique entries - + # initialise dict - + count_dict = {} - + for entry in gdf[column]: - # check if it is already in the dict - + if entry in count_dict: - # it is, skip - + continue - + # it is not, count and store the number of rows with said entry - + if type(entry) == type(None): - - count_dict[entry] = gdf[gdf[column].isnull()].shape[0] - + count_dict[entry] = gdf[gdf[column].isnull()].shape[0] + else: - - count_dict[entry] = gdf[gdf[column]==entry].shape[0] - + count_dict[entry] = gdf[gdf[column] == entry].shape[0] + # return statement - + return count_dict + # ***************************************************************************** # ***************************************************************************** -def get_directed(network: MultiGraph, - drop_unsimplified_geometries: bool = True) -> MultiDiGraph: + +def get_directed( + network: MultiGraph, drop_unsimplified_geometries: bool = True +) -> MultiDiGraph: """ Converts an OSMnx-formatted MultiGraph object into a MultiDiGraph one. @@ -746,41 +710,42 @@ def get_directed(network: MultiGraph, An object describing the transformed graph. 
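    (Editorial note: each edge of the undirected input is expected to carry
    'from' and 'to' attributes, and the implementation below recreates the
    directed edge from 'from' towards 'to'.)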
""" - + directed_network = MultiDiGraph() - + directed_network.add_nodes_from(network.nodes(data=True)) - + for edge_key in network.edges(keys=True): - edge_data = dict(network.edges[edge_key]) - u = edge_data['from'] - v = edge_data['to'] - edge_data.pop('from') - edge_data.pop('to') - - if (drop_unsimplified_geometries and - osm.KEY_OSMNX_GEOMETRY in edge_data and - len(edge_data[osm.KEY_OSMNX_GEOMETRY].coords) == 2): - + u = edge_data["from"] + v = edge_data["to"] + edge_data.pop("from") + edge_data.pop("to") + + if ( + drop_unsimplified_geometries + and osm.KEY_OSMNX_GEOMETRY in edge_data + and len(edge_data[osm.KEY_OSMNX_GEOMETRY].coords) == 2 + ): edge_data.pop(osm.KEY_OSMNX_GEOMETRY) - - directed_network.add_edge( - u_for_edge=u, - v_for_edge=v, - **edge_data) - + + directed_network.add_edge(u_for_edge=u, v_for_edge=v, **edge_data) + return directed_network + # ***************************************************************************** # ***************************************************************************** -def simplify_network(network: MultiDiGraph, - protected_nodes: list, - dead_end_probing_depth: int = 5, - remove_opposite_parallel_edges: bool = False, - update_street_count_per_node: bool = True, - **roundabout_conditions): + +def simplify_network( + network: MultiDiGraph, + protected_nodes: list, + dead_end_probing_depth: int = 5, + remove_opposite_parallel_edges: bool = False, + update_street_count_per_node: bool = True, + **roundabout_conditions +): """ Simplifies a network described in a OSMnx-formatted MultiDiGraph object. @@ -793,7 +758,7 @@ def simplify_network(network: MultiDiGraph, dead_end_probing_depth: int The maximum number of nodes a dead end can have to be detectable. remove_opposite_parallel_edges : bool, optional - If True, longer parallel edges in opposite directions are also removed. + If True, longer parallel edges in opposite directions are also removed. The default is False. update_street_count_per_node : bool, optional If True, updates the street count on each node. The default is True. @@ -805,59 +770,51 @@ def simplify_network(network: MultiDiGraph, None. 
""" - + # 1) remove dead ends (tends to create straight paths) gis_mod.remove_dead_ends( - network, - protected_nodes, - max_iterations=dead_end_probing_depth - ) + network, protected_nodes, max_iterations=dead_end_probing_depth + ) # 2) remove longer parallel edges (tends to create straight paths) gis_mod.remove_longer_parallel_edges( - network, - ignore_edge_directions=remove_opposite_parallel_edges - ) + network, ignore_edge_directions=remove_opposite_parallel_edges + ) # 3) remove self loops (tends to create straight paths and dead ends) gis_mod.remove_self_loops(network) - # 4) join segments (can create self-loops) - simplifiable_paths = gis_iden.find_simplifiable_paths( - network, - protected_nodes - ) + # 4) join segments (can create self-loops) + simplifiable_paths = gis_iden.find_simplifiable_paths(network, protected_nodes) for path in simplifiable_paths: gis_mod.replace_path(network, path) # 4) remove self loops (tends to create straight paths and dead ends) gis_mod.remove_self_loops(network) # 5) transform roundabouts into crossroads (can create straight paths) - list_roundabout_nodes = gis_iden.find_roundabouts( - network, - **roundabout_conditions) - gis_mod.transform_roundabouts_into_crossroads( - network, - list_roundabout_nodes - ) + list_roundabout_nodes = gis_iden.find_roundabouts(network, **roundabout_conditions) + gis_mod.transform_roundabouts_into_crossroads(network, list_roundabout_nodes) # 6) update street count if update_street_count_per_node: gis_calc.update_street_count(network) + # ***************************************************************************** # ***************************************************************************** + def identify_building_entrance_edges( - gdf: GeoDataFrame, - gdf_street_column: str, - network: gis_iden.nx.MultiDiGraph, - node_key_to_gdf_index_dict: dict, - crs: str = None, - revert_to_original_crs: bool = False) -> tuple: + gdf: GeoDataFrame, + gdf_street_column: str, + network: gis_iden.nx.MultiDiGraph, + node_key_to_gdf_index_dict: dict, + crs: str = None, + revert_to_original_crs: bool = False, +) -> tuple: """ Identifies the edges that can be linked to special nodes in an OSMnx graph through a OSMnx-formatted GeoDataFrame object. - + The links between nodes and edges are determined by: - the edge being the closest one to the node; - the node and edge being associated through a string in the GeoDataFrame. - + When a node\'s closest edge cannot be linked to it by a string, the node\'s string is used to search for suitable alternatives, among which the closest is selected. If none are available, the closest edge is selected. @@ -886,301 +843,277 @@ def identify_building_entrance_edges( ------- dict A dictionary keyed by node and holding the selected edge key. - dict - A dictionary keyed by node and holding the key to its closest edge. + dict + A dictionary keyed by node and holding the key to its closest edge. nx.MultiDiGraph The object for the network used in the method. """ - + # Notes: # - Each building is expected to have a street name associated with it; # - If a building does not have a street name associated with it, then the # edge corresponding to the street must be determined using distances. 
- + # 1) for each node (building entrance), identify the closest edge # 2) identify which edges identified before cannot be linked back to their # respective nodes via street names or via (only) one intermediate edge # 3) for the nodes whose closest edges cannot be linked back to the no- - # des, find the edges that can, if any, (via their street names) and select + # des, find the edges that can (if any, via their street names) and select # the closest one among them as a substitute for the closest one in general # 4) for all other cases, use the closest edge among all - + # output: a list of edge keys (one per building entrance) # exceptions: if a building cannot be linked to an edge key, link it to None - + # ************************************************************************* - + if revert_to_original_crs: - - original_crs = network.graph['crs'] - + original_crs = network.graph["crs"] + # ************************************************************************* - + # 1) for each building (entrance), identify the closest edge - - node_keys = list(node_key_to_gdf_index_dict.keys()) + + node_keys = list(node_key_to_gdf_index_dict.keys()) closest_edge_keys, network = gis_iden.identify_edge_closest_to_node( - network=network, - node_keys=node_keys, - crs=crs) # do not revert back to the original yet - + network=network, node_keys=node_keys, crs=crs + ) # do not revert back to the original yet + # create a dict for the closest edge keys: {node keys: closest edge keys} - + building_entrance_edges = dict(zip(node_keys, closest_edge_keys)) - + _closest_edge_keys_dict = dict(building_entrance_edges) - + # ************************************************************************* - + # 2) identify the nodes that require additional precautions (i.e., those # that should not be connected to their closest edges) - + # the nodes not requiring additional precautions are the following: # i) those that do not concern buildings (no address); # ii) those whose closest edge has the same street name as the node; # iii) those whose closest edge is a nameless intermediate edge that connects # with another edge which has the same street name as the node (driveway). - + # the nodes that require special precautions are: # iv) those whose closest edges have names that do not match the node's; # v) those whose closest edges do not have street names and do not lead to # an edge whose street name matches that of the building address. - + # in both cases, the solution is to find edges whose street names match # those of the node and connect the one that is closest among them. If not # possible (no edges), then the solution is to connect to the closest edge.
- + # 2.1) generate a dict with the correspondence between streets and nodes - + node_street_names = { - node_key: gdf.loc[ - node_key_to_gdf_index_dict[node_key]][gdf_street_column] + node_key: gdf.loc[node_key_to_gdf_index_dict[node_key]][gdf_street_column] for node_key in node_keys - } - + } + trouble_nodes = [] - + for node_key, closest_edge_key in zip(node_keys, closest_edge_keys): - # check if the street name is a string - + if type(node_street_names[node_key]) != str: - # not a string, this node is not problematic (case i) - + continue - + # check if the edge has a name attribute - + if osm.KEY_OSMNX_NAME in network.edges[closest_edge_key]: - # edge object has name attribute, check if the street names match - + if type(network.edges[closest_edge_key][osm.KEY_OSMNX_NAME]) == str: - - # the address is a string - - if (network.edges[closest_edge_key][osm.KEY_OSMNX_NAME] in - node_street_names[node_key]): - + # the address is a string + + if ( + network.edges[closest_edge_key][osm.KEY_OSMNX_NAME] + in node_street_names[node_key] + ): # the street names match, this is not a problematic node (ii) - + continue - - else: - + + else: # the streets names differ, this is a problematic node (iv) - + trouble_nodes.append(node_key) - + continue - - else: # the address is not a string: it should be a list (osmnx) - + + else: # the address is not a string: it should be a list (osmnx) # if the node street is found among the elements - + matching_street_name_found_list = tuple( _name in node_street_names[node_key] - for _name in network.edges[closest_edge_key][ - osm.KEY_OSMNX_NAME] - ) - + for _name in network.edges[closest_edge_key][osm.KEY_OSMNX_NAME] + ) + if True in matching_street_name_found_list: - # the street names match, this is not a problematic node (ii) - + continue - - else: - + + else: # the streets names differ, this is a problematic node (iv) - + trouble_nodes.append(node_key) - + continue - + # otherwise, the edge is nameless but may not lead to the right street - + # get adjacent/neighbouring edges other_edges = gis_iden.get_edges_involving_node( - network=network, - node_key=closest_edge_key[0], - include_self_loops=False - ) + network=network, node_key=closest_edge_key[0], include_self_loops=False + ) other_edges.extend( gis_iden.get_edges_involving_node( - network=network, - node_key=closest_edge_key[1], - include_self_loops=False - ) + network=network, node_key=closest_edge_key[1], include_self_loops=False ) - + ) + matching_street_name_found = False - + # for each neighbour - + for other_edge_key in other_edges: - # check if the current edge is the closest one - + if closest_edge_key == other_edge_key: - # it is: skip, since it has already been considered - - continue - + + continue + # check if the current edge has the address/name attribute - + if osm.KEY_OSMNX_NAME in network.edges[other_edge_key]: - # it does, now check if it is a string - - if type(network.edges[other_edge_key][ - osm.KEY_OSMNX_NAME]) == str: - + + if type(network.edges[other_edge_key][osm.KEY_OSMNX_NAME]) == str: # it is, now check if the street names match - - if (network.edges[other_edge_key][osm.KEY_OSMNX_NAME] in - node_street_names[node_key]): - + + if ( + network.edges[other_edge_key][osm.KEY_OSMNX_NAME] + in node_street_names[node_key] + ): # an edge with a matching street name was found (iii) - + matching_street_name_found = True - + break - + else: - # if the node street is found among the elements - + matching_street_name_found_list = tuple( _name in node_street_names[node_key] - for _name in 
network.edges[other_edge_key][ - osm.KEY_OSMNX_NAME] - ) - + for _name in network.edges[other_edge_key][osm.KEY_OSMNX_NAME] + ) + if True in matching_street_name_found_list: - # the street names match, this node is okay (case iii) - + matching_street_name_found = True - + break - + # check if a matching street name was found among the neighbours - + if matching_street_name_found: - # one was, this is not a problematic case (case iii) - + continue - + # all other cases are problematic: case v - - trouble_nodes.append(node_key) - + + trouble_nodes.append(node_key) + # ************************************************************************* - + # 3) for the nodes whose closest edges cannot be linked back to the no- - # des, find the edges that can, if any, (via their street names) and select + # des, find the edges that can, if any, (via their street names) and select # the closest one among them as a substitute for the closest one in general - + # 3.1) generate the list of edge keys per street - - unique_street_names = set( - node_street_names[node_key] for node_key in trouble_nodes - ) - + + unique_street_names = set(node_street_names[node_key] for node_key in trouble_nodes) + # edge keys with a given street name - + edges_per_street_name = { street_name: [ - edge_key for edge_key in network.edges(keys=True) + edge_key + for edge_key in network.edges(keys=True) if osm.KEY_OSMNX_NAME in network.edges[edge_key] if street_name in network.edges[edge_key][osm.KEY_OSMNX_NAME] - ] + ] for street_name in unique_street_names - } - + } + # 3.2) for each troublesome node, identify the edges that mention the same # street and pick the closest one - + for node_key in trouble_nodes: - # check the edge keys relevant for this node - + other_edge_keys = edges_per_street_name[node_street_names[node_key]] - + # check if there are no edges mentioning the street - + if len(other_edge_keys) == 0: - # no edges mentioning that street, skip - + continue - + # create a view - + new_network = network.edge_subgraph(edges=other_edge_keys) - + # pick the one that is closest - + other_closest_edge = gis_iden.nearest_edges( - new_network, - X=network.nodes[node_key][osm.KEY_OSMNX_X], + new_network, + X=network.nodes[node_key][osm.KEY_OSMNX_X], Y=network.nodes[node_key][osm.KEY_OSMNX_Y], - return_dist=False) - + return_dist=False, + ) + # replace previous entry - + building_entrance_edges[node_key] = other_closest_edge # ************************************************************************* - + # 4) for all other cases, use the closest edge among all # ************************************************************************* - + # revert network crs back to the original, if necessary - + if revert_to_original_crs: - network = gis_iden.project_graph(network, to_crs=original_crs) - + # return edge keys - + return building_entrance_edges, _closest_edge_keys_dict, network # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** -def convert_edge_path(network: MultiDiGraph, - path: list, - allow_reversed_edges: bool = False) -> list: + +def convert_edge_path( + network: MultiDiGraph, path: list, allow_reversed_edges: bool = False +) -> list: """ Converts a path of edge keys into a path of node keys.
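# a hedged usage sketch for convert_edge_path, assuming a toy MultiDiGraph
# whose (u, v, key) edge keys follow the networkx convention used here:
#
# import networkx as nx
# G = nx.MultiDiGraph()
# G.add_edges_from([(0, 1), (1, 2), (2, 3)])
# convert_edge_path(G, [(0, 1, 0), (1, 2, 0), (2, 3, 0)])
# # expected result: [0, 1, 2, 3]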
@@ -1191,7 +1124,7 @@ def convert_edge_path(network: MultiDiGraph, path : list A list of sequential edge keys that form a path. allow_reversed_edges : bool, optional - If True, edges in the opposite direction also count to form paths, as + If True, edges in the opposite direction also count to form paths, as long as the same nodes are involved. The default is False. Returns @@ -1200,15 +1133,13 @@ def convert_edge_path(network: MultiDiGraph, A list of node keys forming a path. """ - + # check if the path corresponds to an edge path if not gis_iden.is_edge_path( - network, - path, - ignore_edge_direction=allow_reversed_edges - ): - raise ValueError('No edge path was provided.') - + network, path, ignore_edge_direction=allow_reversed_edges + ): + raise ValueError("No edge path was provided.") + # path is a sequence of edge keys: convert to node path if allow_reversed_edges: # reverse edges are allowed @@ -1216,24 +1147,24 @@ def convert_edge_path(network: MultiDiGraph, edge_path = [ edge_key for edge_key in path - if edge_key[0] != edge_key[1] # exclude self loops - ] - - # if there is only one edge, the node path is straightforward + if edge_key[0] != edge_key[1] # exclude self loops + ] + + # if there is only one edge, the node path is straightforward if len(edge_path) == 1: return [edge_path[0][0], edge_path[0][1]] - + node_path = [] for edge_index, edge_key in enumerate(edge_path): # if there are no nodes yet on the path if len(node_path) == 0: # find out which node comes first if edge_key[0] in edge_path[1]: - # the start node is in the second edge too: reversed - node_path.append(edge_key[1]) + # the start node is in the second edge too: reversed + node_path.append(edge_key[1]) + node_path.append(edge_key[0]) + else: # the edge is not reversed node_path.append(edge_key[0]) - else: # the edge is not reversed - node_path.append(edge_key[0]) node_path.append(edge_key[1]) else: # find out which node comes after the previous node @@ -1248,12 +1179,13 @@ def convert_edge_path(network: MultiDiGraph, node_path = [ edge_key[0] for edge_key in path - if edge_key[0] != edge_key[1] # exclude self loops - ] + if edge_key[0] != edge_key[1] # exclude self loops + ] # add the last edge's end node node_path.append(path[-1][1]) # return statement return node_path - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/misc/__init__.py b/src/topupopt/data/misc/__init__.py index 633f866..40a96af 100644 --- a/src/topupopt/data/misc/__init__.py +++ b/src/topupopt/data/misc/__init__.py @@ -1,2 +1 @@ # -*- coding: utf-8 -*- - diff --git a/src/topupopt/data/misc/units.py b/src/topupopt/data/misc/units.py index a19fb1f..572167f 100644 --- a/src/topupopt/data/misc/units.py +++ b/src/topupopt/data/misc/units.py @@ -1,5 +1,5 @@ # constants - + # ***************************************************************************** # ***************************************************************************** @@ -9,18 +9,18 @@ kWh_DIV_MWh = 1000 -GJ_DIV_kWh = 1e9/(3600*1e3) +GJ_DIV_kWh = 1e9 / (3600 * 1e3) + +GJ_DIV_MWh = 1000 / 3600 -GJ_DIV_MWh = 1000/3600 +MWh_DIV_J = 1 / (3600 * 1000 * 1000) -MWh_DIV_J = 1/(3600*1000*1000) - # ***************************************************************************** # 
***************************************************************************** # currency conversions -EUR_DIV_DKK = 1/(743.95/100) - +EUR_DIV_DKK = 1 / (743.95 / 100) + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/data/misc/utils.py b/src/topupopt/data/misc/utils.py index 745a410..4855b62 100644 --- a/src/topupopt/data/misc/utils.py +++ b/src/topupopt/data/misc/utils.py @@ -10,62 +10,59 @@ import math from statistics import mean # local, external - + # ***************************************************************************** # ***************************************************************************** - -def generate_pseudo_unique_key(key_list: tuple, - max_iterations: int = 10) -> str: + + +def generate_pseudo_unique_key(key_list: tuple, max_iterations: int = 10) -> str: """Generates a pseudo-unique key that is not among a given list of keys.""" - + iteration = 0 - + while iteration < max_iterations: - new_key = str(uuid.uuid4()) - + if new_key not in key_list: - return new_key - + iteration += 1 - + raise Exception( - 'A unique key could not be found within ' - + - str(max_iterations) - + - ' iterations.' - ) - + "A unique key could not be found within " + str(max_iterations) + " iterations." + ) + + # ***************************************************************************** # ***************************************************************************** + def discrete_sinusoid_matching_integral( - integration_result: float, - time_interval_durations: list, - min_to_max_ratio: float, - phase_shift_radians: float = None) -> list: + integration_result: float, + time_interval_durations: list, + min_to_max_ratio: float, + phase_shift_radians: float = None, +) -> list: """ Returns a profile that approximates a sinusoidal function in discrete time. - + The profile is obtained by integrating the sinusoidal function between spe- cific time intervals. The sum of the samples adds up to a specified result. Additional parameters include the function\'s phase and the ratio between the function\'s minimum (b-a) and maximum (b+a) values. - + The sinusoidal function is as follows: - + u(t) = a * sin ( alpha * t + beta) + b - + where: - + a = b*(1-min_to_max_ratio)/(1+min_to_max_ratio) - + b = integration_result/integration_period - + alpha = 2*math.pi/integration_period - + beta = phase_shift_radians Parameters @@ -86,54 +83,48 @@ def discrete_sinusoid_matching_integral( A profile approximating the aforementioned sinusoidal function. 
""" - + number_time_steps = len(time_interval_durations) - + integration_period = sum(time_interval_durations) - - b = integration_result/integration_period - - a = b*(1-min_to_max_ratio)/(1+min_to_max_ratio) - - alpha = 2*math.pi/integration_period - + + b = integration_result / integration_period + + a = b * (1 - min_to_max_ratio) / (1 + min_to_max_ratio) + + alpha = 2 * math.pi / integration_period + if phase_shift_radians is None: - beta = 0 - + else: - beta = phase_shift_radians - - t = [sum(time_interval_durations[0:i]) - for i in range(len(time_interval_durations)+1)] - - + + t = [ + sum(time_interval_durations[0:i]) + for i in range(len(time_interval_durations) + 1) + ] + def _integral(a, b, alpha, beta, t, t0): - return ( - -(a/alpha)*math.cos(alpha*t+beta) - + - b*(t-t0) - + - (a/alpha)*math.cos(alpha*t0+beta) - ) - + -(a / alpha) * math.cos(alpha * t + beta) + + b * (t - t0) + + (a / alpha) * math.cos(alpha * t0 + beta) + ) + return [ - _integral(a, b, alpha, beta, t[i+1], t[i]) - for i in range(number_time_steps) - ] - + _integral(a, b, alpha, beta, t[i + 1], t[i]) for i in range(number_time_steps) + ] + + # ***************************************************************************** # ***************************************************************************** -def synch_profile( - profile: list, - reference_profile: list, - synch: bool = True) -> list: + +def synch_profile(profile: list, reference_profile: list, synch: bool = True) -> list: """ Rearranges a profile based on the samples of a reference profile. - + By default, the profiles are synched: the highest sample in one is placed in the same position as the highest sample in the other; the second highest sample is placede in the same position as the second highest sample in the @@ -161,78 +152,72 @@ def synch_profile( """ # min reference >> max unsorted - + # lowest ref >> max unsorted # 2nd lowest ref >> 2nd highest unsorted # 3rd lowest ref >> 3rd highest unsorted - + if synch: - # regular synch - - sorted_profile = sorted((p,i) for i, p in enumerate(profile)) - - sorted_ref_profile = sorted( - (r,i) for i, r in enumerate(reference_profile) - ) - + + sorted_profile = sorted((p, i) for i, p in enumerate(profile)) + + sorted_ref_profile = sorted((r, i) for i, r in enumerate(reference_profile)) + return [ - sorted_profile[sorted_ref_profile.index((r,i))][0] + sorted_profile[sorted_ref_profile.index((r, i))][0] for i, r in enumerate(reference_profile) - ] - + ] + else: - # reverse synch - - sorted_profile = sorted( - ((p,i) for i, p in enumerate(profile)), - reverse=True - ) - - sorted_ref_profile = sorted( - (r,i) for i, r in enumerate(reference_profile) - ) - + + sorted_profile = sorted(((p, i) for i, p in enumerate(profile)), reverse=True) + + sorted_ref_profile = sorted((r, i) for i, r in enumerate(reference_profile)) + return [ - sorted_profile[sorted_ref_profile.index((r,i))][0] + sorted_profile[sorted_ref_profile.index((r, i))][0] for i, r in enumerate(reference_profile) - ] - + ] + + # ***************************************************************************** # ***************************************************************************** + def create_profile_using_time_weighted_state( - integration_result: float, - avg_state: list, - time_interval_durations: list, - min_to_max_ratio: float, - state_correlates_with_output: bool = True) -> list: + integration_result: float, + avg_state: list, + time_interval_durations: list, + min_to_max_ratio: float, + state_correlates_with_output: bool = True, +) -> list: """ 
Returns a profile that approximates a sinusoidal function in discrete time. - + The profile is obtained by integrating the sinusoidal function between spe- cific time intervals. The sum of the samples adds up to a specified result. - + The profile\'s peak is made to coincide with that of a function of the time-weighted average state, that is, the average state during each time interval weighted by the respective time duration relative to the average. It can also be made to match the lowest time-weighted average state. - + The sinusoidal function is as follows: - + u(t) = a * sin ( alpha * t + beta) + b - + where: - + a = b*(1-min_to_max_ratio)/(1+min_to_max_ratio) - + b = integration_result/integration_period - + alpha = 2*math.pi/integration_period - + beta = phase shift in radians (determined automatically) - + Parameters ---------- integration_result : float @@ -260,82 +245,84 @@ def create_profile_using_time_weighted_state( its peak placed where .. """ - + if len(avg_state) != len(time_interval_durations): - - raise ValueError('The inputs are inconsistent.') - + raise ValueError("The inputs are inconsistent.") + period = sum(time_interval_durations) - + avg_time_interval_duration = mean(time_interval_durations) - + avg_state_weighted = [ - (x_k*delta_k/avg_time_interval_duration - if state_correlates_with_output else - -x_k*delta_k/avg_time_interval_duration) + ( + x_k * delta_k / avg_time_interval_duration + if state_correlates_with_output + else -x_k * delta_k / avg_time_interval_duration + ) for delta_k, x_k in zip(time_interval_durations, avg_state) - ] - + ] + # find the peak - + _sorted = sorted( - ((state,index) for index, state in enumerate(avg_state_weighted)), - reverse=True - ) - + ((state, index) for index, state in enumerate(avg_state_weighted)), reverse=True + ) + # create new list for time durations starting with that of the peak - + swapped_time_durations = [ - *time_interval_durations[_sorted[0][1]:], - *time_interval_durations[0:_sorted[0][1]] - ] - + *time_interval_durations[_sorted[0][1] :], + *time_interval_durations[0 : _sorted[0][1]], + ] + # create sinusoidal profile based on that peak - + new_profile = discrete_sinusoid_matching_integral( integration_result=integration_result, time_interval_durations=swapped_time_durations, min_to_max_ratio=min_to_max_ratio, phase_shift_radians=( - math.pi/2 - - - 0.5*(time_interval_durations[_sorted[0][1]]/period)*2*math.pi - ) - ) - + math.pi / 2 + - 0.5 * (time_interval_durations[_sorted[0][1]] / period) * 2 * math.pi + ), + ) + # return profile in correct order n = len(time_interval_durations) - return [*new_profile[n-_sorted[0][1]:], *new_profile[0:n-_sorted[0][1]]] + return [*new_profile[n - _sorted[0][1] :], *new_profile[0 : n - _sorted[0][1]]] + # ***************************************************************************** # ***************************************************************************** + def max_min_sinusoidal_profile( - integration_result: float or int, - period: float or int, - time_interval_duration: float or int, - min_to_max_ratio: float) -> tuple: + integration_result: float or int, + period: float or int, + time_interval_duration: float or int, + min_to_max_ratio: float, +) -> tuple: """ Returns the maximum and minimum amount for a given time interval, according to a sinusoidal function of time. - + The profile is obtained by integrating the sinusoidal function between spe- cific time intervals. The sum of the samples adds up to a specified result. 
Additional parameters include the function\'s phase and the ratio between the function\'s minimum and average values. - + The sinusoidal function is as follows: - + u(t) = a * sin ( alpha * t + beta) + b - + where: - + a = b*(1-min_to_max_ratio)/(1+min_to_max_ratio) - + b = integration_result/integration_period - + alpha = 2*math.pi/integration_period - + beta = phase_shift_radians Parameters @@ -356,16 +343,17 @@ def max_min_sinusoidal_profile( The maximum and minimum integral during the specified time interval. """ - - b = integration_result/period - a = b*(1-min_to_max_ratio)/(1+min_to_max_ratio) - alpha = 2*math.pi/period - amplitude = a*(2/alpha)*math.sin(alpha*time_interval_duration/2) - + + b = integration_result / period + a = b * (1 - min_to_max_ratio) / (1 + min_to_max_ratio) + alpha = 2 * math.pi / period + amplitude = a * (2 / alpha) * math.sin(alpha * time_interval_duration / 2) + return ( - b*time_interval_duration+amplitude, - b*time_interval_duration-amplitude - ) + b * time_interval_duration + amplitude, + b * time_interval_duration - amplitude, + ) + # ***************************************************************************** # ***************************************************************************** diff --git a/src/topupopt/problems/__init__.py b/src/topupopt/problems/__init__.py index e7ce4a7..80cf2ba 100644 --- a/src/topupopt/problems/__init__.py +++ b/src/topupopt/problems/__init__.py @@ -1,2 +1,2 @@ # -*- coding: utf-8 -*- -#from . import mvesipp \ No newline at end of file +# from . import mvesipp diff --git a/src/topupopt/problems/esipp/__init__.py b/src/topupopt/problems/esipp/__init__.py index 9f85ec5..c5f6e93 100644 --- a/src/topupopt/problems/esipp/__init__.py +++ b/src/topupopt/problems/esipp/__init__.py @@ -1 +1 @@ -from . import problem \ No newline at end of file +from . 
import problem diff --git a/src/topupopt/problems/esipp/converter.py b/src/topupopt/problems/esipp/converter.py index 769179a..9a1ab26 100644 --- a/src/topupopt/problems/esipp/converter.py +++ b/src/topupopt/problems/esipp/converter.py @@ -16,7 +16,7 @@ import numpy as np from .dynsys import DynamicSystem from .signal import Signal, FixedSignal - + # ***************************************************************************** # ***************************************************************************** @@ -24,461 +24,432 @@ from .signal import Signal, FixedSignal # 1) organise information for optimisation # 2) upload optimisation results and compute the various objectives # 3) retrieve information -# 4) +# 4) # TODO: create constant terms using fixed signals - + + class Converter: """A class for modular dynamic systems in an integrated energy system.""" - - def __init__(self, - # converter name/key - key, - # system information - sys: DynamicSystem, - # initial conditions - initial_states: np.array, - # optimisation-relevant parameters - turn_key_cost: float or int, - # information about inputs - inputs: list or Signal = None, - # information about outputs - outputs: list or Signal = None, - # information about states - states: list or Signal = None, - # input amplitude costs - input_specific_amplitude_costs: dict = None, - # output amplitude costs - output_specific_amplitude_costs: dict = None, - # states amplitude costs - state_specific_amplitude_costs: dict = None, - # externalities - # externalities due - input_specific_externality_costs: dict = None,# one for every input - output_specific_externality_costs: dict = None, # one for every input, then another for every time interval - state_specific_externality_costs: dict = None # one for every output, then another for every time interval - ): - + + def __init__( + self, + # converter name/key + key, + # system information + sys: DynamicSystem, + # initial conditions + initial_states: np.array, + # optimisation-relevant parameters + turn_key_cost: float or int, + # information about inputs + inputs: list or Signal = None, + # information about outputs + outputs: list or Signal = None, + # information about states + states: list or Signal = None, + # input amplitude costs + input_specific_amplitude_costs: dict = None, + # output amplitude costs + output_specific_amplitude_costs: dict = None, + # states amplitude costs + state_specific_amplitude_costs: dict = None, + # externalities + # externalities due + input_specific_externality_costs: dict = None, # one for every input + output_specific_externality_costs: dict = None, # one for every input, then another for every time interval + state_specific_externality_costs: dict = None, # one for every output, then another for every time interval + ): # ********************************************************************* - + self.key = key - + self.sys = sys - + # ********************************************************************* - + # inputs - + if type(inputs) == list: - self.inputs = inputs - + elif isinstance(inputs, Signal): - self.inputs = [inputs] - + else: - - raise TypeError('Unknown format for input signals.') - + raise TypeError("Unknown format for input signals.") + # outputs - + if type(outputs) == list: - self.outputs = outputs - + elif isinstance(outputs, Signal): - self.outputs = [outputs] - + else: - - raise TypeError('Unknown format for output signals.') - + raise TypeError("Unknown format for output signals.") + # states - + if type(states) == list: - self.states = 
states - + elif isinstance(states, Signal): - self.states = [states] - + else: - - raise TypeError('Unknown format for state signals.') - - + raise TypeError("Unknown format for state signals.") + self.initial_states = initial_states - + # identify fixed signals - + self.fixed_inputs = self.identify_fixed_signals(self.inputs) - + # ********************************************************************* - + # amplitude costs: one per signal - + self.input_specific_amplitude_costs = input_specific_amplitude_costs - + self.output_specific_amplitude_costs = output_specific_amplitude_costs - + self.state_specific_amplitude_costs = state_specific_amplitude_costs - + # externality costs: one per signal and time interval - - self.input_specific_externality_costs = ( - input_specific_externality_costs - ) - - self.state_specific_externality_costs = ( - state_specific_externality_costs - ) - - self.output_specific_externality_costs = ( - output_specific_externality_costs - ) - + + self.input_specific_externality_costs = input_specific_externality_costs + + self.state_specific_externality_costs = state_specific_externality_costs + + self.output_specific_externality_costs = output_specific_externality_costs + # ********************************************************************* # ********************************************************************* - + # identify the signals with specific amplitude costs - + # inputs - + if type(self.input_specific_amplitude_costs) == type(None): - self.amplitude_penalised_inputs = None - + else: - self.amplitude_penalised_inputs = list( self.input_specific_amplitude_costs.keys() - ) - + ) + # states - + if type(self.state_specific_amplitude_costs) == type(None): - self.amplitude_penalised_states = None - + else: - self.amplitude_penalised_states = list( self.state_specific_amplitude_costs.keys() - ) - + ) + # outputs - + if type(self.output_specific_amplitude_costs) == type(None): - self.amplitude_penalised_outputs = None - + else: - self.amplitude_penalised_outputs = list( self.output_specific_amplitude_costs.keys() - ) - + ) + # ********************************************************************* # ********************************************************************* - + # identify the signals with specific externality costs - + # inputs - + if type(self.input_specific_externality_costs) == type(None): - self.externality_inducing_inputs = [] - + else: - self.externality_inducing_inputs = [ - key[0] # key is a (m, k) tuple + key[0] # key is a (m, k) tuple for key, value in self.input_specific_externality_costs.items() if value != 0 - ] - + ] + # states - + if type(self.state_specific_externality_costs) == type(None): - self.externality_inducing_states = [] - + else: - self.externality_inducing_states = [ - key[0] # key is a (n, k) tuple + key[0] # key is a (n, k) tuple for key, value in self.state_specific_externality_costs.items() if value != 0 - ] - + ] + # outputs - + if type(self.output_specific_externality_costs) == type(None): - self.externality_inducing_outputs = [] - + else: - self.externality_inducing_outputs = [ - key[0] # key is a (r, k) tuple + key[0] # key is a (r, k) tuple for key, value in self.output_specific_externality_costs.items() if value != 0 - ] - + ] + # ********************************************************************* - + # identify binary signals - + # identify bounded signals: those with predefined upper or lower bounds - + # identify dimensionable signals: those with amplitude penalties - + # # identify the signals with specific 
externality costs - + # self.identify_binary_inputs() - + # self.identify_dimensionable_inputs() - + # self.identify_externality_inducing_inputs() - + # self.identify_externality_inducing_outputs() - + # self.identify_amplitude_penalised_inputs() - + # ********************************************************************* # ********************************************************************* - + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* - + @staticmethod def identify_fixed_signals(signals: list): - - return [ - u for u, sig in enumerate(signals) if isinstance(sig, FixedSignal) - ] - + return [u for u, sig in enumerate(signals) if isinstance(sig, FixedSignal)] + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* - + # TODO: number of time intervals in the dynamic system class - + # TODO: replace the use of the number of time intervals from signals' objects - + def matrix_dictionaries(self) -> tuple: - # TODO: exclude fixed signals from the matrices # how? skip fixed signals = the dicts do not have that information and the indexes will be correct - + number_intervals = self.inputs[0].number_samples - + # a_innk - + a_innk = { - (self.key,n1,n2,k): self.sys.A_line_k[ - k if self.sys.A_line_is_time_varying else 0][n1, n2] - for n1 in range(self.sys.number_states) # the state being defined - for n2 in range(self.sys.number_states) # the influencing state - for k in range(number_intervals) # the time interval - } - + (self.key, n1, n2, k): self.sys.A_line_k[ + k if self.sys.A_line_is_time_varying else 0 + ][n1, n2] + for n1 in range(self.sys.number_states) # the state being defined + for n2 in range(self.sys.number_states) # the influencing state + for k in range(number_intervals) # the time interval + } + # b_inmk - + b_inmk = { - (self.key,n1,m,k): self.sys.B_line_k[ - k if self.sys.B_line_is_time_varying else 0][n1, m] - for n1 in range(self.sys.number_states) # the state being defined + (self.key, n1, m, k): self.sys.B_line_k[ + k if self.sys.B_line_is_time_varying else 0 + ][n1, m] + for n1 in range(self.sys.number_states) # the state being defined for m in range(self.sys.number_inputs) # the influencing input - if m not in self.fixed_inputs # free inputs only - for k in range(number_intervals) # the time interval - } - + if m not in self.fixed_inputs # free inputs only + for k in range(number_intervals) # the time interval + } + # c_irnk - + c_irnk = { - (self.key,r,n,k): self.sys.C_line_k[ - k if self.sys.C_line_is_time_varying else 0][r, n] - for r in range(self.sys.number_outputs) # the output being defined + (self.key, r, n, k): self.sys.C_line_k[ + k if self.sys.C_line_is_time_varying else 0 + ][r, n] + for r in range(self.sys.number_outputs) # the output being defined for n in range(self.sys.number_states) # the influencing state - for k in range(number_intervals) # the time interval - } - + for k in range(number_intervals) # the time interval + } + # d_irmk - + d_irmk = { - (self.key,r,m,k): self.sys.D_line_k[ - k if self.sys.D_line_is_time_varying else 0][r, m] - for r in range(self.sys.number_outputs) # the output being defined + (self.key, r, m, k): self.sys.D_line_k[ + k if self.sys.D_line_is_time_varying else 0 + 
][r, m] + for r in range(self.sys.number_outputs) # the output being defined for m in range(self.sys.number_inputs) # the influencing input - if m not in self.fixed_inputs # free inputs only - for k in range(number_intervals) # the time interval - } - - # note: e_x_ink does not depend on the initial conditions since the + if m not in self.fixed_inputs # free inputs only + for k in range(number_intervals) # the time interval + } + + # note: e_x_ink does not depend on the initial conditions since the # a_innk coefficients contain the information to handle them elsewhere - + # e_x_ink: depends on fixed signals - + e_x_ink = { - (self.key,n,k): - sum( - self.sys.B_line_k[ - k if self.sys.B_line_is_time_varying else 0][n,m]* - self.inputs[m].samples[k] - for m in self.fixed_inputs # b_inmk*u_imk for fixed inputs - ) + (self.key, n, k): sum( + self.sys.B_line_k[k if self.sys.B_line_is_time_varying else 0][n, m] + * self.inputs[m].samples[k] + for m in self.fixed_inputs # b_inmk*u_imk for fixed inputs + ) for n in range(self.sys.number_states) # the state being defined - for k in range(number_intervals) # the time interval - } - + for k in range(number_intervals) # the time interval + } + # e_y_irk: depends on fixed signals - + e_y_irk = { - (self.key,r,k): sum( - self.sys.D_line_k[ - k if self.sys.D_line_is_time_varying else 0][r,m]* - self.inputs[m].samples[k] - for m in self.fixed_inputs # d_irmk*u_imk for fixed inputs - ) - for r in range(self.sys.number_outputs) # the output being defined - for k in range(number_intervals) # the time interval - } - + (self.key, r, k): sum( + self.sys.D_line_k[k if self.sys.D_line_is_time_varying else 0][r, m] + * self.inputs[m].samples[k] + for m in self.fixed_inputs # d_irmk*u_imk for fixed inputs + ) + for r in range(self.sys.number_outputs) # the output being defined + for k in range(number_intervals) # the time interval + } + # return statement - + return a_innk, b_inmk, c_irnk, d_irmk, e_x_ink, e_y_irk - + + +# # ************************************************************************* +# # ************************************************************************* + +# def has_dimensionable_inputs(self): + +# if len(self.dimensionable_inputs) == 0: + +# # the system has no dimensionable inputs + +# return False + +# else: # the system has dimensionable inputs + +# return True + +# # ************************************************************************* +# # ************************************************************************* + +# def has_binary_inputs(self): + +# if len(self.binary_inputs) == 0: + +# # the system has no binary inputs + +# return False + +# else: # the system has binary inputs + +# return True + +# # ************************************************************************* # # ************************************************************************* -# # ************************************************************************* - - # def has_dimensionable_inputs(self): - - # if len(self.dimensionable_inputs) == 0: - - # # the system has no dimensionable inputs - - # return False - - # else: # the system has dimensionable inputs - - # return True - - # # ************************************************************************* - # # ************************************************************************* - - # def has_binary_inputs(self): - - # if len(self.binary_inputs) == 0: - - # # the system has no binary inputs - - # return False - - # else: # the system has binary inputs - - # return True - - # # 
************************************************************************* - # # ************************************************************************* - - # def has_amplitude_penalised_inputs(self): - - # if len(self.amplitude_penalised_inputs) == 0: - - # # the system has no amplitude-penalised inputs - - # return False - - # else: # the system has amplitude-penalised inputs - - # return True - - # # ************************************************************************* - # # ************************************************************************* - - # def has_externality_inducing_inputs(self): - - # if len(self.externality_inducing_inputs) == 0: - - # # the system has no externality-inducing inputs - - # return False - - # else: # the system has externality-inducing inputs - - # return True - - # # ************************************************************************* - # # ************************************************************************* - - # def has_externality_inducing_outputs(self): - - # if len(self.externality_inducing_outputs) == 0: - - # # the system has no externality-inducing outputs - - # return False - - # else: # the system has externality-inducing outputs - - # return True - - # # ************************************************************************* - # # ************************************************************************* - - # def identify_dimensionable_inputs(self): - - # self.dimensionable_inputs = [ - # i - # for i, u in enumerate(self.inputs) - # if u.is_dimensionable] - - # # ************************************************************************* - # # ************************************************************************* - - # def identify_binary_inputs(self): - - # self.binary_inputs = [ - # i - # for i, u in enumerate(self.inputs) - # if u.is_binary] - - # # ************************************************************************* - # # ************************************************************************* - - # def identify_externality_inducing_inputs(self): - - # self.externality_inducing_inputs = [ - # i - # for i, c in enumerate(self.input_externalities) - # if c != 0] - - # # ************************************************************************* - # # ************************************************************************* - - # def identify_externality_inducing_outputs(self): - - # self.externality_inducing_outputs = [ - # i - # for i, c in enumerate(self.output_externalities) - # if c != 0] - - # # ************************************************************************* - # # ************************************************************************* - - # def identify_amplitude_penalised_inputs(self): - - # self.amplitude_penalised_inputs = [ - # i - # for i, c in enumerate(self.input_amplitude_costs) - # if c != 0] - - # # ************************************************************************* - # # ************************************************************************* - + +# def has_amplitude_penalised_inputs(self): + +# if len(self.amplitude_penalised_inputs) == 0: + +# # the system has no amplitude-penalised inputs + +# return False + +# else: # the system has amplitude-penalised inputs + +# return True + +# # ************************************************************************* +# # ************************************************************************* + +# def has_externality_inducing_inputs(self): + +# if len(self.externality_inducing_inputs) == 0: + +# # the system has no externality-inducing 
inputs + +# return False + +# else: # the system has externality-inducing inputs + +# return True + +# # ************************************************************************* +# # ************************************************************************* + +# def has_externality_inducing_outputs(self): + +# if len(self.externality_inducing_outputs) == 0: + +# # the system has no externality-inducing outputs + +# return False + +# else: # the system has externality-inducing outputs + +# return True + +# # ************************************************************************* +# # ************************************************************************* + +# def identify_dimensionable_inputs(self): + +# self.dimensionable_inputs = [ +# i +# for i, u in enumerate(self.inputs) +# if u.is_dimensionable] + +# # ************************************************************************* +# # ************************************************************************* + +# def identify_binary_inputs(self): + +# self.binary_inputs = [ +# i +# for i, u in enumerate(self.inputs) +# if u.is_binary] + +# # ************************************************************************* +# # ************************************************************************* + +# def identify_externality_inducing_inputs(self): + +# self.externality_inducing_inputs = [ +# i +# for i, c in enumerate(self.input_externalities) +# if c != 0] + +# # ************************************************************************* +# # ************************************************************************* + +# def identify_externality_inducing_outputs(self): + +# self.externality_inducing_outputs = [ +# i +# for i, c in enumerate(self.output_externalities) +# if c != 0] + +# # ************************************************************************* +# # ************************************************************************* + +# def identify_amplitude_penalised_inputs(self): + +# self.amplitude_penalised_inputs = [ +# i +# for i, c in enumerate(self.input_amplitude_costs) +# if c != 0] + +# # ************************************************************************* +# # ************************************************************************* + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/dynsys.py b/src/topupopt/problems/esipp/dynsys.py index 082298b..006ae9d 100644 --- a/src/topupopt/problems/esipp/dynsys.py +++ b/src/topupopt/problems/esipp/dynsys.py @@ -14,63 +14,65 @@ import numpy as np from scipy.linalg import expm, inv # local libraries, internal - + # ***************************************************************************** # ***************************************************************************** # TODO: integrate only selected outputs on demand, not all of them # TODO: ensure it is compatible with stateless systems - + + class DynamicSystem: """A class for dynamic systems described using A, B, C and D matrices.""" - + # use cases: # 1) the A, B, C and D matrices are provided: inputs, states and outputs (general case) # 2) the A and B matrices are provided: inputs and states (no outputs) # 3) the D matrix is provided: inputs and outputs (no states) - + # formats: # 1) the (A, B, C and D) matrices are provided directly as numpy arrays (1 matrix each 
at most) # 2) the (A, B, C and D) matrices are provided within lists, whose length # is the same as the number of time intervals, to represent time variation # 3) the matrices are provided independently of each other, and may vary # in time or not, independently of the others (not ready yet) - + # note: matrices must be provided, even if they are zero matrices - + # other options: # i) time intervals: can be supplied as a numeric type (single interval) or # as a list, for multiple intervals (regular or irregular) # note: if the time interval is provided as a numeric type, the number of - # intervals may still be more than one, in accordance with the number of + # intervals may still be more than one, in accordance with the number of # matrices, in which case an internal list of time intervals will be created # ii) integrate outputs: it may not be relevant to obtain instantaneous # information, in which case the outputs can be integrated (all or none, for now) # note: this option requires the D matrix - - # TODO: define whether this class holds information about the number of + + # TODO: define whether this class holds information about the number of # time intervals or if that information comes from somewhere else - + # ************************************************************************* # ************************************************************************* - - def __init__(self, - time_interval_durations: list or float or int, - A: list or np.array, - B: list or np.array, - C: list or np.array, - D: list or np.array, - integrate_outputs: bool = True): - + + def __init__( + self, + time_interval_durations: list or float or int, + A: list or np.array, + B: list or np.array, + C: list or np.array, + D: list or np.array, + integrate_outputs: bool = True, + ): # steps: # 1) validate the input data # 2) compute and/or adjust the data in preparation for discretisation # 3) discretise the model - + # ********************************************************************* # ********************************************************************* - + # cases: # 1) time invariant system, regular time steps # >> one continuous SS model discretised once and reused every step @@ -81,543 +83,472 @@ class DynamicSystem: # 4) time-varying system, irregular time steps # >> multiple continous SS models, each discretised once # note: case 3 and 4 are effectively the same - + # ********************************************************************* # ********************************************************************* - + # if time_interval_durations is a number, transform it into a list - - if (type(time_interval_durations) == int or - type(time_interval_durations) == float): - + + if ( + type(time_interval_durations) == int + or type(time_interval_durations) == float + ): # a single time interval duration has been provided - + if time_interval_durations <= 0: - # zero or negative time interval duration - - raise ValueError( - 'Time interval durations cannot be zero nor negative.') - - #self.number_time_intervals = len(time_interval_durations) - + + raise ValueError("Time interval durations cannot be zero nor negative.") + + # self.number_time_intervals = len(time_interval_durations) + self.time_interval_durations = [time_interval_durations] - + + # validate the inputs + + ( + self.number_inputs, + self.number_states, + self.number_outputs, + self.A_is_time_varying, + self.B_is_time_varying, + self.C_is_time_varying, + self.D_is_time_varying, + ) = self.validate( + 
time_interval_durations=[time_interval_durations], A=A, B=B, C=C, D=D + ) + + else: # time_interval_durations is not a number + self.time_interval_durations = time_interval_durations + # validate the inputs - - (self.number_inputs, - self.number_states, - self.number_outputs, - self.A_is_time_varying, - self.B_is_time_varying, - self.C_is_time_varying, - self.D_is_time_varying) = self.validate( - time_interval_durations=[time_interval_durations], - A=A, - B=B, - C=C, - D=D) - - else: # time_interval_durations is not a number - - self.time_interval_durations = time_interval_durations - - # validate the inputs - - (self.number_inputs, - self.number_states, - self.number_outputs, - self.A_is_time_varying, - self.B_is_time_varying, - self.C_is_time_varying, - self.D_is_time_varying) = self.validate( - time_interval_durations=time_interval_durations, - A=A, - B=B, - C=C, - D=D) - + + ( + self.number_inputs, + self.number_states, + self.number_outputs, + self.A_is_time_varying, + self.B_is_time_varying, + self.C_is_time_varying, + self.D_is_time_varying, + ) = self.validate( + time_interval_durations=time_interval_durations, A=A, B=B, C=C, D=D + ) + # ********************************************************************* - + # determine the number of models - + if self.A_is_time_varying: - self.number_models = len(A) - + else: - self.number_models = 1 - + if self.B_is_time_varying: - if len(B) > self.number_models: - - self.number_models = len(B) # TODO: reach this statement - + self.number_models = len(B) # TODO: reach this statement + if self.C_is_time_varying: - if len(C) > self.number_models: - - self.number_models = len(C) # TODO: reach this statement - + self.number_models = len(C) # TODO: reach this statement + if self.D_is_time_varying: - if len(D) > self.number_models: - - self.number_models = len(D) # TODO: reach this statement + self.number_models = len(D) # TODO: reach this statement # extend the time_interval_durations for convenience - - if (len(self.time_interval_durations) == 1 and - (self.A_is_time_varying or - self.B_is_time_varying or - self.C_is_time_varying or - self.D_is_time_varying)): - - # time_interval_durations is a list with a single element and + + if len(self.time_interval_durations) == 1 and ( + self.A_is_time_varying + or self.B_is_time_varying + or self.C_is_time_varying + or self.D_is_time_varying + ): + # time_interval_durations is a list with a single element and # at least one of the matrices is time-varying - + # if self.A_is_time_varying: - + # number_models = len(A) - + # else: - + # number_models = 1 - + # if self.B_is_time_varying: - + # if len(B) > number_models: - + # number_models = len(B) - + # if self.C_is_time_varying: - + # if len(C) > number_models: - + # number_models = len(C) - + # if self.D_is_time_varying: - + # if len(D) > number_models: - + # number_models = len(D) - - self.time_interval_durations = [ # TODO: reach this statement - self.time_interval_durations[0] - for _ in range(self.number_models)] - + + self.time_interval_durations = [ # TODO: reach this statement + self.time_interval_durations[0] for _ in range(self.number_models) + ] + # else: - + # self.time_interval_durations = list(time_interval_durations) - + # determine the number of models - + # ********************************************************************* # ********************************************************************* - + # get the data into lists - + # matrices - + if type(A) == list or A is None: - self.A = A - + else: - self.A = [A] - + if type(B) == list or B 
is None: - self.B = B - + else: - - self.B = [B] - + self.B = [B] + if type(C) == list or C is None: - self.C = C - + else: - self.C = [C] - + if type(D) == list or D is None: - self.D = D - + else: - self.D = [D] - + # ********************************************************************* # ********************************************************************* - + # discretise the model - + self.discretise(integrate_outputs=integrate_outputs) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* - # ************************************************************************* - - def validate(self, - time_interval_durations: list or np.array, - A: list or np.array, - B: list or np.array, - C: list or np.array, - D: list or np.array) -> tuple: - + # ************************************************************************* + + def validate( + self, + time_interval_durations: list or np.array, + A: list or np.array, + B: list or np.array, + C: list or np.array, + D: list or np.array, + ) -> tuple: # ********************************************************************* # ********************************************************************* - + # time interval duration - + if type(time_interval_durations) == list: - # a list has been provided - + if len(time_interval_durations) == 0: - # empty list - - raise ValueError('No time interval durations were provided.') - + + raise ValueError("No time interval durations were provided.") + else: - # check each element - + for elem in time_interval_durations: - # check if they are numbers - - if (type(elem) == int or - type(elem) == float): - + + if type(elem) == int or type(elem) == float: # elem is an integer or a float, all is fine so far - + if elem <= 0: - # zero or negative time interval duration - + raise ValueError( - 'Time interval durations cannot be zero nor '+ - 'negative.') - - else: # not an integer nor a float, something is up - - raise TypeError( - 'Unrecognised time interval duration format.') - - else: # unknown input - - raise TypeError( - 'Unrecognised time interval duration format.') - + "Time interval durations cannot be zero nor " + + "negative." 
+ ) + + else: # not an integer nor a float, something is up + raise TypeError("Unrecognised time interval duration format.") + + else: # unknown input + raise TypeError("Unrecognised time interval duration format.") + # ********************************************************************* # ********************************************************************* - + # if multiple matrices are provided for a given position, then they # must all have the same shape (rows and columns) - + # if multiple matrices are provided for a given position, and more than # one time interval is provided, then their sizes must match - + # start by assuming the matrices are invariant - + A_is_time_varying = False - + B_is_time_varying = False - + C_is_time_varying = False - - D_is_time_varying = False - + + D_is_time_varying = False + # A matrix - + if type(A) == list: - # A is a list - + if isinstance(A[0], np.ndarray): - A_m, A_n = A[0].shape - + else: - - raise TypeError('Unrecognised A matrix format.') - + raise TypeError("Unrecognised A matrix format.") + for a in A: - if a.shape != (A_m, A_n): - - raise ValueError('The A matrices are inconsistent.') - + raise ValueError("The A matrices are inconsistent.") + if type(time_interval_durations) == list: - - if (len(time_interval_durations) != len(A) and - len(time_interval_durations) > 1 and - len(A) > 1): - + if ( + len(time_interval_durations) != len(A) + and len(time_interval_durations) > 1 + and len(A) > 1 + ): # the number of time intervals and matrices do not match & # they are larger than one: mismatch - + raise ValueError( - 'The number of A matrices is inconsistent with the number of time intervals.' - ) - + "The number of A matrices is inconsistent with the number of time intervals." + ) + if len(A) > 1: - A_is_time_varying = True - + elif isinstance(A, np.ndarray): - # A is not a list - + A_m, A_n = A.shape - + elif A is None: - A_m, A_n = 0, 0 - - else: - - raise TypeError('Unrecognised A matrix format.') - + + else: + raise TypeError("Unrecognised A matrix format.") + # B matrix - + if type(B) == list: - # B is a list - + if isinstance(B[0], np.ndarray): - B_m, B_n = B[0].shape - + else: - - raise TypeError('Unrecognised B matrix format.') - + raise TypeError("Unrecognised B matrix format.") + for b in B: - if b.shape != (B_m, B_n): - - raise ValueError('The B matrices are inconsistent.') - + raise ValueError("The B matrices are inconsistent.") + if type(time_interval_durations) == list: - - if (len(time_interval_durations) != len(B) and - len(time_interval_durations) > 1 and - len(B) > 1): - + if ( + len(time_interval_durations) != len(B) + and len(time_interval_durations) > 1 + and len(B) > 1 + ): # the number of time intervals and matrices do not match & # they are larger than one: mismatch - + raise ValueError( - 'The number of B matrices is inconsistent with the number of time intervals.' - ) - + "The number of B matrices is inconsistent with the number of time intervals." 
+ ) + if len(B) > 1: - B_is_time_varying = True - + elif isinstance(B, np.ndarray): - # B is not a list - + B_m, B_n = B.shape - + elif A is None: - B_m, B_n = 0, 0 - - else: - - raise TypeError('Unrecognised B matrix format.') - + + else: + raise TypeError("Unrecognised B matrix format.") + # C matrix - + if type(C) == list: - # C is a list - + if isinstance(C[0], np.ndarray): - C_m, C_n = C[0].shape - + else: - - raise TypeError('Unrecognised C matrix format.') - + raise TypeError("Unrecognised C matrix format.") + for c in C: - if c.shape != (C_m, C_n): - - raise ValueError('The C matrices are inconsistent.') - + raise ValueError("The C matrices are inconsistent.") + if type(time_interval_durations) == list: - - if (len(time_interval_durations) != len(C) and - len(time_interval_durations) > 1 and - len(C) > 1): - + if ( + len(time_interval_durations) != len(C) + and len(time_interval_durations) > 1 + and len(C) > 1 + ): # the number of time intervals and matrices do not match & # they are larger than one: mismatch - + raise ValueError( - 'The number of C matrices is inconsistent with the number of time intervals.' - ) - + "The number of C matrices is inconsistent with the number of time intervals." + ) + if len(C) > 1: - C_is_time_varying = True - + elif isinstance(C, np.ndarray): - # C is not a list - + C_m, C_n = C.shape - + elif C is None: - C_m, C_n = 0, 0 - - else: - - raise TypeError('Unrecognised C matrix format.') - + + else: + raise TypeError("Unrecognised C matrix format.") + # D matrix - + if type(D) == list: - # D is a list - + if isinstance(D[0], np.ndarray): - D_m, D_n = D[0].shape - + else: - - raise TypeError('Unrecognised D matrix format.') - + raise TypeError("Unrecognised D matrix format.") + for d in D: - if d.shape != (D_m, D_n): - - raise ValueError('The D matrices are inconsistent.') - + raise ValueError("The D matrices are inconsistent.") + if type(time_interval_durations) == list: - - if (len(time_interval_durations) != len(D) and - len(time_interval_durations) > 1 and - len(D) > 1): - + if ( + len(time_interval_durations) != len(D) + and len(time_interval_durations) > 1 + and len(D) > 1 + ): # the number of time intervals and matrices do not match & # they are larger than one: mismatch - + raise ValueError( - 'The number of D matrices is inconsistent with the number of time intervals.' - ) - + "The number of D matrices is inconsistent with the number of time intervals." + ) + if len(D) > 1: - D_is_time_varying = True - + elif isinstance(D, np.ndarray): - # D is not a list - + D_m, D_n = D.shape - + elif D is None: - D_m, D_n = 0, 0 - - else: - - raise TypeError('Unrecognised D matrix format.') - + + else: + raise TypeError("Unrecognised D matrix format.") + # ********************************************************************* - + # make sure the matrices have appropriate shapes - + # the A matrix has to be square: A_m == A_n - + if A_m != A_n: - - raise ValueError('The A matrix is not square.') - + raise ValueError("The A matrix is not square.") + # the A and B matrices have to have the same number of rows: A_m == B_m - - # - + + # + if A_m != B_m: - raise ValueError( - 'The A and B matrices do not have the same number of rows.' - ) - + "The A and B matrices do not have the same number of rows." + ) + # the C and D matrices have to have the same number of rows: C_m == D_m # except if A, B and C have zero rows: no states - + if C_m != D_m and (C_m != 0 and A_m != 0 and B_m != 0): - raise ValueError( - 'The C and D matrices do not have the same number of rows.' 
- ) - + "The C and D matrices do not have the same number of rows." + ) + # the A and C matrices have to have the same number of columns: A_n == C_n - - if A_n != 0 and C_m != 0 and C_n != 0: # Note: A_m == A_n - + + if A_n != 0 and C_m != 0 and C_n != 0: # Note: A_m == A_n if A_n != C_n: - raise ValueError( - 'The A and C matrices do not have the same number of columns.' - ) - + "The A and C matrices do not have the same number of columns." + ) + # the B and D matrices have to have the same number of columns: B_n == D_n - - if B_m != 0 and B_n != 0 and D_m != 0 and D_n != 0: # both matrices need to exist - + + if ( + B_m != 0 and B_n != 0 and D_m != 0 and D_n != 0 + ): # both matrices need to exist if B_n != D_n: - raise ValueError( - 'The B and D matrices do not have the same number of columns.' - ) - + "The B and D matrices do not have the same number of columns." + ) + # ********************************************************************* - + # number of inputs, states and outputs - + # number_inputs = B_n # B_n if D_n does not exist or D_n - + # number_states = A_m # or A_n or C_n - + # number_outputs = D_m # C_m or D_m (better, since C is optional) - + # ********************************************************************* - + return ( B_n if D_n == 0 else D_n, A_m, D_m, A_is_time_varying, - B_is_time_varying, + B_is_time_varying, C_is_time_varying, - D_is_time_varying - ) - + D_is_time_varying, + ) + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* def discretise(self, integrate_outputs: bool = True): - # cases: # ********************************************************************* # 1) time invariant system, regular time steps @@ -634,647 +565,583 @@ class DynamicSystem: # ********************************************************************* # note: case 3 and 4 are effectively the same # ********************************************************************* - + # TODO: do not assume that A, B, C, D lists have the same lengths if time-varying - + # TODO: add warning to notify users that A, B, C, D lists need to have the same lengths if time-varying - + # A, B, C and D are matrices that cannot simultaenously be None # if C and D are None: okay, but no outputs # if A, B and C are None: okay, but that means no states (only input-to-output functions) # if A, B, C and D are None: error - + # A matrix - + if self.A is None: - self.A_line_k = None - + elif len(self.A) == len(self.time_interval_durations): - # 1) time invariant system, regular time steps # >> only if there is only one time interval # 3) time-varying system, regular time steps # 4) time-varying system, irregular time steps - + self.A_line_k = [ - expm(dt*A_matrix) - for (A_matrix, dt) in zip(self.A, - self.time_interval_durations)] - - elif len(self.A) == 1: - + expm(dt * A_matrix) + for (A_matrix, dt) in zip(self.A, self.time_interval_durations) + ] + + elif len(self.A) == 1: # there is only one state space model and more time intervals - + # 1) time invariant system, regular time steps # >> one model and multiple time steps # 2) time invariant system, irregular time steps - + self.A_line_k = [ - expm(dt*self.A[0]) - for dt in self.time_interval_durations] - - else: # the number of time steps and models do not match - + expm(dt * self.A[0]) for dt in self.time_interval_durations + ] + + else: # the number of time steps and models do not match 
# 3) time-varying system, regular time steps # >> multiple models and one time step duration (the case with mul- # tiple time steps with the same duration is tackled above already) - + self.A_line_k = [ - expm(self.time_interval_durations[0]*A_matrix) - for A_matrix in self.A] - + expm(self.time_interval_durations[0] * A_matrix) for A_matrix in self.A + ] + # ********************************************************************* - + # B matrix - + if self.B is None or self.A is None: - self.B_line_k = None - + elif len(self.B) == len(self.time_interval_durations): - # 1) time invariant system, regular time steps # >> only if there is only one time interval # 3) time-varying system, regular time steps # 4) time-varying system, irregular time steps - + self.B_line_k = [ np.matmul( np.matmul( - expm(dt*self.A[i if self.A_is_time_varying else 0])- - np.eye(self.number_states), - inv(self.A[i if self.A_is_time_varying else 0]) - ), - self.B[i if self.B_is_time_varying else 0] - ) - for i, dt in enumerate(self.time_interval_durations)] - - elif len(self.B) == 1: - + expm(dt * self.A[i if self.A_is_time_varying else 0]) + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ), + self.B[i if self.B_is_time_varying else 0], + ) + for i, dt in enumerate(self.time_interval_durations) + ] + + elif len(self.B) == 1: # there is only one state space model and more time intervals - + # 1) time invariant system, regular time steps # >> one model and multiple time steps # 2) time invariant system, irregular time steps - + self.B_line_k = [ np.matmul( np.matmul( - expm(dt*self.A[i if self.A_is_time_varying else 0])- - np.eye(self.number_states), - inv(self.A[i if self.A_is_time_varying else 0]) - ), - self.B[0] - ) - for i, dt in enumerate(self.time_interval_durations)] - - else: # the number of time steps and models do not match - + expm(dt * self.A[i if self.A_is_time_varying else 0]) + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ), + self.B[0], + ) + for i, dt in enumerate(self.time_interval_durations) + ] + + else: # the number of time steps and models do not match # 3) time-varying system, regular time steps # >> multiple models and one time step duration (the case with mul- # tiple time steps with the same duration is tackled above already) - - self.B_line_k = [ # TODO: reach this statement + + self.B_line_k = [ # TODO: reach this statement np.matmul( np.matmul( - expm(self.time_interval_durations[0]*A_matrix) - -np.eye(self.number_states), - inv(A_matrix) - ), - B_matrix - ) - for (A_matrix, B_matrix) in zip(self.A, self.B)] - + expm(self.time_interval_durations[0] * A_matrix) + - np.eye(self.number_states), + inv(A_matrix), + ), + B_matrix, + ) + for (A_matrix, B_matrix) in zip(self.A, self.B) + ] + # ********************************************************************* - + # C matrix - - if self.C is None or self.A is None or self.B is None or self. 
D is None: - + + if self.C is None or self.A is None or self.B is None or self.D is None: self.C_line_k = None - + elif len(self.C) == len(self.time_interval_durations): - # 1) time invariant system, regular time steps # >> only if there is only one time interval # 3) time-varying system, regular time steps # 4) time-varying system, irregular time steps - + if integrate_outputs: - - # integrate the outputs - + # integrate the outputs + self.C_line_k = [ np.dot( self.C[i if self.C_is_time_varying else 0], np.dot( - expm(dt*self.A[i if self.A_is_time_varying else 0]) - -np.eye(self.number_states), - inv(self.A[i if self.A_is_time_varying else 0]) - ) - ) - for i, dt in enumerate(self.time_interval_durations)] - + expm(dt * self.A[i if self.A_is_time_varying else 0]) + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ), + ) + for i, dt in enumerate(self.time_interval_durations) + ] + else: - # do not integrate the outputs - + self.C_line_k = [C_matrix for C_matrix in self.C] - - elif len(self.C) == 1: - + + elif len(self.C) == 1: # there is only one state space model and more time intervals - + # 1) time invariant system, regular time steps # >> one model and multiple time steps - + if integrate_outputs: - - # integrate the outputs - + # integrate the outputs + self.C_line_k = [ np.dot( self.C[i if self.C_is_time_varying else 0], np.dot( - expm(dt*self.A[i if self.A_is_time_varying else 0]) - -np.eye(self.number_states), - inv(self.A[i if self.A_is_time_varying else 0]) - ) - ) - for i, dt in enumerate(self.time_interval_durations)] - + expm(dt * self.A[i if self.A_is_time_varying else 0]) + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ), + ) + for i, dt in enumerate(self.time_interval_durations) + ] + else: - # do not integrate the outputs - + self.C_line_k = [ self.C[i if self.C_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - - else: # the number of time steps and models do not match - + for i, dt in enumerate(self.time_interval_durations) + ] + + else: # the number of time steps and models do not match # 3) time-varying system, regular time steps # >> multiple models and one time step duration (the case with mul- # tiple time steps with the same duration is tackled above already) - - if integrate_outputs: #TODO: reach this statement - - # integrate the outputs - + + if integrate_outputs: # TODO: reach this statement + # integrate the outputs + self.C_line_k = [ np.dot( C_matrix, np.dot( - expm(self.time_interval_durations[0]* - self.A[i if self.A_is_time_varying else 0]) - -np.eye(self.number_states), - inv(self.A[i if self.A_is_time_varying else 0]) + expm( + self.time_interval_durations[0] + * self.A[i if self.A_is_time_varying else 0] ) - ) - for i, C_matrix in enumerate(self.C)] - + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ), + ) + for i, C_matrix in enumerate(self.C) + ] + else: - # do not integrate the outputs - + self.C_line_k = [C_matrix for C_matrix in self.C] - + # ********************************************************************* - + # D matrix - + if self.D is None: - self.D_line_k = None - + elif len(self.D) == len(self.time_interval_durations): - # 1) time invariant system, regular time steps # >> only if there is only one time interval # 3) time-varying system, regular time steps # 4) time-varying system, irregular time steps - + if integrate_outputs: - if self.C is None or self.A is None or self.B is None: - # no states, 
outputs only - + self.D_line_k = [ - dt*self.D[i if self.D_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - + dt * self.D[i if self.D_is_time_varying else 0] + for i, dt in enumerate(self.time_interval_durations) + ] + else: - self.D_line_k = [ np.dot( np.dot( self.C[i if self.C_is_time_varying else 0], np.dot( - expm(dt*self.A[ - i if self.A_is_time_varying else 0]) - -np.eye(self.number_states), - inv(self.A[ - i if self.A_is_time_varying else 0]) + expm( + dt * self.A[i if self.A_is_time_varying else 0] ) - - - dt*np.eye(self.number_states) - ), + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ) + - dt * np.eye(self.number_states), + ), np.dot( - inv(self.A[ - i if self.A_is_time_varying else 0]), - self.B[i if self.B_is_time_varying else 0]) - ) - + - dt*self.D[i if self.D_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - + inv(self.A[i if self.A_is_time_varying else 0]), + self.B[i if self.B_is_time_varying else 0], + ), + ) + + dt * self.D[i if self.D_is_time_varying else 0] + for i, dt in enumerate(self.time_interval_durations) + ] + else: - # do not integrate the outputs - + self.D_line_k = [ self.D[i if self.D_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - + for i, dt in enumerate(self.time_interval_durations) + ] + # ***************************************************************** # ***************************************************************** - - elif len(self.D) == 1: - + + elif len(self.D) == 1: # there is only one state space model and more time intervals - + # 1) time invariant system, regular time steps # >> one model and multiple time steps - + # output equations - + if integrate_outputs: - - # integrate the outputs - - if self.C is None or self.A is None or self.B is None: - + # integrate the outputs + + if self.C is None or self.A is None or self.B is None: # no states, outputs only - + self.D_line_k = [ - dt*self.D[i if self.D_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - + dt * self.D[i if self.D_is_time_varying else 0] + for i, dt in enumerate(self.time_interval_durations) + ] + else: - self.D_line_k = [ np.dot( np.dot( self.C[i if self.C_is_time_varying else 0], np.dot( - expm(dt*self.A[ - i if self.A_is_time_varying else 0]) - -np.eye(self.number_states), - inv(self.A[ - i if self.A_is_time_varying else 0]) + expm( + dt * self.A[i if self.A_is_time_varying else 0] ) - - - dt*np.eye(self.number_states) - ), + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ) + - dt * np.eye(self.number_states), + ), np.dot( - inv(self.A[ - i if self.A_is_time_varying else 0]), - self.B[i if self.B_is_time_varying else 0]) - ) - + - dt*self.D[i if self.D_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - + inv(self.A[i if self.A_is_time_varying else 0]), + self.B[i if self.B_is_time_varying else 0], + ), + ) + + dt * self.D[i if self.D_is_time_varying else 0] + for i, dt in enumerate(self.time_interval_durations) + ] + else: - # do not integrate the outputs - + self.D_line_k = [ self.D[i if self.D_is_time_varying else 0] - for i, dt in enumerate(self.time_interval_durations)] - + for i, dt in enumerate(self.time_interval_durations) + ] + # ***************************************************************** # ***************************************************************** - - else: # the number of time steps and models do not match - + + else: # 
the number of time steps and models do not match # 3) time-varying system, regular time steps # >> multiple models and one time step duration (the case with mul- # tiple time steps with the same duration is tackled above already) - + # output equations # TODO: reach this statement if integrate_outputs: - - # integrate the outputs - - if self.C is None or self.A is None or self.B is None: - + # integrate the outputs + + if self.C is None or self.A is None or self.B is None: # no states, outputs only - + self.D_line_k = [ - self.time_interval_durations[0]*D_matrix - for i, D_matrix in enumerate(self.D)] - + self.time_interval_durations[0] * D_matrix + for i, D_matrix in enumerate(self.D) + ] + else: - self.D_line_k = [ np.dot( np.dot( self.C[i if self.C_is_time_varying else 0], np.dot( expm( - self.time_interval_durations[0]* - self.A[ - i if self.A_is_time_varying else 0] - ) - -np.eye(self.number_states), - inv(self.A[ - i if self.A_is_time_varying else 0]) + self.time_interval_durations[0] + * self.A[i if self.A_is_time_varying else 0] ) - - - self.time_interval_durations[0]* - np.eye(self.number_states) - ), + - np.eye(self.number_states), + inv(self.A[i if self.A_is_time_varying else 0]), + ) + - self.time_interval_durations[0] + * np.eye(self.number_states), + ), np.dot( - inv( - self.A[i if self.A_is_time_varying else 0] - ), - self.B[i if self.B_is_time_varying else 0]) - ) - + - self.time_interval_durations[0]*D_matrix - for i, D_matrix in enumerate(self.D)] - + inv(self.A[i if self.A_is_time_varying else 0]), + self.B[i if self.B_is_time_varying else 0], + ), + ) + + self.time_interval_durations[0] * D_matrix + for i, D_matrix in enumerate(self.D) + ] + else: - # do not integrate the outputs - + self.D_line_k = [D_matrix for D_matrix in self.D] - + # ***************************************************************** # ***************************************************************** - + # ********************************************************************* # ********************************************************************* - + # store information about the matrices - + if self.A_line_k is None: - self.A_line_is_time_varying = None - + else: - - self.A_line_is_time_varying = ( - True if len(self.A_line_k) > 1 else False) - + self.A_line_is_time_varying = True if len(self.A_line_k) > 1 else False + if self.B_line_k is None: - self.B_line_is_time_varying = None - + else: - - self.B_line_is_time_varying = ( - True if len(self.B_line_k) > 1 else False) - + self.B_line_is_time_varying = True if len(self.B_line_k) > 1 else False + if self.C_line_k is None: - self.C_line_is_time_varying = None - + else: - - self.C_line_is_time_varying = ( - True if len(self.C_line_k) > 1 else False) - + self.C_line_is_time_varying = True if len(self.C_line_k) > 1 else False + if self.D_line_k is None: - self.D_line_is_time_varying = None - + else: - - self.D_line_is_time_varying = ( - True if len(self.D_line_k) > 1 else False) - + self.D_line_is_time_varying = True if len(self.D_line_k) > 1 else False + # integrated or not - + if integrate_outputs: - self.outputs_integrated = [y for y in range(self.number_outputs)] - + else: - self.outputs_integrated = [] - + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* - + def simulate(self, U: np.ndarray, X0: np.ndarray): - # inputs: # U: m*t, for each time interval, where t is the 
number of time steps # X0: n*1, only once - - # outputs: + + # outputs: # X: n*(t+1) # Y: r*(t+1), if no integration is done, or # Y: r*t, if integration is done - + # cases: # 1) A_line_k and B_line_k are not None, but C_line_k and D_line_k may be # >> the method will return X (states exist) but maybe not Y (no outputs) # 2) D_line_k is not None, but A_line_k, B_line_k and C_line_k are # >> the method will return Y (outputs exist) but not X (no states) - # 3) - - if (len(self.time_interval_durations) == 1 and - self.number_models == 1): - + # 3) + + if len(self.time_interval_durations) == 1 and self.number_models == 1: # if there is only one time interval duration and one ss model, the # discretised model can be reused multiple times with the same time # interval duration, provided the inputs make sense - + # determine how many time intervals the input is for - + m, number_steps = U.shape - + if m != self.number_inputs: - raise ValueError( - 'The U signal is not consistent with the system matrices.') - + "The U signal is not consistent with the system matrices." + ) + else: - number_steps = len(self.time_interval_durations) - + n = self.number_states - #m = self.number_inputs + # m = self.number_inputs r = self.number_outputs - + # ********************************************************************* - - if (self.A_line_is_time_varying is not None and - self.B_line_is_time_varying is not None): - + + if ( + self.A_line_is_time_varying is not None + and self.B_line_is_time_varying is not None + ): # state equations - - X = np.zeros((n, number_steps+1)) - - X[:,0] = X0 - + + X = np.zeros((n, number_steps + 1)) + + X[:, 0] = X0 + for i in range(number_steps): - if self.A_line_is_time_varying: - i_a = i - + else: - i_a = 0 - + if self.B_line_is_time_varying: - i_b = i - + else: - i_b = 0 - - X[:,i+1] = ( - np.dot(self.A_line_k[i_a], X[:,i])+ - np.dot(self.B_line_k[i_b], U[:,i]) - ) - + + X[:, i + 1] = np.dot(self.A_line_k[i_a], X[:, i]) + np.dot( + self.B_line_k[i_b], U[:, i] + ) + # there may be output equations - - if (self.C_line_is_time_varying is not None and - self.D_line_is_time_varying is not None): - + + if ( + self.C_line_is_time_varying is not None + and self.D_line_is_time_varying is not None + ): # there are output equations - - Y = np.zeros((r, number_steps+1)) - + + Y = np.zeros((r, number_steps + 1)) + if len(self.outputs_integrated) == 0: - # no integration - + # assumes zoh inputs and the same initial C and D matrices - - Y[:,0] = ( - np.dot(self.C_line_k[0], X[:,0])+ - np.dot(self.D_line_k[0], U[:,0]) - ) - + + Y[:, 0] = np.dot(self.C_line_k[0], X[:, 0]) + np.dot( + self.D_line_k[0], U[:, 0] + ) + # calculate - + for i in range(number_steps): - if self.C_line_is_time_varying: - i_c = i - + else: - - i_c = 0 # TODO: reach this statement - + i_c = 0 # TODO: reach this statement + if self.D_line_is_time_varying: - i_d = i - + else: - - i_d = 0 # TODO: reach this statement - - Y[:,i+1] = ( - np.dot(self.C_line_k[i_c], X[:,i])+ - np.dot(self.D_line_k[i_d], U[:,i]) - ) - + i_d = 0 # TODO: reach this statement + + Y[:, i + 1] = np.dot(self.C_line_k[i_c], X[:, i]) + np.dot( + self.D_line_k[i_d], U[:, i] + ) + # return statement - + if len(self.outputs_integrated) == 0: - # no integration: all Y slots - + return X, Y - + else: - # integration: skip the first Y slot - - return X, Y[:,1:] - + + return X, Y[:, 1:] + # there are no outputs - + # return statement - + return X, None - - elif (self.C_line_is_time_varying is None and - self.D_line_is_time_varying is not None): - + + elif ( 
+ self.C_line_is_time_varying is None + and self.D_line_is_time_varying is not None + ): # there are no state equations, but there may be output equations - - Y = np.zeros((r, number_steps+1)) - + + Y = np.zeros((r, number_steps + 1)) + if len(self.outputs_integrated) == 0: - # no integration - + # assumes zoh inputs and the same initial C and D matrices - - Y[:,0] = ( - np.dot(self.D_line_k[0], U[:,0]) - ) - - # simulate - + + Y[:, 0] = np.dot(self.D_line_k[0], U[:, 0]) + + # simulate + for i in range(number_steps): - if self.D_line_is_time_varying: - i_d = i - + else: - i_d = 0 - - Y[:,i+1] = ( - np.dot(self.D_line_k[i_d], U[:,i]) - ) - + + Y[:, i + 1] = np.dot(self.D_line_k[i_d], U[:, i]) + # return statement - + if len(self.outputs_integrated) == 0: - # no integration: all Y slots - + return None, Y - + else: - # integration: skip the first Y slot - - return None, Y[:,1:] - + + return None, Y[:, 1:] + # ************************************************************************* - # ************************************************************************* - + # ************************************************************************* + + # ***************************************************************************** # ***************************************************************************** - + + class StatelessSystem(DynamicSystem): "A class for dynamic systems without states." - + # a stateless system is a dynamic system without states - - def __init__(self, - time_interval_durations: list or float or int, - D: list or np.array, - integrate_outputs: bool = True): - + + def __init__( + self, + time_interval_durations: list or float or int, + D: list or np.array, + integrate_outputs: bool = True, + ): DynamicSystem.__init__( self, time_interval_durations=time_interval_durations, @@ -1282,27 +1149,31 @@ class StatelessSystem(DynamicSystem): B=None, C=None, D=D, - integrate_outputs=integrate_outputs) # outputs can be integrated - + integrate_outputs=integrate_outputs, + ) # outputs can be integrated + # ************************************************************************* # ************************************************************************* - + def simulate(self, U: np.ndarray): "Simulate how the system responds to a set of input signals." - + return DynamicSystem.simulate(self, U=U, X0=None) - + + # ***************************************************************************** # ***************************************************************************** - + + class OutputlessSystem(DynamicSystem): "A class for dynamic systems without outputs." - - def __init__(self, - time_interval_durations: list or float or int, - A: list or np.array, - B: list or np.array): - + + def __init__( + self, + time_interval_durations: list or float or int, + A: list or np.array, + B: list or np.array, + ): DynamicSystem.__init__( self, time_interval_durations=time_interval_durations, @@ -1310,15 +1181,17 @@ class OutputlessSystem(DynamicSystem): B=B, C=None, D=None, - integrate_outputs=False) # no outputs to integrate - + integrate_outputs=False, + ) # no outputs to integrate + # ************************************************************************* # ************************************************************************* - + def simulate(self, U: np.ndarray, X0: np.ndarray): "Simulate how the system responds to a set of input signals and initial conditions." 
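# *****************************************************************************
# Editorial note (illustration only, not part of the patch): StatelessSystem
# and OutputlessSystem simply wrap DynamicSystem.simulate(), which expects U
# with one column per time step (m x t) and X0 as the initial state (length
# n), and returns X as n x (t+1) and Y as r x (t+1) without output
# integration or r x t with it. A hypothetical usage sketch, assuming the
# constructor discretises the model (as the integrate_outputs argument
# suggests):
#
#     import numpy as np
#
#     sys = OutputlessSystem(
#         time_interval_durations=[0.5, 0.5],  # two time steps of 0.5 each
#         A=[np.array([[-1.0]])],
#         B=[np.array([[2.0]])],
#     )
#     X, Y = sys.simulate(U=np.ones((1, 2)), X0=np.array([0.0]))
#     # X.shape == (1, 3); Y is None because the system has no outputs
# *****************************************************************************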
- + return DynamicSystem.simulate(self, U=U, X0=X0) + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/model.py b/src/topupopt/problems/esipp/model.py index 9a23a75..2f828b3 100644 --- a/src/topupopt/problems/esipp/model.py +++ b/src/topupopt/problems/esipp/model.py @@ -7,23 +7,25 @@ from math import isfinite, inf # ***************************************************************************** # ***************************************************************************** -def create_model(name: str, - enable_default_values: bool = True, - enable_validation: bool = True, - enable_initialisation: bool = True): - + +def create_model( + name: str, + enable_default_values: bool = True, + enable_validation: bool = True, + enable_initialisation: bool = True, +): # TODO: make default values, validation, and initialisation optional - + # ************************************************************************* # ************************************************************************* - + # create model object - + model = pyo.AbstractModel(name) - + # ************************************************************************* # ************************************************************************* - + # naming convention: # variables start with "var_" # sets start with "set_" @@ -34,1998 +36,1928 @@ def create_model(name: str, # methods to build constraints start with "rule_" # methods to initialise sets/.../parameters start with "init_" # methods to validate sets/.../parameters start with "validate_" - + # TODO: migrate to kernel pyomo - + # TODO: try to use blocks to improve readability and modularity - + # TODO: add piecewise constraints for the import and export flows/prices - + # ************************************************************************* # ************************************************************************* # ************************************************************************* # ************************************************************************* - + # sets - + # ************************************************************************* # ************************************************************************* - + # input sets - + # ************************************************************************* - + # set of assessments - + model.set_Q = pyo.Set() - + # TODO: use rangesets for time-related sets - + # set of time step intervals for each assessment - + model.set_K_q = pyo.Set(model.set_Q) # set of representative evaluation periods for each assessment - + model.set_P_q = pyo.Set(model.set_Q) - + # set of networks - + model.set_G = pyo.Set() - + # set of nodes on each network - + model.set_L = pyo.Set(model.set_G) - + # set of importing nodes on each network - - model.set_L_imp = pyo.Set(model.set_G, - within=model.set_L) - + + model.set_L_imp = pyo.Set(model.set_G, within=model.set_L) + # set of exporting nodes on each network - - model.set_L_exp = pyo.Set(model.set_G, - within=model.set_L) - + + model.set_L_exp = pyo.Set(model.set_G, within=model.set_L) + # set of nodes on network g incompatible with having more than one incoming # arc unless there are outgoing arcs too - - model.set_L_max_in_g = pyo.Set(model.set_G, - within=model.set_L) # should inherently exclude import nodes - + + 
model.set_L_max_in_g = pyo.Set( + model.set_G, within=model.set_L + ) # should inherently exclude import nodes + # ************************************************************************* # ************************************************************************* - + # sparse sets - + # (q,k) tuples - + def init_set_QK(m): - return ((q,k) for q in m.set_Q for k in m.set_K_q[q]) + return ((q, k) for q in m.set_Q for k in m.set_K_q[q]) + model.set_QK = pyo.Set( - dimen=2, - initialize=init_set_QK if enable_initialisation else None - ) - + dimen=2, initialize=init_set_QK if enable_initialisation else None + ) + # (q,p) tuples - + def init_set_QP(m): - return ((q,p) for q in m.set_Q for p in m.set_P_q[q]) + return ((q, p) for q in m.set_Q for p in m.set_P_q[q]) + model.set_QP = pyo.Set( - dimen=2, - initialize=init_set_QP if enable_initialisation else None - ) - + dimen=2, initialize=init_set_QP if enable_initialisation else None + ) + # (q,p,k) tuples - + def init_set_QPK(m): - return ((q,p,k) for (q,p) in m.set_QP for k in m.set_K_q[q]) + return ((q, p, k) for (q, p) in m.set_QP for k in m.set_K_q[q]) + model.set_QPK = pyo.Set( - dimen=3, - initialize=init_set_QPK if enable_initialisation else None - ) - + dimen=3, initialize=init_set_QPK if enable_initialisation else None + ) + # set of GL tuples for the various nodes - + def init_set_GL(m): return ( - (g,l) # GL tuple - for g in m.set_G # for each network - for l in m.set_L[g]) # for each location + (g, l) for g in m.set_G for l in m.set_L[g] # GL tuple # for each network + ) # for each location + model.set_GL = pyo.Set( - dimen=2, - initialize=( - init_set_GL if enable_initialisation else None - ) - ) - + dimen=2, initialize=(init_set_GL if enable_initialisation else None) + ) + # set of GL tuples for import nodes - + def init_set_GL_imp(m): - return ((g,l) - for (g,l) in m.set_GL - if l in m.set_L_imp[g]) + return ((g, l) for (g, l) in m.set_GL if l in m.set_L_imp[g]) + model.set_GL_imp = pyo.Set( - dimen=2, - initialize=( - init_set_GL_imp if enable_initialisation else None - ) - ) - + dimen=2, initialize=(init_set_GL_imp if enable_initialisation else None) + ) + # set of GL tuples for export nodes - + def init_set_GL_exp(m): - return ((g,l) - for (g,l) in m.set_GL - if l in m.set_L_exp[g]) + return ((g, l) for (g, l) in m.set_GL if l in m.set_L_exp[g]) + model.set_GL_exp = pyo.Set( - dimen=2, - initialize=( - init_set_GL_exp if enable_initialisation else None - ) - ) - + dimen=2, initialize=(init_set_GL_exp if enable_initialisation else None) + ) + # # set of GL tuples for import or export nodes - + # model.set_GL_exp_imp = model.set_GL_imp | model.set_GL_exp - + # # set of GL tuples for non-import, non-export nodes - + # model.set_GL_not_exp_imp = model.set_GL - model.set_GL_exp_imp # set of GL tuples for import or export nodes - + model.set_GL_exp_imp = pyo.Set( dimen=2, - initialize=( - model.set_GL_imp | model.set_GL_exp - if enable_initialisation else None - ) - ) - + initialize=( + model.set_GL_imp | model.set_GL_exp if enable_initialisation else None + ), + ) + # set of GL tuples for non-import, non-export nodes - + model.set_GL_not_exp_imp = pyo.Set( dimen=2, - initialize=( - model.set_GL - model.set_GL_exp_imp - if enable_initialisation else None - ) - ) - + initialize=( + model.set_GL - model.set_GL_exp_imp if enable_initialisation else None + ), + ) + # set of GLL tuples for all valid node pair and direction combinations - + def init_set_GLL(m): - return ((g,l1,l2) - for g in m.set_G - for l1 in m.set_L[g] - if l1 not in 
m.set_L_exp[g] - for l2 in m.set_L[g] - if l2 not in m.set_L_imp[g] - if l1 != l2) - model.set_GLL = pyo.Set( - dimen=3, - initialize=( - init_set_GLL if enable_initialisation else None - ) + return ( + (g, l1, l2) + for g in m.set_G + for l1 in m.set_L[g] + if l1 not in m.set_L_exp[g] + for l2 in m.set_L[g] + if l2 not in m.set_L_imp[g] + if l1 != l2 ) + model.set_GLL = pyo.Set( + dimen=3, initialize=(init_set_GLL if enable_initialisation else None) + ) + # ************************************************************************* # ************************************************************************* - + # input sets - + # set for all arcs - + model.set_J = pyo.Set(model.set_GLL) - + # set for all collectively-decided arcs - + model.set_J_col = pyo.Set(model.set_GLL) - + # set for all undirected arcs - + # def validate_J_und(m, j): # # j is valid if it is in J[g,l1,l2] and not J[g,l2,l1] - # return False - model.set_J_und = pyo.Set(model.set_GLL, - #validate=validate_J_und, - within=model.set_J) - + # return False + model.set_J_und = pyo.Set( + model.set_GLL, + # validate=validate_J_und, + within=model.set_J, + ) + # set for all preexisting arcs - - model.set_J_pre = pyo.Set(model.set_GLL, - within=model.set_J) - + + model.set_J_pre = pyo.Set(model.set_GLL, within=model.set_J) + # set for all preexisting arcs with infinite capacity - - model.set_J_pre_inf = pyo.Set(model.set_GLL, - within=model.set_J_pre, - ) - + + model.set_J_pre_inf = pyo.Set( + model.set_GLL, + within=model.set_J_pre, + ) + # set for all mandatory arcs - - model.set_J_mdt = pyo.Set(model.set_GLL, - within=model.set_J) - + + model.set_J_mdt = pyo.Set(model.set_GLL, within=model.set_J) + # set of arcs whose investment decision is to be modelled using sos1 - - model.set_J_arc_sos1 = pyo.Set(model.set_GLL, - within=model.set_J) - + + model.set_J_arc_sos1 = pyo.Set(model.set_GLL, within=model.set_J) + # set of arcs whose investment decision is to be modelled using nnr - - model.set_J_arc_nnr = pyo.Set(model.set_GLL, - within=model.set_J) - + + model.set_J_arc_nnr = pyo.Set(model.set_GLL, within=model.set_J) + # set of undirected arcs whose flow sense is determined using sos1 - - model.set_J_sns_sos1 = pyo.Set(model.set_GLL, - within=model.set_J_und) - + + model.set_J_sns_sos1 = pyo.Set(model.set_GLL, within=model.set_J_und) + # set of undirected arcs whose flow sense is determined using nnr - - model.set_J_sns_nnr = pyo.Set(model.set_GLL, - within=model.set_J_sns_sos1) - + + model.set_J_sns_nnr = pyo.Set(model.set_GLL, within=model.set_J_sns_sos1) + # set of (new, optional) arcs that are to use interface variables - - model.set_J_int = pyo.Set(model.set_GLL, - within=model.set_J) - + + model.set_J_int = pyo.Set(model.set_GLL, within=model.set_J) + # set of arcs implying static losses - - model.set_J_stt = pyo.Set(model.set_GLL, - within=model.set_J) - + + model.set_J_stt = pyo.Set(model.set_GLL, within=model.set_J) + # set of arcs whose static losses are placed in the start node - - model.set_J_stt_dep = pyo.Set(model.set_GLL, - within=model.set_J_stt) - + + model.set_J_stt_dep = pyo.Set(model.set_GLL, within=model.set_J_stt) + # set of arcs whose static losses are placed in the end node - - model.set_J_stt_arr = pyo.Set(model.set_GLL, - within=model.set_J_stt) - + + model.set_J_stt_arr = pyo.Set(model.set_GLL, within=model.set_J_stt) + # set of arcs whose static losses are placed upstream - - model.set_J_stt_us = pyo.Set(model.set_GLL, - within=model.set_J_stt) + + model.set_J_stt_us = pyo.Set(model.set_GLL, 
within=model.set_J_stt) # set of arcs whose static losses are placed downstream - - model.set_J_stt_ds = pyo.Set(model.set_GLL, - within=model.set_J_stt) - + + model.set_J_stt_ds = pyo.Set(model.set_GLL, within=model.set_J_stt) + # ************************************************************************* # ************************************************************************* - + # systems - + # set of all systems - + model.set_I = pyo.Set() - + # set of optional systems - + model.set_I_new = pyo.Set(within=model.set_I) # ************************************************************************* - + # inputs - + # set of inputs (indexed by system) - + model.set_M = pyo.Set(model.set_I) - + # set of inputs modelled using non-negative real variables - - model.set_M_nnr = pyo.Set(model.set_I, - within=model.set_M) - + + model.set_M_nnr = pyo.Set(model.set_I, within=model.set_M) + # set of inputs modelled using binary variables - - model.set_M_bin = pyo.Set(model.set_I, - within=model.set_M) - + + model.set_M_bin = pyo.Set(model.set_I, within=model.set_M) + # set of amplitude-constrained inputs - - model.set_M_dim = pyo.Set(model.set_I_new, - within=model.set_M) - + + model.set_M_dim = pyo.Set(model.set_I_new, within=model.set_M) + # set of amplitude-constrained inputs - - model.set_M_fix = pyo.Set(model.set_I, - within=model.set_M) - + + model.set_M_fix = pyo.Set(model.set_I, within=model.set_M) + # set of externality-inducing inputs - - model.set_M_ext = pyo.Set(model.set_I, - within=model.set_M) + + model.set_M_ext = pyo.Set(model.set_I, within=model.set_M) # ************************************************************************* - + # outputs - + # set of outputs (indexed by system) - + model.set_R = pyo.Set(model.set_I) - + # set of outputs with fixed bounds - - model.set_R_fix = pyo.Set(model.set_I, - within=model.set_R) - + + model.set_R_fix = pyo.Set(model.set_I, within=model.set_R) + # set of positive amplitude-constrained outputs - - model.set_R_dim_pos = pyo.Set(model.set_I, - within=model.set_R) - + + model.set_R_dim_pos = pyo.Set(model.set_I, within=model.set_R) + # set of negative amplitude-constrained outputs - - model.set_R_dim_neg = pyo.Set(model.set_I, - within=model.set_R) - + + model.set_R_dim_neg = pyo.Set(model.set_I, within=model.set_R) + # set of amplitude-limited outputs with matching pos. and neg. amplitudes - - model.set_R_dim_eq = pyo.Set(model.set_I, - within=model.set_R) - + + model.set_R_dim_eq = pyo.Set(model.set_I, within=model.set_R) + # set of outputs (indexed by system) inducing externalities - + model.set_R_ext = pyo.Set(model.set_I) - + # ************************************************************************* - + # states - + # set of states - + model.set_N = pyo.Set(model.set_I) - + # set of states with fixed bounds - - model.set_N_fix = pyo.Set(model.set_I, - within=model.set_N) - + + model.set_N_fix = pyo.Set(model.set_I, within=model.set_N) + # set of positive amplitude-constrained states - - model.set_N_dim_pos = pyo.Set(model.set_I, - within=model.set_N) - + + model.set_N_dim_pos = pyo.Set(model.set_I, within=model.set_N) + # set of negative amplitude-constrained states - - model.set_N_dim_neg = pyo.Set(model.set_I, - within=model.set_N) - + + model.set_N_dim_neg = pyo.Set(model.set_I, within=model.set_N) + # set of amplitude-limited states with matching pos. and neg. 
amplitudes - - model.set_N_dim_eq = pyo.Set(model.set_I, - within=model.set_N) - + + model.set_N_dim_eq = pyo.Set(model.set_I, within=model.set_N) + # set of states (indexed by system) inducing externalities - - model.set_N_ext = pyo.Set(model.set_I, - within=model.set_N) - + + model.set_N_ext = pyo.Set(model.set_I, within=model.set_N) + # set of positive state variation-penalised states - - model.set_N_pos_var = pyo.Set(model.set_I, - within=model.set_N) + + model.set_N_pos_var = pyo.Set(model.set_I, within=model.set_N) # set of negative state variation-penalised states - - model.set_N_neg_var = pyo.Set(model.set_I, - within=model.set_N) - + + model.set_N_neg_var = pyo.Set(model.set_I, within=model.set_N) + # set of upper reference violation-penalised states - - model.set_N_ref_u = pyo.Set(model.set_I, - within=model.set_N) + + model.set_N_ref_u = pyo.Set(model.set_I, within=model.set_N) # set of lower reference violation-penalised states - - model.set_N_ref_d = pyo.Set(model.set_I, - within=model.set_N) + + model.set_N_ref_d = pyo.Set(model.set_I, within=model.set_N) # ************************************************************************* # ************************************************************************* - + # sparse index sets # ************************************************************************* - + # set of price segments - - model.set_S = pyo.Set( - model.set_GL_exp_imp, - model.set_QPK - ) - + + model.set_S = pyo.Set(model.set_GL_exp_imp, model.set_QPK) + # set of GLQKS tuples - + def init_set_GLQPKS(m): return ( - (g,l,q,p,k,s) - #for (g,l) in m.set_GL_exp_imp - #for (q,k) in m.set_QK - for (g,l,q,p,k) in m.set_S - for s in m.set_S[(g,l,q,p,k)] - ) - model.set_GLQPKS = pyo.Set( - dimen=6, - initialize=(init_set_GLQPKS if enable_initialisation else None) + (g, l, q, p, k, s) + # for (g,l) in m.set_GL_exp_imp + # for (q,k) in m.set_QK + for (g, l, q, p, k) in m.set_S + for s in m.set_S[(g, l, q, p, k)] ) - + + model.set_GLQPKS = pyo.Set( + dimen=6, initialize=(init_set_GLQPKS if enable_initialisation else None) + ) + def init_set_GLQPKS_exp(m): return ( - glqpks - for glqpks in m.set_GLQPKS - if glqpks[1] in m.set_L_exp[glqpks[0]] - ) - model.set_GLQPKS_exp = pyo.Set( - dimen=6, - initialize=(init_set_GLQPKS_exp if enable_initialisation else None) + glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_exp[glqpks[0]] ) - + + model.set_GLQPKS_exp = pyo.Set( + dimen=6, initialize=(init_set_GLQPKS_exp if enable_initialisation else None) + ) + def init_set_GLQPKS_imp(m): return ( - glqpks - for glqpks in m.set_GLQPKS - if glqpks[1] in m.set_L_imp[glqpks[0]] - ) - model.set_GLQPKS_imp = pyo.Set( - dimen=6, - initialize=(init_set_GLQPKS_imp if enable_initialisation else None) + glqpks for glqpks in m.set_GLQPKS if glqpks[1] in m.set_L_imp[glqpks[0]] ) - + + model.set_GLQPKS_imp = pyo.Set( + dimen=6, initialize=(init_set_GLQPKS_imp if enable_initialisation else None) + ) + # ************************************************************************* - + # all arcs - + # set of GLLJ tuples for all arcs (undirected arcs appear twice) - - def init_set_GLLJ(m): - return ((g,l1,l2,j) - for (g,l1,l2) in m.set_GLL - if l1!=l2 # redundant, as it is also a condition for set_GLL - for j in m.set_J[(g,l1,l2)]) - model.set_GLLJ = pyo.Set(dimen=4, - initialize=( - init_set_GLLJ - if enable_initialisation else None - ) - ) - + + def init_set_GLLJ(m): + return ( + (g, l1, l2, j) + for (g, l1, l2) in m.set_GLL + if l1 != l2 # redundant, as it is also a condition for set_GLL + for j in 
m.set_J[(g, l1, l2)] + ) + + model.set_GLLJ = pyo.Set( + dimen=4, initialize=(init_set_GLLJ if enable_initialisation else None) + ) + # sets of GLLJ tuples for all arcs with static losses - + def init_set_GLLJ_static(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ - for j in m.set_J_stt[(g,l1,l2)]) - model.set_GLLJ_static = pyo.Set(dimen=4, - initialize=( - init_set_GLLJ_static - if enable_initialisation else None - ) - ) - + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ + for j in m.set_J_stt[(g, l1, l2)] + ) + + model.set_GLLJ_static = pyo.Set( + dimen=4, initialize=(init_set_GLLJ_static if enable_initialisation else None) + ) + # sets of GLLJ tuples for all pre-existing arcs with static losses - + def init_set_GLLJ_static_pre(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_static - if j in m.set_J_pre[(g,l1,l2)]) - model.set_GLLJ_static_pre = pyo.Set(dimen=4, - initialize=( - init_set_GLLJ_static_pre - if enable_initialisation else None - ) - ) - + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_static + if j in m.set_J_pre[(g, l1, l2)] + ) + + model.set_GLLJ_static_pre = pyo.Set( + dimen=4, + initialize=(init_set_GLLJ_static_pre if enable_initialisation else None), + ) + # sets of GLLJ tuples for all new arcs with static losses - + def init_set_GLLJ_static_new(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_static - if j not in m.set_J_pre[(g,l1,l2)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_static + if j not in m.set_J_pre[(g, l1, l2)] + ) + model.set_GLLJ_static_new = pyo.Set( dimen=4, - initialize=( - init_set_GLLJ_static_new - if enable_initialisation else None - ) - ) + initialize=(init_set_GLLJ_static_new if enable_initialisation else None), + ) # ************************************************************************* - + # sets of GLLJ tuples for directed arcs - + # set of GLLJ tuples for directed arcs - - def init_set_GLLJ_dir(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ - if j not in m.set_J_und[(g,l1,l2)]) - model.set_GLLJ_dir = pyo.Set(dimen=4, - initialize=init_set_GLLJ_dir) - + + def init_set_GLLJ_dir(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ + if j not in m.set_J_und[(g, l1, l2)] + ) + + model.set_GLLJ_dir = pyo.Set(dimen=4, initialize=init_set_GLLJ_dir) + # set of GLLJ tuples for preexisting directed arcs - - def init_set_GLLJ_dir_pre(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_dir - if j in m.set_J_pre[(g,l1,l2)]) - model.set_GLLJ_dir_pre = pyo.Set(dimen=4, - #within=model.set_GLLJ_dir, - initialize=init_set_GLLJ_dir_pre) - + + def init_set_GLLJ_dir_pre(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_dir + if j in m.set_J_pre[(g, l1, l2)] + ) + + model.set_GLLJ_dir_pre = pyo.Set( + dimen=4, + # within=model.set_GLLJ_dir, + initialize=init_set_GLLJ_dir_pre, + ) + # set of GLLJ tuples for preexisting directed arcs with infinite capacity - + def init_set_GLLJ_dir_pre_inf(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_dir_pre - if j in m.set_J_pre_inf[(g,l1,l2)]) - model.set_GLLJ_dir_pre_inf = pyo.Set(dimen=4, - #within=model.set_GLLJ_dir_pre, - initialize=init_set_GLLJ_dir_pre_inf) - - # set of GLLJ tuples preexisting directed arcs with finite capacity - - model.set_GLLJ_dir_pre_fin = ( - model.set_GLLJ_dir_pre - model.set_GLLJ_dir_pre_inf + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_dir_pre + if j in m.set_J_pre_inf[(g, l1, l2)] ) - + + model.set_GLLJ_dir_pre_inf = pyo.Set( + dimen=4, + # 
within=model.set_GLLJ_dir_pre, + initialize=init_set_GLLJ_dir_pre_inf, + ) + + # set of GLLJ tuples preexisting directed arcs with finite capacity + + model.set_GLLJ_dir_pre_fin = model.set_GLLJ_dir_pre - model.set_GLLJ_dir_pre_inf + # set of GLLJ tuples for new directed arcs - + model.set_GLLJ_dir_new = model.set_GLLJ_dir - model.set_GLLJ_dir_pre - + # set of GLLJ tuples for new directed arcs modelled using SOS1 - + def init_set_GLLJ_dir_new_sos1(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_dir_new - if j in m.set_J_arc_sos1[(g,l1,l2)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_dir_new + if j in m.set_J_arc_sos1[(g, l1, l2)] + ) + model.set_GLLJ_dir_new_sos1 = pyo.Set( - dimen=4, - #within=model.set_GLLJ_dir_new, - initialize=init_set_GLLJ_dir_new_sos1) - + dimen=4, + # within=model.set_GLLJ_dir_new, + initialize=init_set_GLLJ_dir_new_sos1, + ) + # set of GLLJ tuples for new yet mandatory directed arcs - + def init_set_GLLJ_dir_new_mdt(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_dir_new - if j in m.set_J_mdt[(g,l1,l2)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_dir_new + if j in m.set_J_mdt[(g, l1, l2)] + ) + model.set_GLLJ_dir_new_mdt = pyo.Set( - dimen=4, - #within=model.set_GLLJ_dir_new, - initialize=init_set_GLLJ_dir_new_mdt) + dimen=4, + # within=model.set_GLLJ_dir_new, + initialize=init_set_GLLJ_dir_new_mdt, + ) # set of GLLJ tuples for new yet mandatory directed arcs - - model.set_GLLJ_dir_new_opt = ( - model.set_GLLJ_dir_new - model.set_GLLJ_dir_new_mdt - ) - + + model.set_GLLJ_dir_new_opt = model.set_GLLJ_dir_new - model.set_GLLJ_dir_new_mdt + # ************************************************************************* - + # sets of GLLJ tuples for undirected arcs - + # set of GLLJ tuples for undirected arcs (one tuple per undirected arc) - - def init_set_GLLJ_und(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ - if (g,l1,l2) in m.set_J_und - if j in m.set_J_und[(g,l1,l2)]) - model.set_GLLJ_und = pyo.Set( - dimen=4, - initialize=(init_set_GLLJ_und if enable_initialisation else None) + + def init_set_GLLJ_und(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ + if (g, l1, l2) in m.set_J_und + if j in m.set_J_und[(g, l1, l2)] ) - + + model.set_GLLJ_und = pyo.Set( + dimen=4, initialize=(init_set_GLLJ_und if enable_initialisation else None) + ) + # set of GLLJ tuples for preexisting undirected arcs (one per arc) - - def init_set_GLLJ_und_pre(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und - if (g,l1,l2) in m.set_J_pre - if j in m.set_J_pre[(g,l1,l2)]) - model.set_GLLJ_und_pre = pyo.Set(dimen=4, - #within=model.set_GLLJ_und, - initialize=init_set_GLLJ_und_pre) - + + def init_set_GLLJ_und_pre(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und + if (g, l1, l2) in m.set_J_pre + if j in m.set_J_pre[(g, l1, l2)] + ) + + model.set_GLLJ_und_pre = pyo.Set( + dimen=4, + # within=model.set_GLLJ_und, + initialize=init_set_GLLJ_und_pre, + ) + # set of GLLJ tuples for preexisting undirected arcs with infinite capacity - + def init_set_GLLJ_und_pre_inf(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_pre - if (g,l1,l2) in m.set_J_pre_inf - if j in m.set_J_pre_inf[(g,l1,l2)]) - model.set_GLLJ_und_pre_inf = pyo.Set(dimen=4, - #within=model.set_GLLJ_und_pre, - initialize=init_set_GLLJ_und_pre_inf) - - # set of GLLJ tuples for preexisting undirected arcs with finite capacity - - model.set_GLLJ_und_pre_fin = ( - model.set_GLLJ_und_pre - model.set_GLLJ_und_pre_inf + 
return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_pre + if (g, l1, l2) in m.set_J_pre_inf + if j in m.set_J_pre_inf[(g, l1, l2)] ) + model.set_GLLJ_und_pre_inf = pyo.Set( + dimen=4, + # within=model.set_GLLJ_und_pre, + initialize=init_set_GLLJ_und_pre_inf, + ) + + # set of GLLJ tuples for preexisting undirected arcs with finite capacity + + model.set_GLLJ_und_pre_fin = model.set_GLLJ_und_pre - model.set_GLLJ_und_pre_inf + # set of GLLJ tuples for new undirected arcs (one per arc) - + model.set_GLLJ_und_new = model.set_GLLJ_und - model.set_GLLJ_und_pre - + # set of GLLJ tuples for new undirected arcs modelled using SOS1 - + def init_set_GLLJ_und_new_sos1(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_new - if (g,l1,l2) in m.set_J_arc_sos1 - if j in m.set_J_arc_sos1[(g,l1,l2)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_new + if (g, l1, l2) in m.set_J_arc_sos1 + if j in m.set_J_arc_sos1[(g, l1, l2)] + ) + model.set_GLLJ_und_new_sos1 = pyo.Set( - dimen=4, - #within=model.set_GLLJ_und_new, - initialize=init_set_GLLJ_und_new_sos1) - + dimen=4, + # within=model.set_GLLJ_und_new, + initialize=init_set_GLLJ_und_new_sos1, + ) + # set of GLLJ tuples for new yet mandatory undirected arcs - + def init_set_GLLJ_und_new_mdt(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_new - if (g,l1,l2) in m.set_J_mdt - if j in m.set_J_mdt[(g,l1,l2)]) - model.set_GLLJ_und_new_mdt = pyo.Set(dimen=4, - #within=model.set_GLLJ_und_new, - initialize=init_set_GLLJ_und_new_mdt) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_new + if (g, l1, l2) in m.set_J_mdt + if j in m.set_J_mdt[(g, l1, l2)] + ) + + model.set_GLLJ_und_new_mdt = pyo.Set( + dimen=4, + # within=model.set_GLLJ_und_new, + initialize=init_set_GLLJ_und_new_mdt, + ) # set of GLLJ tuples for new yet mandatory undirected arcs - - model.set_GLLJ_und_new_opt = ( - model.set_GLLJ_und_new - model.set_GLLJ_und_new_mdt - ) - + + model.set_GLLJ_und_new_opt = model.set_GLLJ_und_new - model.set_GLLJ_und_new_mdt + # set of GLLJ tuples for arcs using interfaces - + def init_set_GLLJ_int(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ - if (g,l1,l2) in m.set_J_int - if j in m.set_J_int[(g,l1,l2)]) - model.set_GLLJ_int = pyo.Set( - dimen=4, - initialize=(init_set_GLLJ_int if enable_initialisation else None) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ + if (g, l1, l2) in m.set_J_int + if j in m.set_J_int[(g, l1, l2)] ) - + + model.set_GLLJ_int = pyo.Set( + dimen=4, initialize=(init_set_GLLJ_int if enable_initialisation else None) + ) + # ************************************************************************* - + # set of complementary GLLJ tuples for undirected arcs - - def init_set_GLLJ_und_ext(m): - return ((g,l2,l1,j) for (g,l1,l2,j) in m.set_GLLJ_und) + + def init_set_GLLJ_und_ext(m): + return ((g, l2, l1, j) for (g, l1, l2, j) in m.set_GLLJ_und) + model.set_GLLJ_und_ext = pyo.Set( - dimen=4, - initialize=(init_set_GLLJ_und_ext if enable_initialisation else None) - ) - + dimen=4, initialize=(init_set_GLLJ_und_ext if enable_initialisation else None) + ) + # set of complementary GLLJ tuples for preexisting undirected arcs - - def init_set_GLLJ_und_pre_ext(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_ext - if j in m.set_J_pre[(g,l2,l1)]) - model.set_GLLJ_und_pre_ext = pyo.Set(dimen=4, - #within=model.set_GLLJ_und_ext, - initialize=init_set_GLLJ_und_pre_ext) - - # set of complementary GLLJ tuples for new undirected arcs - - 
model.set_GLLJ_und_new_ext = ( - model.set_GLLJ_und_ext - model.set_GLLJ_und_pre_ext + + def init_set_GLLJ_und_pre_ext(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_ext + if j in m.set_J_pre[(g, l2, l1)] ) - - # set of complementary GLLJ tuples for inf. cap. presel. undirected arcs - + + model.set_GLLJ_und_pre_ext = pyo.Set( + dimen=4, + # within=model.set_GLLJ_und_ext, + initialize=init_set_GLLJ_und_pre_ext, + ) + + # set of complementary GLLJ tuples for new undirected arcs + + model.set_GLLJ_und_new_ext = model.set_GLLJ_und_ext - model.set_GLLJ_und_pre_ext + + # set of complementary GLLJ tuples for inf. cap. presel. undirected arcs + def init_set_GLLJ_und_pre_inf_ext(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_pre_ext - if j in m.set_J_pre_inf[(g,l2,l1)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_pre_ext + if j in m.set_J_pre_inf[(g, l2, l1)] + ) + model.set_GLLJ_und_pre_inf_ext = pyo.Set( - dimen=4, - #within=model.set_GLLJ_und_pre_ext, - initialize=init_set_GLLJ_und_pre_inf_ext) - + dimen=4, + # within=model.set_GLLJ_und_pre_ext, + initialize=init_set_GLLJ_und_pre_inf_ext, + ) + # set of complementary GLLJ tuples for fin. cap. presel. undirected arcs - + model.set_GLLJ_und_pre_fin_ext = ( model.set_GLLJ_und_pre_ext - model.set_GLLJ_und_pre_inf_ext - ) - + ) + # set of complementary GLLJ tuples for sel. und. arcs modelled using SOS1 - + def init_set_GLLJ_und_new_sos1_ext(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_new_ext - if j in m.set_J_arc_sos1[(g,l2,l1)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_new_ext + if j in m.set_J_arc_sos1[(g, l2, l1)] + ) + model.set_GLLJ_und_new_sos1_ext = pyo.Set( - dimen=4, - #within=model.set_GLLJ_und_new_ext, - initialize=init_set_GLLJ_und_new_sos1_ext) - + dimen=4, + # within=model.set_GLLJ_und_new_ext, + initialize=init_set_GLLJ_und_new_sos1_ext, + ) + # set of complementary GLLJ tuples for sel. 
yet mandatory undirected arcs - + def init_set_GLLJ_und_new_mdt_ext(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_new_ext - if j in m.set_J_mdt[(g,l2,l1)]) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_new_ext + if j in m.set_J_mdt[(g, l2, l1)] + ) + model.set_GLLJ_und_new_mdt_ext = pyo.Set( - dimen=4, - #within=model.set_GLLJ_und_new_ext, - initialize=init_set_GLLJ_und_new_mdt) + dimen=4, + # within=model.set_GLLJ_und_new_ext, + initialize=init_set_GLLJ_und_new_mdt, + ) # set of GLLJ tuples for new yet mandatory undirected arcs - + model.set_GLLJ_und_new_opt_ext = ( model.set_GLLJ_und_new_ext - model.set_GLLJ_und_new_mdt_ext + ) + + def init_set_GLLJ_static_und(m): + return ( + (g, l1, l2, j) + # for (g,l1,l2) in m.set_GLL # or m.J_und + # if l1!=l2 # redundant, as it is also a condition for set_GLL + # for (g,l1,l2,j) in m.set_GLLJ + # if j in m.set_J_und[(g,l1,l2)] + for (g, l1, l2, j) in m.set_GLLJ_und + if (g, l1, l2) in m.set_J_stt + if j in m.set_J_stt[(g, l1, l2)] ) - - def init_set_GLLJ_static_und(m): - return ((g,l1,l2,j) - # for (g,l1,l2) in m.set_GLL # or m.J_und - # if l1!=l2 # redundant, as it is also a condition for set_GLL - # for (g,l1,l2,j) in m.set_GLLJ - # if j in m.set_J_und[(g,l1,l2)] - for (g,l1,l2,j) in m.set_GLLJ_und - if (g,l1,l2) in m.set_J_stt - if j in m.set_J_stt[(g,l1,l2)]) - model.set_GLLJ_static_und = pyo.Set( - dimen=4, - initialize=init_set_GLLJ_static_und) - - def init_set_GLLJ_static_und_ext(m): - return ((g,l2,l1,j) - # for (g,l1,l2) in m.set_GLL # or m.J_und - # if l1!=l2 # redundant, as it is also a condition for set_GLL - # for (g,l1,l2,j) in m.set_GLLJ - # if j in m.set_J_und[(g,l1,l2)] - for (g,l1,l2,j) in m.set_GLLJ_und - if (g,l1,l2) in m.set_J_stt - if j in m.set_J_stt[(g,l1,l2)]) + + model.set_GLLJ_static_und = pyo.Set(dimen=4, initialize=init_set_GLLJ_static_und) + + def init_set_GLLJ_static_und_ext(m): + return ( + (g, l2, l1, j) + # for (g,l1,l2) in m.set_GLL # or m.J_und + # if l1!=l2 # redundant, as it is also a condition for set_GLL + # for (g,l1,l2,j) in m.set_GLLJ + # if j in m.set_J_und[(g,l1,l2)] + for (g, l1, l2, j) in m.set_GLLJ_und + if (g, l1, l2) in m.set_J_stt + if j in m.set_J_stt[(g, l1, l2)] + ) + model.set_GLLJ_static_und_ext = pyo.Set( - dimen=4, - initialize=init_set_GLLJ_static_und_ext) - + dimen=4, initialize=init_set_GLLJ_static_und_ext + ) + # model.set_GLLJ_static_und_red = ( # model.set_GLLJ_static_und | model.set_GLLJ_static_und_ext - # ) - - def init_set_GLLJ_static_und_red(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und_red - if (j in m.set_J_stt[(g,l1,l2)] or j in m.set_J_stt[(g,l2,l1)]) - ) - model.set_GLLJ_static_und_red = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_static_und_red if enable_initialisation else None - ) + # ) + + def init_set_GLLJ_static_und_red(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und_red + if (j in m.set_J_stt[(g, l1, l2)] or j in m.set_J_stt[(g, l2, l1)]) ) - + + model.set_GLLJ_static_und_red = pyo.Set( + dimen=4, + initialize=(init_set_GLLJ_static_und_red if enable_initialisation else None), + ) + # sets of GLLJ tuples for pre-existing undirected arcs w/ static losses - - model.set_GLLJ_static_und_pre = ( - model.set_GLLJ_static & model.set_GLLJ_und_pre - ) - + + model.set_GLLJ_static_und_pre = model.set_GLLJ_static & model.set_GLLJ_und_pre + # sets of GLLJ tuples for new undirected arcs w/ static losses - - model.set_GLLJ_static_und_new = ( - model.set_GLLJ_static & model.set_GLLJ_und_new - ) - + + 
model.set_GLLJ_static_und_new = model.set_GLLJ_static & model.set_GLLJ_und_new + # ************************************************************************* - + # set of GLLJ tuples for undirected arcs with redundancies - + # set of GLLJ tuples for undirected arcs - - def init_set_GLLJ_und_red(m): - return m.set_GLLJ_und | m.set_GLLJ_und_ext + + def init_set_GLLJ_und_red(m): + return m.set_GLLJ_und | m.set_GLLJ_und_ext + model.set_GLLJ_und_red = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_und_red if enable_initialisation else None - ) - ) - + dimen=4, initialize=(init_set_GLLJ_und_red if enable_initialisation else None) + ) + # set of GLLJ tuples for preexisting undirected arcs - - model.set_GLLJ_und_pre_red = ( - model.set_GLLJ_und_pre | model.set_GLLJ_und_pre_ext) - + + model.set_GLLJ_und_pre_red = model.set_GLLJ_und_pre | model.set_GLLJ_und_pre_ext + # # set of GLLJ tuples for new undirected arcs - + # model.set_GLLJ_und_new_red = ( # model.set_GLLJ_und_red - model.set_GLLJ_und_pre_red) - + # set of GLLJ tuples for preexisting undirected arcs with infinite capacity - + model.set_GLLJ_und_pre_inf_red = ( - model.set_GLLJ_und_pre_inf | model.set_GLLJ_und_pre_inf_ext) - + model.set_GLLJ_und_pre_inf | model.set_GLLJ_und_pre_inf_ext + ) + # set of GLLJ tuples for preexisting undirected arcs with finite capacity - + model.set_GLLJ_und_pre_fin_red = ( model.set_GLLJ_und_pre_red - model.set_GLLJ_und_pre_inf_red - ) + ) # ************************************************************************* - + # sets of GLLJ tuples for directed and undirected arcs (no redundancies) - + # set of GLLJ tuples for preexisting arcs with finite capacity - - model.set_GLLJ_pre_fin = ( - model.set_GLLJ_dir_pre_fin | model.set_GLLJ_und_pre_fin - ) - + + model.set_GLLJ_pre_fin = model.set_GLLJ_dir_pre_fin | model.set_GLLJ_und_pre_fin + # set of GLLJ tuples for new arcs - + def init_set_GLLJ_new(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ - if j not in m.set_J_pre[(g,l1,l2)]) - model.set_GLLJ_new = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_new if enable_initialisation else None - ) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ + if j not in m.set_J_pre[(g, l1, l2)] ) - + + model.set_GLLJ_new = pyo.Set( + dimen=4, initialize=(init_set_GLLJ_new if enable_initialisation else None) + ) + # set of GLLJ tuples for new individual arcs - + def init_set_GLLJ_sgl(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_new - if j not in m.set_J_col[(g,l1,l2)]) - model.set_GLLJ_sgl = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_sgl if enable_initialisation else None - ) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_new + if j not in m.set_J_col[(g, l1, l2)] ) - + + model.set_GLLJ_sgl = pyo.Set( + dimen=4, initialize=(init_set_GLLJ_sgl if enable_initialisation else None) + ) + # ************************************************************************* - + # sets of GLLJ tuples for directed and undirected arcs (with redundancies) - + # set of GLLJ tuples for all arcs (two tuples per undirected arc) - + # model.set_GLLJ_red = ( # model.set_GLLJ_dir | model.set_GLLJ_und_red # ) - + def init_set_GLLJ_red(m): # return model.set_GLLJ_dir | model.set_GLLJ_und_red return m.set_GLLJ | m.set_GLLJ_und_ext - # return ((g,l1,l2,j) + # return ((g,l1,l2,j) # for (g,l1,l2,j) in m.set_GLLJ # if j not in m.set_J_pre[(g,l1,l2)]) + model.set_GLLJ_red = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_red if enable_initialisation else None - ) - ) - + dimen=4, 
initialize=(init_set_GLLJ_red if enable_initialisation else None) + ) + # set of GLLJ tuples for preexisting arcs with finite capacity - + # model.set_GLLJ_pre_fin_red = ( # model.set_GLLJ_dir_pre_fin | model.set_GLLJ_und_pre_fin_red - # ) - + # ) + def init_set_GLLJ_pre_fin_red(m): # return m.set_GLLJ_dir_pre_fin | m.set_GLLJ_und_pre_fin_red return m.set_GLLJ_pre_fin | m.set_GLLJ_und_pre_fin_ext + model.set_GLLJ_pre_fin_red = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_pre_fin_red if enable_initialisation else None - ) - ) - + dimen=4, + initialize=(init_set_GLLJ_pre_fin_red if enable_initialisation else None), + ) + # ************************************************************************* - + # set of arc options - + model.set_H_gllj = pyo.Set(model.set_GLLJ_sgl) - + # TODO: set_GLLJH - + # sets of GLLJH tuples for arc options - + def init_set_GLLJH_sgl(m): - return ((g,l1,l2,j,h) - for (g,l1,l2,j) in m.set_GLLJ_sgl - for h in m.set_H_gllj[(g,l1,l2,j)]) - model.set_GLLJH_sgl = pyo.Set( - dimen=5, - initialize=( - init_set_GLLJH_sgl if enable_initialisation else None - ) + return ( + (g, l1, l2, j, h) + for (g, l1, l2, j) in m.set_GLLJ_sgl + for h in m.set_H_gllj[(g, l1, l2, j)] ) + + model.set_GLLJH_sgl = pyo.Set( + dimen=5, initialize=(init_set_GLLJH_sgl if enable_initialisation else None) + ) # set of GLLJH tuples for new arcs with static losses - + def init_set_GLLJH_static_new(m): _temp = [ - (g,l1,l2,j,h) - for (g,l1,l2,j,h) in m.set_GLLJH_sgl - if j in m.set_J_stt[(g,l1,l2)] + (g, l1, l2, j, h) + for (g, l1, l2, j, h) in m.set_GLLJH_sgl + if j in m.set_J_stt[(g, l1, l2)] + ] + _temp.extend( + [ + (g, l1, l2, j, h) + for (t, g, l1, l2, j) in m.set_TGLLJ + if j in m.set_J_stt[(g, l1, l2)] + for h in m.set_H_t[t] ] - _temp.extend([ - (g,l1,l2,j,h) - for (t,g,l1,l2,j) in m.set_TGLLJ - if j in m.set_J_stt[(g,l1,l2)] - for h in m.set_H_t[t] - ]) + ) return _temp + model.set_GLLJH_static_new = pyo.Set( - dimen=5, - initialize=( - init_set_GLLJH_static_new if enable_initialisation else None - ) - ) + dimen=5, + initialize=(init_set_GLLJH_static_new if enable_initialisation else None), + ) # ************************************************************************* - + # arc selection using SOS1 - + # set of GLLJ tuples for all new arcs using sos1 - - def init_set_GLLJ_arc_inv_sos1(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_sgl - if (g,l1,l2) in m.set_J_arc_sos1 - if j in m.set_J_arc_sos1[(g,l1,l2)]) - model.set_GLLJ_arc_inv_sos1 = pyo.Set( - dimen=4, - initialize=( - init_set_GLLJ_arc_inv_sos1 - if enable_initialisation else None - ) + + def init_set_GLLJ_arc_inv_sos1(m): + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_sgl + if (g, l1, l2) in m.set_J_arc_sos1 + if j in m.set_J_arc_sos1[(g, l1, l2)] ) - + + model.set_GLLJ_arc_inv_sos1 = pyo.Set( + dimen=4, + initialize=(init_set_GLLJ_arc_inv_sos1 if enable_initialisation else None), + ) + # set of GLLJH tuples for new arcs modelled using SOS1 - + def init_set_GLLJH_arc_inv_sos1(m): - return ((g,l1,l2,j,h) - for (g,l1,l2,j) in m.set_GLLJ_arc_inv_sos1 - for h in m.set_H_gllj[(g,l1,l2,j)]) + return ( + (g, l1, l2, j, h) + for (g, l1, l2, j) in m.set_GLLJ_arc_inv_sos1 + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + model.set_GLLJH_arc_inv_sos1 = pyo.Set( - dimen=5, + dimen=5, within=model.set_GLLJH_sgl, - initialize=( - init_set_GLLJH_arc_inv_sos1 - if enable_initialisation else None - ) - ) - + initialize=(init_set_GLLJH_arc_inv_sos1 if enable_initialisation else None), + ) + # set of GLLJ-indexed GLLJH 
tuples for new arcs modelled using SOS1 - + def init_set_GLLJH_arc_inv_sos1_gllj(m, g, l1, l2, j): - return ((g, l1, l2, j, h) - for h in m.set_H_gllj[(g, l1, l2, j)]) + return ((g, l1, l2, j, h) for h in m.set_H_gllj[(g, l1, l2, j)]) + model.set_GLLJH_arc_inv_sos1_gllj = pyo.Set( model.set_GLLJ_arc_inv_sos1, - dimen=5, + dimen=5, within=model.set_GLLJH_arc_inv_sos1, initialize=( - init_set_GLLJH_arc_inv_sos1_gllj - if enable_initialisation else None - ) - ) - + init_set_GLLJH_arc_inv_sos1_gllj if enable_initialisation else None + ), + ) + # ************************************************************************* # flow sense determination using SOS1 - + # set of GLLJ tuples for undirected arcs whose flow sense is det. via SOS1 - + def init_set_GLLJ_und_sns_sos1(m): - return ((g,l1,l2,j) - for (g,l1,l2,j) in m.set_GLLJ_und - if (g,l1,l2) in m.set_J_sns_sos1 - if j in m.set_J_sns_sos1[(g,l1,l2)]) - model.set_GLLJ_und_sns_sos1 = pyo.Set( - dimen=4, - #within=model.set_GLLJ_und, - initialize=( - init_set_GLLJ_und_sns_sos1 - if enable_initialisation else None - ) + return ( + (g, l1, l2, j) + for (g, l1, l2, j) in m.set_GLLJ_und + if (g, l1, l2) in m.set_J_sns_sos1 + if j in m.set_J_sns_sos1[(g, l1, l2)] ) - + + model.set_GLLJ_und_sns_sos1 = pyo.Set( + dimen=4, + # within=model.set_GLLJ_und, + initialize=(init_set_GLLJ_und_sns_sos1 if enable_initialisation else None), + ) + # set of GLLJQK tuples for flow sense determination using SOS1 - + model.set_GLLJQK_und_sns_sos1_red = pyo.Set( dimen=6, initialize=( - model.set_GLLJ_und_red*model.set_QK - if enable_initialisation else None - ) - ) - + model.set_GLLJ_und_red * model.set_QK if enable_initialisation else None + ), + ) + # set of GLLJQK tuples for both directions indexed by the ref. GLLJQK tuple # note: this set includes tuples for both directions - + def init_set_GLLJQK_und_sns_sos1_red_gllj(m, g, l1, l2, j, q, k): return ((g, l1, l2, j, q, k), (g, l2, l1, j, q, k)) + model.set_GLLJQK_und_sns_sos1_red_gllj = pyo.Set( model.set_GLLJ_und_sns_sos1, model.set_QK, - dimen=6, - initialize=init_set_GLLJQK_und_sns_sos1_red_gllj) - + dimen=6, + initialize=init_set_GLLJQK_und_sns_sos1_red_gllj, + ) + # ************************************************************************* - + # inputs - + # set of IM tuples - - def init_set_IM(m): - return ((i,m_i) - for i in m.set_I - for m_i in m.set_M[i]) - model.set_IM = pyo.Set(dimen=2, - initialize=init_set_IM) - + + def init_set_IM(m): + return ((i, m_i) for i in m.set_I for m_i in m.set_M[i]) + + model.set_IM = pyo.Set(dimen=2, initialize=init_set_IM) + # set of IM tuples for systems with binary signals - - def init_set_IM_bin(m): - return ((i,m_i) - for (i,m_i) in m.set_IM - if m_i in m.set_M_bin[i]) - model.set_IM_bin = pyo.Set(dimen=2, - initialize=init_set_IM_bin, - within=model.set_IM) - + + def init_set_IM_bin(m): + return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_bin[i]) + + model.set_IM_bin = pyo.Set(dimen=2, initialize=init_set_IM_bin, within=model.set_IM) + # set of IM tuples for tech. 
with dimensionable reference mode levels - + def init_set_IM_dim(m): - return ((i,m_i) - for (i,m_i) in m.set_IM - if m_i in m.set_M_dim[i]) - model.set_IM_dim = pyo.Set(dimen=2, - initialize=init_set_IM_dim, - within=model.set_IM) - + return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_dim[i]) + + model.set_IM_dim = pyo.Set(dimen=2, initialize=init_set_IM_dim, within=model.set_IM) + # set of IM tuples for fixed amplitude inputs - + def init_set_IM_fix(m): - - return ((i,m_i) - for (i,m_i) in m.set_IM - if m_i in m.set_M_fix[i]) - model.set_IM_fix = pyo.Set(dimen=2, - initialize=init_set_IM_fix, - within=model.set_IM) - + return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_fix[i]) + + model.set_IM_fix = pyo.Set(dimen=2, initialize=init_set_IM_fix, within=model.set_IM) + # set of IM tuples for technologies whose modes can induce externalities - + def init_set_IM_ext(m): - - return ((i,m_i) - for (i,m_i) in m.set_IM - if m_i in m.set_M_ext[i]) - model.set_IM_ext = pyo.Set(dimen=2, - initialize=init_set_IM_ext, - within=model.set_IM) + return ((i, m_i) for (i, m_i) in m.set_IM if m_i in m.set_M_ext[i]) + + model.set_IM_ext = pyo.Set(dimen=2, initialize=init_set_IM_ext, within=model.set_IM) # ************************************************************************* - + # states - + # set of IN tuples - - def init_set_IN(m): - return ((i, n_i) # IN tuple - for i in m.set_I - for n_i in m.set_N[i]) # for each state - model.set_IN = pyo.Set(dimen=2, - initialize=init_set_IN) - + + def init_set_IN(m): + return ( + (i, n_i) for i in m.set_I for n_i in m.set_N[i] # IN tuple + ) # for each state + + model.set_IN = pyo.Set(dimen=2, initialize=init_set_IN) + # set of IN tuples for states with fixed bounds - - def init_set_IN_fix(m): - return ((i, n_i) - for i in m.set_I - for n_i in m.set_N_fix[i]) - model.set_IN_fix = pyo.Set(dimen=2, - initialize=init_set_IN_fix) - + + def init_set_IN_fix(m): + return ((i, n_i) for i in m.set_I for n_i in m.set_N_fix[i]) + + model.set_IN_fix = pyo.Set(dimen=2, initialize=init_set_IN_fix) + # set of IN tuples for converters with amplitude-constrained states - + def init_set_IN_dim_eq(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_dim_eq[i]) - model.set_IN_dim_eq = pyo.Set(dimen=2, - initialize=init_set_IN_dim_eq, - within=model.set_IN) - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_eq[i]) + + model.set_IN_dim_eq = pyo.Set( + dimen=2, initialize=init_set_IN_dim_eq, within=model.set_IN + ) + # set of IN tuples for converters with pos. amplitude-constrained states - + def init_set_IN_dim_pos(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_dim_pos[i]) - model.set_IN_dim_pos = pyo.Set(dimen=2, - initialize=init_set_IN_dim_pos, - within=model.set_IN) - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_pos[i]) + + model.set_IN_dim_pos = pyo.Set( + dimen=2, initialize=init_set_IN_dim_pos, within=model.set_IN + ) + # set of IN tuples for converters with neg. 
amplitude-constrained states - - def init_set_IN_dim_neg(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_dim_neg[i]) - model.set_IN_dim_neg = pyo.Set(dimen=2, - initialize=init_set_IN_dim_neg, - within=model.set_IN) - + + def init_set_IN_dim_neg(m): + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_dim_neg[i]) + + model.set_IN_dim_neg = pyo.Set( + dimen=2, initialize=init_set_IN_dim_neg, within=model.set_IN + ) + # set of IN tuples for converters with externality-inducing states - + def init_set_IN_ext(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_ext[i]) - model.set_IN_ext = pyo.Set(dimen=2, - initialize=init_set_IN_ext, - within=model.set_IN) - - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ext[i]) + + model.set_IN_ext = pyo.Set(dimen=2, initialize=init_set_IN_ext, within=model.set_IN) + # set of IN tuples for positive variation-penalised states - + def init_set_IN_pos_var(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_pos_var[i]) - model.set_IN_pos_var = pyo.Set(dimen=2, - initialize=init_set_IN_pos_var, - within=model.set_IN) - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_pos_var[i]) + + model.set_IN_pos_var = pyo.Set( + dimen=2, initialize=init_set_IN_pos_var, within=model.set_IN + ) + # set of IN tuples for negative variation-penalised states - + def init_set_IN_neg_var(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_neg_var[i]) - model.set_IN_neg_var = pyo.Set(dimen=2, - initialize=init_set_IN_neg_var, - within=model.set_IN) - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_neg_var[i]) + + model.set_IN_neg_var = pyo.Set( + dimen=2, initialize=init_set_IN_neg_var, within=model.set_IN + ) + # set of IN tuples for upper reference violation penalised states - + def init_set_IN_ref_u(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_ref_u[i]) - model.set_IN_ref_u = pyo.Set(dimen=2, - initialize=init_set_IN_ref_u, - within=model.set_IN) - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ref_u[i]) + + model.set_IN_ref_u = pyo.Set( + dimen=2, initialize=init_set_IN_ref_u, within=model.set_IN + ) + # set of IN tuples for lower reference violation penalised states - + def init_set_IN_ref_d(m): - return ((i,n_i) - for (i,n_i) in m.set_IN - if n_i in m.set_N_ref_d[i]) - model.set_IN_ref_d = pyo.Set(dimen=2, - initialize=init_set_IN_ref_d, - within=model.set_IN) - - # ************************************************************************* - + return ((i, n_i) for (i, n_i) in m.set_IN if n_i in m.set_N_ref_d[i]) + + model.set_IN_ref_d = pyo.Set( + dimen=2, initialize=init_set_IN_ref_d, within=model.set_IN + ) + + # ************************************************************************* + # outputs - + # set of IR tuples - - def init_set_IR(m): - return ((i, r_i) - for i in m.set_I - for r_i in m.set_R[i]) - model.set_IR = pyo.Set(dimen=2, - initialize=init_set_IR) - + + def init_set_IR(m): + return ((i, r_i) for i in m.set_I for r_i in m.set_R[i]) + + model.set_IR = pyo.Set(dimen=2, initialize=init_set_IR) + # set of IR tuples for outputs with fixed bounds - - def init_set_IR_fix(m): - return ((i, r_i) - for i in m.set_I - for r_i in m.set_R_fix[i]) - model.set_IR_fix = pyo.Set(dimen=2, - initialize=init_set_IR_fix) - + + def init_set_IR_fix(m): + return ((i, r_i) for i in m.set_I for r_i in m.set_R_fix[i]) + + model.set_IR_fix = pyo.Set(dimen=2, initialize=init_set_IR_fix) + # set of IR tuples for converters with 
matching pos. and neg. out. amp. limits
-    
-    def init_set_IR_dim_eq(m):
-        return ((i, r_i)
-                for (i, r_i) in m.set_IR
-                if r_i in m.set_R_dim_eq[i])
-    model.set_IR_dim_eq = pyo.Set(dimen=2,
-                                  initialize=init_set_IR_dim_eq)
-    
+
+    def init_set_IR_dim_eq(m):
+        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_eq[i])
+
+    model.set_IR_dim_eq = pyo.Set(dimen=2, initialize=init_set_IR_dim_eq)
+
     # set of IR tuples for converters with neg. amplitude-constrained outputs
-    def init_set_IR_dim_neg(m):
-        return ((i, r_i)
-                for (i, r_i) in m.set_IR
-                if r_i in m.set_R_dim_neg[i])
-    model.set_IR_dim_neg = pyo.Set(dimen=2,
-                                   initialize=init_set_IR_dim_neg)
-    
+    def init_set_IR_dim_neg(m):
+        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_neg[i])
+
+    model.set_IR_dim_neg = pyo.Set(dimen=2, initialize=init_set_IR_dim_neg)
+
     # set of IR tuples for converters with amplitude-penalised outputs
-    
-    def init_set_IR_dim(m):
-        return ((i, r_i)
-                for (i, r_i) in m.set_IR
-                if r_i in m.set_R_dim[i])
-    model.set_IR_dim = pyo.Set(dimen=2,
-                               initialize=init_set_IR_dim)
-    
+
+    def init_set_IR_dim(m):
+        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim[i])
+
+    model.set_IR_dim = pyo.Set(dimen=2, initialize=init_set_IR_dim)
+
     # set of IR tuples for converters with pos. amplitude-constrained outputs
-    
-    def init_set_IR_dim_pos(m):
-        return ((i, r_i)
-                for (i, r_i) in m.set_IR
-                if r_i in m.set_R_dim_pos[i])
-    model.set_IR_dim_pos = pyo.Set(dimen=2,
-                                   initialize=init_set_IR_dim_pos)
-    
+
+    def init_set_IR_dim_pos(m):
+        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_dim_pos[i])
+
+    model.set_IR_dim_pos = pyo.Set(dimen=2, initialize=init_set_IR_dim_pos)
+
     # set of IR tuples for converters with externality-inducing outputs
-    
-    def init_set_IR_ext(m):
-        return ((i, r_i)
-                for (i, r_i) in m.set_IR
-                if r_i in m.set_R_ext[i])
-    model.set_IR_ext = pyo.Set(dimen=2,
-                               initialize=init_set_IR_ext)
-    
-    # *************************************************************************
-    
+
+    def init_set_IR_ext(m):
+        return ((i, r_i) for (i, r_i) in m.set_IR if r_i in m.set_R_ext[i])
+
+    model.set_IR_ext = pyo.Set(dimen=2, initialize=init_set_IR_ext)
+
+    # *************************************************************************
+
     # combined inputs/states/outputs
     # TODO: narrow down these sets if possible
-    
+
     # set of INN tuples
-    
     def init_set_INN(m):
-        return ((i, n1, n2)
-                for (i, n1) in m.set_IN
-                for n2 in m.set_N[i])
-    model.set_INN = pyo.Set(dimen=3,
-                            initialize=init_set_INN)
-    
+        return ((i, n1, n2) for (i, n1) in m.set_IN for n2 in m.set_N[i])
+
+    model.set_INN = pyo.Set(dimen=3, initialize=init_set_INN)
+
     # set of INM tuples
-    
-    def init_set_INM(m):
-        return ((i, n_i, m_i)
-                for (i, n_i) in m.set_IN
-                for m_i in m.set_M[i])
-    model.set_INM = pyo.Set(dimen=3,
-                            initialize=init_set_INM)
-    
+
+    def init_set_INM(m):
+        return ((i, n_i, m_i) for (i, n_i) in m.set_IN for m_i in m.set_M[i])
+
+    model.set_INM = pyo.Set(dimen=3, initialize=init_set_INM)
+
     # set of IRM tuples
-    
-    def init_set_IRM(m):
-        return ((i, r_i, m_i)
-                for (i, r_i) in m.set_IR
-                for m_i in m.set_M[i]) # can be further constrained
-    model.set_IRM = pyo.Set(dimen=3,
-                            initialize=init_set_IRM)
+
+    def init_set_IRM(m):
+        return (
+            (i, r_i, m_i) for (i, r_i) in m.set_IR for m_i in m.set_M[i]
+        )  # can be further constrained
+
+    model.set_IRM = pyo.Set(dimen=3, initialize=init_set_IRM)

     # set of IRN tuples
-    
-    def init_set_IRN(m):
-        return ((i, r_i, n_i)
-                for (i, r_i) in m.set_IR
-                for n_i in m.set_N[i]) # can be further constrained
-    model.set_IRN = pyo.Set(dimen=3,
-                            initialize=init_set_IRN)
-    
+
+    def init_set_IRN(m):
+        return (
+            (i, r_i, n_i) for (i, r_i) in m.set_IR for n_i in m.set_N[i]
+        )  # can be further constrained
+
+    model.set_IRN = pyo.Set(dimen=3, initialize=init_set_IRN)
+
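Aside: the set_IM, set_IN and set_IR families above, and the combined INN/INM/IRM/IRN sets just defined, all follow the same pattern: a per-converter indexed set (set_M[i], set_N[i], set_R[i]) is flattened into a tuple set so that Params and Vars can be declared over one sparse index instead of the full cross product. A minimal, self-contained sketch of that pattern, with invented converter and mode names that are not part of this patch:

    import pyomo.environ as pyo

    m = pyo.ConcreteModel()
    # two hypothetical converters, each with its own mode set
    m.set_I = pyo.Set(initialize=["hp", "chp"])
    m.set_M = pyo.Set(m.set_I, initialize={"hp": [0], "chp": [0, 1]})

    # flatten the indexed family into (i, m_i) tuples, as init_set_IM does above
    def init_set_IM(m):
        return ((i, m_i) for i in m.set_I for m_i in m.set_M[i])

    m.set_IM = pyo.Set(dimen=2, initialize=init_set_IM)
    # set_IM now holds ("hp", 0), ("chp", 0) and ("chp", 1), so a Param or Var
    # declared over set_IM only has keys for (converter, mode) pairs that exist

Declaring components over these flat sets keeps invalid (i, m_i) pairs out of the model entirely, which is why the TODO above only asks to narrow the combined sets further, not to replace the pattern.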
     # *************************************************************************
     # *************************************************************************
-    
+
     # set of arc groups
-    
+
     model.set_T = pyo.Set()
-    
+
     # set of mandatory arc groups
-    
+
     model.set_T_mdt = pyo.Set(within=model.set_T)
-    
+
     # set of arc groups requiring interface variables
-    
+
     model.set_T_int = pyo.Set(within=model.set_T)
-    
+
     # set of arc groups relying on SOS1
-    
+
     model.set_T_sos1 = pyo.Set(within=model.set_T)
-    
+
     # set of arc groups relying on binary variables
-    
+
     model.set_T_bin = pyo.Set(within=model.set_T)
-    
+
     # set of arc groups relying on non-negative real variables
-    
-    model.set_T_nnr = pyo.Set(within=model.set_T_sos1) # or set_T
-    
+
+    model.set_T_nnr = pyo.Set(within=model.set_T_sos1)  # or set_T
+
     # # set of GLLJ tuples for arcs in arc groups
-    
+
     # model.set_GLLJ_col = pyo.Set(within=model.set_GLLJ_new)
-    
+
     # set of arcs in the various arc groups
-    
-    model.set_GLLJ_col_t = pyo.Set(model.set_T,
-                                   within=model.set_GLLJ_new)
-    
+
+    model.set_GLLJ_col_t = pyo.Set(model.set_T, within=model.set_GLLJ_new)
+
     # set of arc options for arc groups
-    
+
     model.set_H_t = pyo.Set(model.set_T)
-    
+
     # set of (t,h) tuples
-    
-    def init_set_TH(m):
-        return ((t, h)
-                for t in m.set_T
-                for h in m.set_H_t[t])
-    model.set_TH = pyo.Set(dimen=2,
-                           initialize=init_set_TH)
-    
+
+    def init_set_TH(m):
+        return ((t, h) for t in m.set_T for h in m.set_H_t[t])
+
+    model.set_TH = pyo.Set(dimen=2, initialize=init_set_TH)
+
     # set of (t,g,l1,l2,j) tuples
-    
-    def init_set_TGLLJ(m):
-        return ((t, g, l1, l2, j)
-                for t in m.set_T
-                for (g,l1,l2,j) in m.set_GLLJ_col_t[t])
-    model.set_TGLLJ = pyo.Set(dimen=5,
-                              initialize=init_set_TGLLJ)
-    
+
+    def init_set_TGLLJ(m):
+        return (
+            (t, g, l1, l2, j) for t in m.set_T for (g, l1, l2, j) in m.set_GLLJ_col_t[t]
+        )
+
+    model.set_TGLLJ = pyo.Set(dimen=5, initialize=init_set_TGLLJ)
+
     # set of (t,g,l1,l2,j) tuples for undirected arcs
-    
-    def init_set_TGLLJ_und(m):
-        return ((t, g, l1, l2, j)
-                for (t, g, l1, l2, j) in m.set_TGLLJ
-                if j in m.set_J_und[(g,l1,l2)])
-    model.set_TGLLJ_und = pyo.Set(dimen=5,
-                                  initialize=init_set_TGLLJ_und)
-    
+
+    def init_set_TGLLJ_und(m):
+        return (
+            (t, g, l1, l2, j)
+            for (t, g, l1, l2, j) in m.set_TGLLJ
+            if j in m.set_J_und[(g, l1, l2)]
+        )
+
+    model.set_TGLLJ_und = pyo.Set(dimen=5, initialize=init_set_TGLLJ_und)
+
     # set of TH tuples for groups using SOS1
-    
     def init_set_TH_arc_inv_sos1(m):
-        return ((t,h)
-                for (t,h) in m.set_TH
-                if t in m.set_T_sos1)
-                # for t in m.set_T_sos1
-                # for h in m.set_H_t[t])
+        return ((t, h) for (t, h) in m.set_TH if t in m.set_T_sos1)
+        # for t in m.set_T_sos1
+        # for h in m.set_H_t[t])
+
     model.set_TH_arc_inv_sos1 = pyo.Set(
-        dimen=2,
-        within=model.set_TH,
-        initialize=init_set_TH_arc_inv_sos1)
-    
+        dimen=2, within=model.set_TH, initialize=init_set_TH_arc_inv_sos1
+    )
+
     # set of t-indexed TH tuples for groups of arcs relying on SOS1
-    
     def init_set_TH_arc_inv_sos1_t(m, t):
         return ((t, h) for h in m.set_H_t[t])
+
     model.set_TH_arc_inv_sos1_t = pyo.Set(
         model.set_T_sos1,
-        dimen=2,
+        dimen=2,
         within=model.set_TH_arc_inv_sos1,
-        initialize=init_set_TH_arc_inv_sos1_t)
-    
+        initialize=init_set_TH_arc_inv_sos1_t,
+    )
+
     # minimum cost of a group of arcs
-    
-    model.param_c_arc_min_th = pyo.Param(model.set_TH,
-                                         within=pyo.NonNegativeReals)
-    
+
+    model.param_c_arc_min_th = pyo.Param(model.set_TH, within=pyo.NonNegativeReals)
+
     # unit flow amplitude cost
-    
-    model.param_c_arc_var_t = pyo.Param(model.set_T,
-                                        within=pyo.NonNegativeReals)
-    
+
+    model.param_c_arc_var_t = pyo.Param(model.set_T, within=pyo.NonNegativeReals)
+
     # maximum nominal amplitude
-    
-    model.param_v_amp_max_th = pyo.Param(model.set_TH,
-                                         within=pyo.PositiveReals)
-    
+
+    model.param_v_amp_max_th = pyo.Param(model.set_TH, within=pyo.PositiveReals)
+
     # sos1 weights for arc group options
-    
     model.param_arc_inv_sos1_weights_th = pyo.Param(
-        model.set_TH_arc_inv_sos1,
-        within=pyo.NonNegativeReals)
-    
+        model.set_TH_arc_inv_sos1, within=pyo.NonNegativeReals
+    )
+
     # capital cost of group
-    
-    model.var_capex_arc_col_t = pyo.Var(model.set_T,
-                                        within=pyo.NonNegativeReals)
-    
+
+    model.var_capex_arc_col_t = pyo.Var(model.set_T, within=pyo.NonNegativeReals)
+
     # investment decision for group (can be binary or nnr)
-    
-    def domain_var_delta_arc_inv_th(m, t, h):
+
+    def domain_var_delta_arc_inv_th(m, t, h):
         # has to be in set_J_sos1 and set_J_mdt
-        if t in m.set_T_nnr:
-            return pyo.UnitInterval # [0,1]; alternatively: [0,inf]
+        if t in m.set_T_nnr:
+            return pyo.UnitInterval  # [0,1]; alternatively: [0,inf]
         else:
-            return pyo.Binary # {0,1}
-    model.var_delta_arc_inv_th = pyo.Var(model.set_TH,
-                                         domain=domain_var_delta_arc_inv_th)
-    
+            return pyo.Binary  # {0,1}
+
+    model.var_delta_arc_inv_th = pyo.Var(
+        model.set_TH, domain=domain_var_delta_arc_inv_th
+    )
+
     # interface variable for group
-    
-    model.var_xi_arc_inv_t = pyo.Var(model.set_T_int,
-                                     within=pyo.UnitInterval)
-    
+
+    model.var_xi_arc_inv_t = pyo.Var(model.set_T_int, within=pyo.UnitInterval)
+
     # nominal amplitude
-    
-    model.var_v_amp_t = pyo.Var(model.set_T)
-    
+
+    model.var_v_amp_t = pyo.Var(model.set_T)
+
     # *************************************************************************
     # *************************************************************************
     # *************************************************************************
     # *************************************************************************
-    
+
     # parameters

     # *************************************************************************
     # *************************************************************************
-    
+
     # objective function
-    
+
     # *************************************************************************
-    
+
     # general parameters
-    
+
     # assessment weight (default: 1)
-    
-    model.param_c_wgt_q = pyo.Param(model.set_Q,
-                                    within=pyo.PositiveReals,
-                                    default=1)
-    
+
+    model.param_c_wgt_q = pyo.Param(model.set_Q, within=pyo.PositiveReals, default=1)
+
     # discount factors
-    
-    model.param_c_df_qp = pyo.Param(model.set_QP,
-                                    within=pyo.PositiveReals)
-    
+
+    model.param_c_df_qp = pyo.Param(model.set_QP, within=pyo.PositiveReals)
+
     # relative weight of time step k within representative period p
-    
-    model.param_c_time_qpk = pyo.Param(model.set_QPK,
-                                       within=pyo.PositiveReals,
-                                       default=1)
-    
+
+    model.param_c_time_qpk = pyo.Param(
+        model.set_QPK, within=pyo.PositiveReals, default=1
+    )
+
     # resource prices
-    
-    model.param_p_glqpks = pyo.Param(model.set_GLQPKS,
-                                     within=pyo.NonNegativeReals)
-    
+
+    model.param_p_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
+
     # maximum resource volumes for each price segment
-    
-    model.param_v_max_glqpks = pyo.Param(model.set_GLQPKS,
-                                         within=pyo.NonNegativeReals)
-    
+
+    model.param_v_max_glqpks = pyo.Param(model.set_GLQPKS, within=pyo.NonNegativeReals)
+
     # converters
-    
+
     # 
externality cost per input unit - - model.param_c_ext_u_imqk = pyo.Param(model.set_IM_ext, - model.set_QK, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_ext_u_imqk = pyo.Param( + model.set_IM_ext, model.set_QK, within=pyo.NonNegativeReals, default=0 + ) + # externality cost per output unit - - model.param_c_ext_y_irqk = pyo.Param(model.set_IR_ext, - model.set_QK, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_ext_y_irqk = pyo.Param( + model.set_IR_ext, model.set_QK, within=pyo.NonNegativeReals, default=0 + ) + # externality cost per state unit - - model.param_c_ext_x_inqk = pyo.Param(model.set_IN_ext, - model.set_QK, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_ext_x_inqk = pyo.Param( + model.set_IN_ext, model.set_QK, within=pyo.NonNegativeReals, default=0 + ) + # unit cost of positive state variations - - model.param_c_pos_var_in = pyo.Param(model.set_IN_pos_var, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_pos_var_in = pyo.Param( + model.set_IN_pos_var, within=pyo.NonNegativeReals, default=0 + ) + # unit cost of negative state variations - - model.param_c_neg_var_in = pyo.Param(model.set_IN_neg_var, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_neg_var_in = pyo.Param( + model.set_IN_neg_var, within=pyo.NonNegativeReals, default=0 + ) + # unit cost of upper state reference violations - - model.param_c_ref_u_inqk = pyo.Param(model.set_IN_ref_u, - model.set_QK, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_ref_u_inqk = pyo.Param( + model.set_IN_ref_u, model.set_QK, within=pyo.NonNegativeReals, default=0 + ) + # unit cost of lower state reference violations - - model.param_c_ref_d_inqk = pyo.Param(model.set_IN_ref_d, - model.set_QK, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_ref_d_inqk = pyo.Param( + model.set_IN_ref_d, model.set_QK, within=pyo.NonNegativeReals, default=0 + ) + # minimum converter cost - - model.param_c_cvt_min_i = pyo.Param(model.set_I_new, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_cvt_min_i = pyo.Param( + model.set_I_new, within=pyo.NonNegativeReals, default=0 + ) + # unit (positive) input amplitude cost - - model.param_c_cvt_u_im = pyo.Param(model.set_IM_dim, - within=pyo.NonNegativeReals, - default=0) + + model.param_c_cvt_u_im = pyo.Param( + model.set_IM_dim, within=pyo.NonNegativeReals, default=0 + ) # unit output amplitude cost - - model.param_c_cvt_y_ir = pyo.Param(model.set_IR_dim, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_cvt_y_ir = pyo.Param( + model.set_IR_dim, within=pyo.NonNegativeReals, default=0 + ) + # unit positive state amplitude cost - - model.param_c_cvt_x_pos_in = pyo.Param(model.set_IN_dim_pos, - within=pyo.NonNegativeReals, - default=0) - + + model.param_c_cvt_x_pos_in = pyo.Param( + model.set_IN_dim_pos, within=pyo.NonNegativeReals, default=0 + ) + # unit negative state amplitude cost - - model.param_c_cvt_x_neg_in = pyo.Param(model.set_IN_dim_neg, - within=pyo.NonNegativeReals, - default=0) + + model.param_c_cvt_x_neg_in = pyo.Param( + model.set_IN_dim_neg, within=pyo.NonNegativeReals, default=0 + ) # unit positive output amplitude cost - - model.param_c_cvt_y_pos_ir = pyo.Param(model.set_IR_dim_pos, - within=pyo.NonNegativeReals, - default=0) + + model.param_c_cvt_y_pos_ir = pyo.Param( + model.set_IR_dim_pos, within=pyo.NonNegativeReals, default=0 + ) # unit negative output amplitude cost - - model.param_c_cvt_y_neg_ir = pyo.Param(model.set_IR_dim_neg, - 
within=pyo.NonNegativeReals, - default=0) - + + model.param_c_cvt_y_neg_ir = pyo.Param( + model.set_IR_dim_neg, within=pyo.NonNegativeReals, default=0 + ) + # ************************************************************************* - + # arcs - + # minimum arc cost - - model.param_c_arc_min_glljh = pyo.Param(model.set_GLLJH_sgl, - within=pyo.NonNegativeReals) - + + model.param_c_arc_min_glljh = pyo.Param( + model.set_GLLJH_sgl, within=pyo.NonNegativeReals + ) + # specific arc flow amplitude cost - - model.param_c_arc_var_gllj = pyo.Param(model.set_GLLJ_sgl, # inclusive - within=pyo.NonNegativeReals) - + + model.param_c_arc_var_gllj = pyo.Param( + model.set_GLLJ_sgl, within=pyo.NonNegativeReals # inclusive + ) + # arc transmission efficiency - - model.param_eta_glljqk = pyo.Param(model.set_GLLJ_red, - model.set_QK, - within=pyo.PositiveReals) + + model.param_eta_glljqk = pyo.Param( + model.set_GLLJ_red, model.set_QK, within=pyo.PositiveReals + ) # maximum arc flow amplitude (new arcs only) - - model.param_v_amp_max_glljh = pyo.Param(model.set_GLLJH_sgl, - within=pyo.PositiveReals) - + + model.param_v_amp_max_glljh = pyo.Param( + model.set_GLLJH_sgl, within=pyo.PositiveReals + ) + # upper bounds for arc flows - - model.param_v_ub_glljqk = pyo.Param(model.set_GLLJ_pre_fin_red, - model.set_QK, - within=pyo.PositiveReals) - + + model.param_v_ub_glljqk = pyo.Param( + model.set_GLLJ_pre_fin_red, model.set_QK, within=pyo.PositiveReals + ) + # arc flow amplitude adjustment coefficient (can be zero) - - model.param_f_amp_v_glljqk = pyo.Param(model.set_GLLJ_new, - model.set_QK, - within=pyo.NonNegativeReals) - + + model.param_f_amp_v_glljqk = pyo.Param( + model.set_GLLJ_new, model.set_QK, within=pyo.NonNegativeReals + ) + # sos1 weights for arc options - + model.param_arc_inv_sos1_weights_glljh = pyo.Param( - model.set_GLLJH_arc_inv_sos1, - within=pyo.NonNegativeReals) - + model.set_GLLJH_arc_inv_sos1, within=pyo.NonNegativeReals + ) + # sos1 weights for flow senses # TODO: define these coefficients here using an init method model.param_arc_sns_sos1_weights_glljqk = pyo.Param( - model.set_GLLJQK_und_sns_sos1_red, - within=pyo.NonNegativeReals) - + model.set_GLLJQK_und_sns_sos1_red, within=pyo.NonNegativeReals + ) + # ************************************************************************* - + # relative variation of arc-dependent losses - + # arc-dependent losses (new arcs) - - model.param_w_new_glljhqk = pyo.Param(model.set_GLLJH_static_new, - model.set_QK, - within=pyo.NonNegativeReals) - + + model.param_w_new_glljhqk = pyo.Param( + model.set_GLLJH_static_new, model.set_QK, within=pyo.NonNegativeReals + ) + # arc-dependent losses (preexisting arcs) - - model.param_w_pre_glljqk = pyo.Param(model.set_GLLJ_static_pre, - model.set_QK, - within=pyo.NonNegativeReals) - + + model.param_w_pre_glljqk = pyo.Param( + model.set_GLLJ_static_pre, model.set_QK, within=pyo.NonNegativeReals + ) + # ************************************************************************* - + # network # static flow needs - - model.param_v_base_glqk = pyo.Param(model.set_GL_not_exp_imp, - model.set_QK, - within=pyo.Reals, - default=0) + + model.param_v_base_glqk = pyo.Param( + model.set_GL_not_exp_imp, model.set_QK, within=pyo.Reals, default=0 + ) # maximum number of arcs per node pair - + model.param_max_number_parallel_arcs = pyo.Param( model.set_GLL, - #within=pyo.PositiveIntegers, + # within=pyo.PositiveIntegers, within=pyo.PositiveReals, - default=inf) - + default=inf, + ) + def init_set_GLL_arc_max(m): - return ((g,l1,l2) - 
for (g,l1,l2) in m.param_max_number_parallel_arcs - if isfinite(m.param_max_number_parallel_arcs[(g,l1,l2)])) - model.set_GLL_arc_max = pyo.Set(dimen=3, - within=model.set_GLL, - initialize=init_set_GLL_arc_max) - + return ( + (g, l1, l2) + for (g, l1, l2) in m.param_max_number_parallel_arcs + if isfinite(m.param_max_number_parallel_arcs[(g, l1, l2)]) + ) + + model.set_GLL_arc_max = pyo.Set( + dimen=3, within=model.set_GLL, initialize=init_set_GLL_arc_max + ) + # effect of system inputs on specific network and node pairs - - model.param_a_nw_glimqk = pyo.Param(model.set_GL_not_exp_imp, - model.set_IM, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - + + model.param_a_nw_glimqk = pyo.Param( + model.set_GL_not_exp_imp, + model.set_IM, + model.set_QK, + default=0, # default: no effect + within=pyo.Reals, + ) + # effect of system outputs on specific network and node pairs - - model.param_a_nw_glirqk = pyo.Param(model.set_GL_not_exp_imp, - model.set_IR, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - - # ************************************************************************* - + + model.param_a_nw_glirqk = pyo.Param( + model.set_GL_not_exp_imp, + model.set_IR, + model.set_QK, + default=0, # default: no effect + within=pyo.Reals, + ) + + # ************************************************************************* + # inputs - + # upper bounds for (non-binary, non-dimensionable) inputs - - model.param_u_ub_imqk = pyo.Param(model.set_IM_fix, - model.set_QK, - within=pyo.PositiveReals) - + + model.param_u_ub_imqk = pyo.Param( + model.set_IM_fix, model.set_QK, within=pyo.PositiveReals + ) + # maximum input limits - - model.param_u_amp_max_im = pyo.Param(model.set_IM_dim, - within=pyo.PositiveReals, - default=1) - + + model.param_u_amp_max_im = pyo.Param( + model.set_IM_dim, within=pyo.PositiveReals, default=1 + ) + # time interval-dependent adjustment coefficients for input limits - - model.param_f_amp_u_imqk = pyo.Param(model.set_IM_dim, - model.set_QK, - within=pyo.PositiveReals, - default=1) - - # ************************************************************************* - + + model.param_f_amp_u_imqk = pyo.Param( + model.set_IM_dim, model.set_QK, within=pyo.PositiveReals, default=1 + ) + + # ************************************************************************* + # states - + # initial conditions - - model.param_x_inq0 = pyo.Param(model.set_IN, - model.set_Q, - within=pyo.Reals) - + + model.param_x_inq0 = pyo.Param(model.set_IN, model.set_Q, within=pyo.Reals) + # fixed upper bounds for state variables - - model.param_x_ub_irqk = pyo.Param(model.set_IN_fix, - model.set_QK, - within=pyo.Reals) - + + model.param_x_ub_irqk = pyo.Param(model.set_IN_fix, model.set_QK, within=pyo.Reals) + # fixed lower bounds for state variables - - model.param_x_lb_irqk = pyo.Param(model.set_IN_fix, - model.set_QK, - within=pyo.Reals) + + model.param_x_lb_irqk = pyo.Param(model.set_IN_fix, model.set_QK, within=pyo.Reals) # maximum positive amplitude for states - - model.param_x_amp_pos_max_in = pyo.Param(model.set_IN_dim_pos, - within=pyo.PositiveReals) - + + model.param_x_amp_pos_max_in = pyo.Param( + model.set_IN_dim_pos, within=pyo.PositiveReals + ) + # maximum negative amplitude for states - - model.param_x_amp_neg_max_in = pyo.Param(model.set_IN_dim_neg, - within=pyo.PositiveReals) - + + model.param_x_amp_neg_max_in = pyo.Param( + model.set_IN_dim_neg, within=pyo.PositiveReals + ) + # adjustment of positive state amplitude limits - - model.param_f_amp_pos_x_inqk = 
pyo.Param(model.set_IN_dim_pos, - model.set_QK, - within=pyo.PositiveReals, - default=1) - + + model.param_f_amp_pos_x_inqk = pyo.Param( + model.set_IN_dim_pos, model.set_QK, within=pyo.PositiveReals, default=1 + ) + # adjustment of negative state amplitude limits - - model.param_f_amp_neg_x_inqk = pyo.Param(model.set_IN_dim_neg, - model.set_QK, - within=pyo.PositiveReals, - default=1) - + + model.param_f_amp_neg_x_inqk = pyo.Param( + model.set_IN_dim_neg, model.set_QK, within=pyo.PositiveReals, default=1 + ) + # state equations: coefficients from C matrix - - model.param_a_eq_x_innqk = pyo.Param(model.set_INN, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - + + model.param_a_eq_x_innqk = pyo.Param( + model.set_INN, model.set_QK, default=0, within=pyo.Reals # default: no effect + ) + # state equations: coefficients from D matrix - - model.param_b_eq_x_inmqk = pyo.Param(model.set_INM, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - + + model.param_b_eq_x_inmqk = pyo.Param( + model.set_INM, model.set_QK, default=0, within=pyo.Reals # default: no effect + ) + # state equations: constant term - - model.param_e_eq_x_inqk = pyo.Param(model.set_IN, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - - # ************************************************************************* - + + model.param_e_eq_x_inqk = pyo.Param( + model.set_IN, model.set_QK, default=0, within=pyo.Reals # default: no effect + ) + + # ************************************************************************* + # outputs - + # fixed upper bounds for output variables - - model.param_y_ub_irqk = pyo.Param(model.set_IR_fix, - model.set_QK, - within=pyo.Reals) - + + model.param_y_ub_irqk = pyo.Param(model.set_IR_fix, model.set_QK, within=pyo.Reals) + # fixed lower bounds for output variables - - model.param_y_lb_irqk = pyo.Param(model.set_IR_fix, - model.set_QK, - within=pyo.Reals) + + model.param_y_lb_irqk = pyo.Param(model.set_IR_fix, model.set_QK, within=pyo.Reals) # adjustment of positive output amplitude limits - - model.param_f_amp_y_pos_irqk = pyo.Param(model.set_IR_dim_pos, - model.set_QK, - within=pyo.PositiveReals, - default=1) + + model.param_f_amp_y_pos_irqk = pyo.Param( + model.set_IR_dim_pos, model.set_QK, within=pyo.PositiveReals, default=1 + ) # adjustment of negative output amplitude limits - - model.param_f_amp_y_neg_irqk = pyo.Param(model.set_IR_dim_neg, - model.set_QK, - within=pyo.PositiveReals, - default=1) - + + model.param_f_amp_y_neg_irqk = pyo.Param( + model.set_IR_dim_neg, model.set_QK, within=pyo.PositiveReals, default=1 + ) + # maximum positive amplitude limit for outputs - - model.param_y_amp_pos_max_ir = pyo.Param(model.set_IR_dim_pos, - within=pyo.PositiveReals) - + + model.param_y_amp_pos_max_ir = pyo.Param( + model.set_IR_dim_pos, within=pyo.PositiveReals + ) + # maximum negative amplitude limit for outputs - - model.param_y_amp_neg_max_ir = pyo.Param(model.set_IR_dim_neg, - within=pyo.PositiveReals) - + + model.param_y_amp_neg_max_ir = pyo.Param( + model.set_IR_dim_neg, within=pyo.PositiveReals + ) + # output equation coefficients from C matrix - - model.param_c_eq_y_irnqk = pyo.Param(model.set_IRN, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - + + model.param_c_eq_y_irnqk = pyo.Param( + model.set_IRN, model.set_QK, default=0, within=pyo.Reals # default: no effect + ) + # output equation coefficients from D matrix - - model.param_d_eq_y_irmqk = pyo.Param(model.set_IRM, - model.set_QK, - default=0, # 
default: no effect - within=pyo.Reals) - + + model.param_d_eq_y_irmqk = pyo.Param( + model.set_IRM, model.set_QK, default=0, within=pyo.Reals # default: no effect + ) + # output equation constant - - model.param_e_eq_y_irqk = pyo.Param(model.set_IR, - model.set_QK, - default=0, # default: no effect - within=pyo.Reals) - + + model.param_e_eq_y_irqk = pyo.Param( + model.set_IR, model.set_QK, default=0, within=pyo.Reals # default: no effect + ) + # ************************************************************************* # ************************************************************************* # ************************************************************************* # ************************************************************************* - + # variables - + # ************************************************************************* # ************************************************************************* - + # objective function - + # capex - + model.var_capex = pyo.Var(within=pyo.NonNegativeReals) - + # sum of discounted net cash flows - - model.var_sdncf_q = pyo.Var(model.set_Q, - within=pyo.Reals) - + + model.var_sdncf_q = pyo.Var(model.set_Q, within=pyo.Reals) + # sum of discounted externalities - - model.var_sdext_q = pyo.Var(model.set_Q, - within=pyo.Reals) - + + model.var_sdext_q = pyo.Var(model.set_Q, within=pyo.Reals) + # capex for individual arcs - - model.var_capex_arc_gllj = pyo.Var(model.set_GLLJ_sgl, - within=pyo.NonNegativeReals) - + + model.var_capex_arc_gllj = pyo.Var(model.set_GLLJ_sgl, within=pyo.NonNegativeReals) + # capex for installing individual converters - - model.var_capex_cvt_i = pyo.Var(model.set_I_new, - within=pyo.NonNegativeReals) - + + model.var_capex_cvt_i = pyo.Var(model.set_I_new, within=pyo.NonNegativeReals) + # exported flow revenue - - model.var_efr_glqpk = pyo.Var(model.set_GL_exp, - model.set_QPK, - within=pyo.NonNegativeReals) - + + model.var_efr_glqpk = pyo.Var( + model.set_GL_exp, model.set_QPK, within=pyo.NonNegativeReals + ) + # imported flow cost - - model.var_ifc_glqpk = pyo.Var(model.set_GL_imp, - model.set_QPK, - within=pyo.NonNegativeReals) - + + model.var_ifc_glqpk = pyo.Var( + model.set_GL_imp, model.set_QPK, within=pyo.NonNegativeReals + ) + # exported flow - + # TODO: validate the bounds by ensuring inf. cap. only exists in last segm. 
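The TODO above concerns the tiered-pricing pattern used by bounds_var_ef_glqpks and bounds_var_if_glqpks below: each exported or imported flow is split into price segments s, and a segment only receives a finite upper bound if param_v_max_glqpks defines a volume for it, a missing key meaning unbounded. A minimal sketch of the idea, with invented names (one flow, three segments) that are not part of this patch:

    import pyomo.environ as pyo

    m = pyo.ConcreteModel()
    m.set_S = pyo.Set(initialize=[0, 1, 2])  # price segments (hypothetical)
    # finite volumes for the first two segments; the last key is left out on
    # purpose, making segment 2 the unbounded tail the TODO wants to validate
    m.param_v_max = pyo.Param(
        m.set_S, initialize={0: 10.0, 1: 5.0}, within=pyo.NonNegativeReals
    )

    def bounds_var_f(m, s):
        if s in m.param_v_max:  # Pyomo Params are sparse: missing key = no cap
            return (0, m.param_v_max[s])
        return (0, None)  # infinite capacity

    m.var_f = pyo.Var(m.set_S, within=pyo.NonNegativeReals, bounds=bounds_var_f)

With segment prices ordered so that the finite, cheaper segments are used first (for imports, say), allowing infinite capacity anywhere but in the last segment would let flows bypass the intended price ladder; that is what the validation requested in the TODO should rule out.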
- + def bounds_var_ef_glqpks(m, g, l, q, p, k, s): - if (g,l,q,p,k,s) in m.param_v_max_glqpks: + if (g, l, q, p, k, s) in m.param_v_max_glqpks: # predefined finite capacity - return (0, m.param_v_max_glqpks[(g,l,q,p,k,s)]) + return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)]) else: # infinite capacity return (0, None) - model.var_ef_glqpks = pyo.Var(model.set_GLQPKS_exp, - within=pyo.NonNegativeReals, - bounds=bounds_var_ef_glqpks) - + + model.var_ef_glqpks = pyo.Var( + model.set_GLQPKS_exp, within=pyo.NonNegativeReals, bounds=bounds_var_ef_glqpks + ) + # imported flow - + def bounds_var_if_glqpks(m, g, l, q, p, k, s): - if (g,l,q,p,k,s) in m.param_v_max_glqpks: + if (g, l, q, p, k, s) in m.param_v_max_glqpks: # predefined finite capacity - return (0, m.param_v_max_glqpks[(g,l,q,p,k,s)]) + return (0, m.param_v_max_glqpks[(g, l, q, p, k, s)]) else: # infinite capacity return (0, None) - model.var_if_glqpks = pyo.Var(model.set_GLQPKS_imp, - within=pyo.NonNegativeReals, - bounds=bounds_var_if_glqpks) - + + model.var_if_glqpks = pyo.Var( + model.set_GLQPKS_imp, within=pyo.NonNegativeReals, bounds=bounds_var_if_glqpks + ) + # ************************************************************************* - + # arcs - + # arc flow variables - + def bounds_var_v_glljqk(m, g, l1, l2, j, q, k): # if ((g, l1, l2, j) in m.set_GLLJ_pre_fin_red and # (g, l1, l2, j) not in m.set_GLLJ_und and - # (g, l2, l1, j) not in m.set_GLLJ_und): - if ((g, l1, l2, j) in m.set_GLLJ_pre_fin_red and - (g, l1, l2) in m.set_J and - j not in m.set_J_und[(g,l1,l2)]): + # (g, l2, l1, j) not in m.set_GLLJ_und): + if ( + (g, l1, l2, j) in m.set_GLLJ_pre_fin_red + and (g, l1, l2) in m.set_J + and j not in m.set_J_und[(g, l1, l2)] + ): # directed, preexisting and finite capacity return (0, m.param_v_ub_glljqk[(g, l1, l2, j, q, k)]) else: # infinite capacity or dynamic finite capacity (set elsewhere) return (0, None) - model.var_v_glljqk = pyo.Var(model.set_GLLJ_red, - model.set_QK, - within=pyo.NonNegativeReals, - bounds=bounds_var_v_glljqk) - + + model.var_v_glljqk = pyo.Var( + model.set_GLLJ_red, + model.set_QK, + within=pyo.NonNegativeReals, + bounds=bounds_var_v_glljqk, + ) + # arc flow amplitude variables - - model.var_v_amp_gllj = pyo.Var(model.set_GLLJ_sgl, - within=pyo.NonNegativeReals) - + + model.var_v_amp_gllj = pyo.Var(model.set_GLLJ_sgl, within=pyo.NonNegativeReals) + # static losses per arc - - model.var_w_glljqk = pyo.Var(model.set_GLLJ_static_new, - model.set_QK, - within=pyo.NonNegativeReals) - + + model.var_w_glljqk = pyo.Var( + model.set_GLLJ_static_new, model.set_QK, within=pyo.NonNegativeReals + ) + # static losses modulated by the flow sense - - model.var_w_sns_glljqk = pyo.Var(model.set_GLLJ_static_und_red, - model.set_QK, - within=pyo.NonNegativeReals) - + + model.var_w_sns_glljqk = pyo.Var( + model.set_GLLJ_static_und_red, model.set_QK, within=pyo.NonNegativeReals + ) + # ************************************************************************* - + # decision to install a given arc - + def domain_var_delta_arc_inv_glljh(m, g, l1, l2, j, h): - if j in m.set_J_arc_nnr[(g,l1,l2)]: - return pyo.UnitInterval # [0,1]; alternatively: [0,inf] + if j in m.set_J_arc_nnr[(g, l1, l2)]: + return pyo.UnitInterval # [0,1]; alternatively: [0,inf] else: - return pyo.Binary # {0,1} + return pyo.Binary # {0,1} + model.var_delta_arc_inv_glljh = pyo.Var( - model.set_GLLJH_sgl, - domain=domain_var_delta_arc_inv_glljh - ) - + model.set_GLLJH_sgl, domain=domain_var_delta_arc_inv_glljh + ) + # flow direction variables for undirected 
arcs - - def domain_var_zeta_sns_glljqk(m, g, l1, l2, j, q, k): + + def domain_var_zeta_sns_glljqk(m, g, l1, l2, j, q, k): try: - if j in m.set_J_sns_nnr[(g,l1,l2)]: - return pyo.UnitInterval # [0,1]; alternatively: [0,inf] + if j in m.set_J_sns_nnr[(g, l1, l2)]: + return pyo.UnitInterval # [0,1]; alternatively: [0,inf] else: - return pyo.Binary # default: {0,1} + return pyo.Binary # default: {0,1} except KeyError: - return pyo.Binary # default: {0,1} + return pyo.Binary # default: {0,1} + model.var_zeta_sns_glljqk = pyo.Var( - model.set_GLLJ_und_red, # undirected arcs, both preexisting and new - model.set_QK, # one per time interval - domain=domain_var_zeta_sns_glljqk # can be binary or nnr - ) - + model.set_GLLJ_und_red, # undirected arcs, both preexisting and new + model.set_QK, # one per time interval + domain=domain_var_zeta_sns_glljqk, # can be binary or nnr + ) + # interface variables to separate the flow sense selection and arc invest. - - model.var_xi_arc_inv_gllj = pyo.Var(model.set_GLLJ_int, - within=pyo.UnitInterval) - - # ************************************************************************* - - # converters - + + model.var_xi_arc_inv_gllj = pyo.Var(model.set_GLLJ_int, within=pyo.UnitInterval) + + # ************************************************************************* + + # converters + # decision to install converter i - - model.var_cvt_inv_i = pyo.Var(model.set_I_new, - within=pyo.Binary) - + + model.var_cvt_inv_i = pyo.Var(model.set_I_new, within=pyo.Binary) + # inputs - + # input variables - + def bounds_var_u_imqk(m, i, m_i, q, k): if (i, m_i) in m.param_u_ub_imqk: # predefined limit @@ -2033,801 +1965,836 @@ def create_model(name: str, else: # dynamic limit (set elsewhere) return (0, None) - + def domain_var_u_imqk(m, i, m_i, q, k): try: if m_i in m.set_M_bin[i]: - return pyo.Binary # binary: {0,1} + return pyo.Binary # binary: {0,1} else: - return pyo.NonNegativeReals # nonnegative real: [0,inf] + return pyo.NonNegativeReals # nonnegative real: [0,inf] except KeyError: - return pyo.NonNegativeReals # nonnegative real: [0,inf] - - model.var_u_imqk = pyo.Var(model.set_IM, - model.set_QK, - domain=domain_var_u_imqk, - #within=pyo.NonNegativeReals, - bounds=bounds_var_u_imqk) - + return pyo.NonNegativeReals # nonnegative real: [0,inf] + + model.var_u_imqk = pyo.Var( + model.set_IM, + model.set_QK, + domain=domain_var_u_imqk, + # within=pyo.NonNegativeReals, + bounds=bounds_var_u_imqk, + ) + # input amplitude variables (only one per sign is needed, as vars. 
are nnr)
-    
-    model.var_u_amp_im = pyo.Var(model.set_IM_dim,
-                                 within=pyo.NonNegativeReals)
+
+    model.var_u_amp_im = pyo.Var(model.set_IM_dim, within=pyo.NonNegativeReals)

     # *************************************************************************
-    
+
     # outputs
-    
+
     # output variables
-    
+
     def bounds_var_y_irqk(m, i, r, q, k):
         if (i, r) in m.set_IR_fix:
             # predefined limit
-            return (m.param_u_lb_irqk[(i, r, q, k)],
-                    m.param_u_ub_irqk[(i, r, q, k)])
+            return (m.param_y_lb_irqk[(i, r, q, k)], m.param_y_ub_irqk[(i, r, q, k)])
         else:
             # do not enforce any limits
             return (None, None)
-    
+
     # def domain_var_y_irqk(m, i, r, k):
     #     try:
     #         if m_i in m.set_M_bin[i]:
-    #             return pyo.Binary # binary: {0,1}
+    #             return pyo.Binary  # binary: {0,1}
     #         else:
     #             return pyo.NonNegativeReals # nonnegative real: [0,inf]
     #     except KeyError:
-    #         return pyo.NonNegativeReals # nonnegative real: [0,inf]
+    #         return pyo.NonNegativeReals  # nonnegative real: [0,inf]
+
+    model.var_y_irqk = pyo.Var(
+        model.set_IR, model.set_QK, bounds=bounds_var_y_irqk, within=pyo.Reals
+    )

-    model.var_y_irqk = pyo.Var(model.set_IR,
-                               model.set_QK,
-                               bounds=bounds_var_y_irqk,
-                               within=pyo.Reals)
-    
     # positive output amplitudes
-    
-    model.var_y_amp_pos_ir = pyo.Var(model.set_IR_dim_pos,
-                                     within=pyo.Reals)
-    
+
+    model.var_y_amp_pos_ir = pyo.Var(model.set_IR_dim_pos, within=pyo.Reals)
+
     # negative output amplitudes
-    
-    model.var_y_amp_neg_ir = pyo.Var(model.set_IR_dim_neg,
-                                     within=pyo.Reals)
-    
+
+    model.var_y_amp_neg_ir = pyo.Var(model.set_IR_dim_neg, within=pyo.Reals)
+
     # *************************************************************************
-    
+
     # states
-    
+
     # state variables
-    
-    model.var_x_inqk = pyo.Var(model.set_IN,
-                               model.set_QK,
-                               within=pyo.Reals)
-    
+
+    model.var_x_inqk = pyo.Var(model.set_IN, model.set_QK, within=pyo.Reals)
+
     # positive amplitude variables
-    
-    model.var_x_amp_pos_in = pyo.Var(model.set_IN_dim_pos,
-                                     within=pyo.NonNegativeReals)
-    
+
+    model.var_x_amp_pos_in = pyo.Var(model.set_IN_dim_pos, within=pyo.NonNegativeReals)
+
     # negative amplitude variables
-    
-    model.var_x_amp_neg_in = pyo.Var(model.set_IN_dim_neg,
-                                     within=pyo.NonNegativeReals)
-    
+
+    model.var_x_amp_neg_in = pyo.Var(model.set_IN_dim_neg, within=pyo.NonNegativeReals)
+
     # positive state variation
-    
-    model.var_delta_x_pos_var_in = pyo.Var(model.set_IN_pos_var,
-                                           within=pyo.NonNegativeReals)
-    
+
+    model.var_delta_x_pos_var_in = pyo.Var(
+        model.set_IN_pos_var, within=pyo.NonNegativeReals
+    )
+
     # negative state variation
-    
-    model.var_delta_x_neg_var_in = pyo.Var(model.set_IN_neg_var,
-                                           within=pyo.NonNegativeReals)
-    
+
+    model.var_delta_x_neg_var_in = pyo.Var(
+        model.set_IN_neg_var, within=pyo.NonNegativeReals
+    )
+
     # positive reference state violation
-    
-    model.var_delta_x_ref_u_inqk = pyo.Var(model.set_IN_ref_u,
-                                           model.set_QK,
-                                           within=pyo.NonNegativeReals)
-    
+
+    model.var_delta_x_ref_u_inqk = pyo.Var(
+        model.set_IN_ref_u, model.set_QK, within=pyo.NonNegativeReals
+    )
+
     # negative reference state violation
-    
-    model.var_delta_x_ref_d_inqk = pyo.Var(model.set_IN_ref_d,
-                                           model.set_QK,
-                                           within=pyo.NonNegativeReals)
-    
+
+    model.var_delta_x_ref_d_inqk = pyo.Var(
+        model.set_IN_ref_d, model.set_QK, within=pyo.NonNegativeReals
+    )
+
     # *************************************************************************
     # *************************************************************************
     # *************************************************************************
     # *************************************************************************
-    
+
     # objective function

     # 
************************************************************************* # ************************************************************************* - + # maximise npv - + def obj_f_rule(m): return ( - sum((m.var_sdncf_q[q]+m.var_sdext_q[q])*m.param_c_wgt_q[q] - for q in m.set_Q) - -m.var_capex + sum( + (m.var_sdncf_q[q] + m.var_sdext_q[q]) * m.param_c_wgt_q[q] + for q in m.set_Q ) + - m.var_capex + ) + model.obj_f = pyo.Objective(rule=obj_f_rule, sense=pyo.maximize) - + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* # ************************************************************************* # ************************************************************************* # Constraints - + # ************************************************************************* # ************************************************************************* - + # objective function - + # ************************************************************************* - + # opex - + # sum of discounted net cash flows - + def rule_sdncf_q(m, q): return ( sum( - m.param_c_df_qp[(q,p)]* - sum( - m.param_c_time_qpk[(q,p,k)]* - sum( + m.param_c_df_qp[(q, p)] + * sum( + m.param_c_time_qpk[(q, p, k)] + * sum( sum( - m.var_efr_glqpk[(g,l_exp,q,p,k)] + m.var_efr_glqpk[(g, l_exp, q, p, k)] for l_exp in m.set_L_exp[g] - ) - - - sum( - m.var_ifc_glqpk[(g,l_imp,q,p,k)] + ) + - sum( + m.var_ifc_glqpk[(g, l_imp, q, p, k)] for l_imp in m.set_L_imp[g] - ) - for g in m.set_G ) - for k in m.set_K_q[q] + for g in m.set_G ) + for k in m.set_K_q[q] + ) for p in m.set_P_q[q] - ) == m.var_sdncf_q[q] ) + == m.var_sdncf_q[q] + ) + model.constr_sdncf_q = pyo.Constraint(model.set_Q, rule=rule_sdncf_q) - + # exported flow revenue - + def rule_constr_exp_flow_revenue(m, g, l, q, p, k): - return ( - sum( - m.var_ef_glqpks[(g,l,q,p,k,s)]* - m.param_p_glqpks[(g,l,q,p,k,s)] - for s in m.set_S[(g,l,q,p,k)] - ) == m.var_efr_glqpk[(g,l,q,p,k)] + return ( + sum( + m.var_ef_glqpks[(g, l, q, p, k, s)] + * m.param_p_glqpks[(g, l, q, p, k, s)] + for s in m.set_S[(g, l, q, p, k)] ) - model.constr_exp_flow_revenue = pyo.Constraint( - model.set_GL_exp, - model.set_QPK, - rule=rule_constr_exp_flow_revenue + == m.var_efr_glqpk[(g, l, q, p, k)] ) - + + model.constr_exp_flow_revenue = pyo.Constraint( + model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flow_revenue + ) + # imported flow cost - + def rule_constr_imp_flow_cost(m, g, l, q, p, k): - return ( - sum( - m.var_if_glqpks[(g,l,q,p,k,s)]* - m.param_p_glqpks[(g,l,q,p,k,s)] - for s in m.set_S[(g,l,q,p,k)] - ) == m.var_ifc_glqpk[(g,l,q,p,k)] + return ( + sum( + m.var_if_glqpks[(g, l, q, p, k, s)] + * m.param_p_glqpks[(g, l, q, p, k, s)] + for s in m.set_S[(g, l, q, p, k)] ) - model.constr_imp_flow_cost = pyo.Constraint( - model.set_GL_imp, - model.set_QPK, - rule=rule_constr_imp_flow_cost + == m.var_ifc_glqpk[(g, l, q, p, k)] ) - + + model.constr_imp_flow_cost = pyo.Constraint( + model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flow_cost + ) + # exported flows - + def rule_constr_exp_flows(m, g, l, q, p, k): - return ( - sum( - m.var_v_glljqk[(g,l_star,l,j,q,k)]* - m.param_eta_glljqk[(g,l_star,l,j,q,k)] - for l_star in m.set_L[g] - if l_star not in m.set_L_exp[g] - for j in m.set_J[(g,l_star,l)] # only directed arcs - ) == sum( - m.var_ef_glqpks[(g,l,q,p,k,s)] - for s in m.set_S[(g,l,q,p,k)] - ) - ) - model.constr_exp_flows = 
pyo.Constraint(model.set_GL_exp, - model.set_QPK, - rule=rule_constr_exp_flows) + return sum( + m.var_v_glljqk[(g, l_star, l, j, q, k)] + * m.param_eta_glljqk[(g, l_star, l, j, q, k)] + for l_star in m.set_L[g] + if l_star not in m.set_L_exp[g] + for j in m.set_J[(g, l_star, l)] # only directed arcs + ) == sum(m.var_ef_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)]) + + model.constr_exp_flows = pyo.Constraint( + model.set_GL_exp, model.set_QPK, rule=rule_constr_exp_flows + ) # imported flows - + def rule_constr_imp_flows(m, g, l, q, p, k): - return ( - sum( - m.var_v_glljqk[(g,l,l_star,j,q,k)] - for l_star in m.set_L[g] - if l_star not in m.set_L_imp[g] - for j in m.set_J[(g,l,l_star)] # only directed arcs - ) == sum( - m.var_if_glqpks[(g,l,q,p,k,s)] - for s in m.set_S[(g,l,q,p,k)] - ) - ) - model.constr_imp_flows = pyo.Constraint(model.set_GL_imp, - model.set_QPK, - rule=rule_constr_imp_flows) - + return sum( + m.var_v_glljqk[(g, l, l_star, j, q, k)] + for l_star in m.set_L[g] + if l_star not in m.set_L_imp[g] + for j in m.set_J[(g, l, l_star)] # only directed arcs + ) == sum(m.var_if_glqpks[(g, l, q, p, k, s)] for s in m.set_S[(g, l, q, p, k)]) + + model.constr_imp_flows = pyo.Constraint( + model.set_GL_imp, model.set_QPK, rule=rule_constr_imp_flows + ) + # ************************************************************************* - + # sum of discounted externalities - + def rule_sdext_q(m, q): return ( sum( - m.param_c_df_qp[(q,p)]*( - sum( - m.param_c_time_qpk[(q,p,k)]* + m.param_c_df_qp[(q, p)] + * ( sum( - m.var_u_imqk[(i, m_i, q, k)]* - m.param_c_ext_u_imqk[(i, m_i, q, k)] - for (i, m_i) in m.set_IM_ext + m.param_c_time_qpk[(q, p, k)] + * sum( + m.var_u_imqk[(i, m_i, q, k)] + * m.param_c_ext_u_imqk[(i, m_i, q, k)] + for (i, m_i) in m.set_IM_ext ) - + - sum( - m.var_x_inqk[(i, n_i, q, k)]* - m.param_c_ext_x_inqk[(i, n_i, q, k)] - for (i, n_i) in m.set_IN_ext + + sum( + m.var_x_inqk[(i, n_i, q, k)] + * m.param_c_ext_x_inqk[(i, n_i, q, k)] + for (i, n_i) in m.set_IN_ext ) - + - sum( - m.var_y_irqk[(i, r_i, q, k)]* - m.param_c_ext_y_irqk[(i, r_i, q, k)] - for (i, r_i) in m.set_IR_ext + + sum( + m.var_y_irqk[(i, r_i, q, k)] + * m.param_c_ext_y_irqk[(i, r_i, q, k)] + for (i, r_i) in m.set_IR_ext ) - + - sum( - m.var_delta_x_ref_d_inqk[(i, n_i, q, k)]* - m.param_c_ref_d_inqk[(i, n_i, q, k)] - for (i, n_i) in m.set_IN_ref_d + + sum( + m.var_delta_x_ref_d_inqk[(i, n_i, q, k)] + * m.param_c_ref_d_inqk[(i, n_i, q, k)] + for (i, n_i) in m.set_IN_ref_d ) - + - sum( - m.var_delta_x_ref_u_inqk[(i, n_i, q, k)]* - m.param_c_ref_u_inqk[(i, n_i, q, k)] - for (i, n_i) in m.set_IN_ref_u + + sum( + m.var_delta_x_ref_u_inqk[(i, n_i, q, k)] + * m.param_c_ref_u_inqk[(i, n_i, q, k)] + for (i, n_i) in m.set_IN_ref_u ) - for k in m.set_K_q[q] + for k in m.set_K_q[q] ) - + - sum( - m.param_c_pos_var_in[(i,n_i)]* - m.var_delta_x_pos_var_in[(i,n_i)] - for (i,n_i) in m.set_IN_pos_var + + sum( + m.param_c_pos_var_in[(i, n_i)] + * m.var_delta_x_pos_var_in[(i, n_i)] + for (i, n_i) in m.set_IN_pos_var ) - + - sum( - m.param_c_neg_var_in[(i,n_i)]* - m.var_delta_x_neg_var_in[(i,n_i)] - for (i,n_i) in m.set_IN_neg_var + + sum( + m.param_c_neg_var_in[(i, n_i)] + * m.var_delta_x_neg_var_in[(i, n_i)] + for (i, n_i) in m.set_IN_neg_var ) ) for p in m.set_P_q[q] - ) == m.var_sdext_q[q] ) + == m.var_sdext_q[q] + ) + model.constr_sdext_q = pyo.Constraint(model.set_Q, rule=rule_sdext_q) - + # ************************************************************************* - + # capex - + def rule_capex(m): return ( sum( - 
m.var_capex_arc_gllj[(g,l1,l2,j)] - for (g,l1,l2,j) in m.set_GLLJ_sgl - ) - + - sum( - m.var_capex_arc_col_t[t] - for t in m.set_T - ) - + - sum( - m.var_capex_cvt_i[i] - for i in m.set_I_new - ) <= m.var_capex + m.var_capex_arc_gllj[(g, l1, l2, j)] + for (g, l1, l2, j) in m.set_GLLJ_sgl ) + + sum(m.var_capex_arc_col_t[t] for t in m.set_T) + + sum(m.var_capex_cvt_i[i] for i in m.set_I_new) + <= m.var_capex + ) + model.constr_capex = pyo.Constraint(rule=rule_capex) - + # capex for arcs - - def rule_constr_capex_arcs(m,g,l1,l2,j): + + def rule_constr_capex_arcs(m, g, l1, l2, j): return ( - m.var_v_amp_gllj[(g,l1,l2,j)]* - m.param_c_arc_var_gllj[(g,l1,l2,j)] - + - sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)]* - m.param_c_arc_min_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)] - ) <= m.var_capex_arc_gllj[(g,l1,l2,j)] + m.var_v_amp_gllj[(g, l1, l2, j)] * m.param_c_arc_var_gllj[(g, l1, l2, j)] + + sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + * m.param_c_arc_min_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] ) + <= m.var_capex_arc_gllj[(g, l1, l2, j)] + ) + model.constr_capex_arcs = pyo.Constraint( - model.set_GLLJ_sgl, # - rule=rule_constr_capex_arcs) - + model.set_GLLJ_sgl, rule=rule_constr_capex_arcs # + ) + # capex for groups - - def rule_constr_capex_arcs_col(m,t): + + def rule_constr_capex_arcs_col(m, t): return ( - m.var_v_amp_t[t]* - m.param_c_arc_var_t[t] - + - sum(m.var_delta_arc_inv_th[(t,h)]* - m.param_c_arc_min_th[(t,h)] + m.var_v_amp_t[t] * m.param_c_arc_var_t[t] + + sum( + m.var_delta_arc_inv_th[(t, h)] * m.param_c_arc_min_th[(t, h)] for h in m.set_H_t[t] - ) <= m.var_capex_arc_col_t[t] ) + <= m.var_capex_arc_col_t[t] + ) + model.constr_capex_arcs_col = pyo.Constraint( - model.set_T, - rule=rule_constr_capex_arcs_col) - + model.set_T, rule=rule_constr_capex_arcs_col + ) + # capex for individual systems - - def rule_capex_converter(m,i): + + def rule_capex_converter(m, i): return ( - m.var_cvt_inv_i[i]* - m.param_c_cvt_min_i[i] - + - sum(m.var_u_amp_im[(i,m_i)]* - m.param_c_cvt_u_im[(i,m_i)] - for m_i in m.set_M_dim_i[i]) - + - sum(m.var_x_amp_pos_in[(i,n)]* - m.param_c_cvt_x_pos_in[(i,n)] - for n in m.set_N_dim_pos[i]) - + - sum(m.var_x_amp_neg_in[(i,n)]* - m.param_c_cvt_x_neg_in[(i,n)] - for n in m.set_N_dim_neg[i]) - + - sum(m.var_y_amp_pos_ir[(i,r)]* - m.param_c_cvt_y_pos_ir[(i,r)] - for r in m.set_N_dim_pos[i]) - + - sum(m.var_y_amp_neg_ir[(i,r)]* - m.param_c_cvt_y_neg_ir[(i,r)] - for r in m.set_N_dim_neg[i]) <= m.var_capex_cvt_i[i] + m.var_cvt_inv_i[i] * m.param_c_cvt_min_i[i] + + sum( + m.var_u_amp_im[(i, m_i)] * m.param_c_cvt_u_im[(i, m_i)] + for m_i in m.set_M_dim_i[i] + ) + + sum( + m.var_x_amp_pos_in[(i, n)] * m.param_c_cvt_x_pos_in[(i, n)] + for n in m.set_N_dim_pos[i] + ) + + sum( + m.var_x_amp_neg_in[(i, n)] * m.param_c_cvt_x_neg_in[(i, n)] + for n in m.set_N_dim_neg[i] + ) + + sum( + m.var_y_amp_pos_ir[(i, r)] * m.param_c_cvt_y_pos_ir[(i, r)] + for r in m.set_N_dim_pos[i] ) - model.constr_capex_system = pyo.Constraint(model.set_I_new, - rule=rule_capex_converter) - + + sum( + m.var_y_amp_neg_ir[(i, r)] * m.param_c_cvt_y_neg_ir[(i, r)] + for r in m.set_N_dim_neg[i] + ) + <= m.var_capex_cvt_i[i] + ) + + model.constr_capex_system = pyo.Constraint( + model.set_I_new, rule=rule_capex_converter + ) + # ************************************************************************* # ************************************************************************* - + # network # ************************************************************************* 
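Note: the capex hunks above only re-wrap these expressions; the underlying Pyomo idiom is unchanged: a rule function returns a relational expression and is registered as a pyo.Constraint. The following minimal sketch (not part of the patch) reproduces that idiom for the capex aggregation, using hypothetical component names (set_A, var_capex_a, var_capex) rather than the model's actual sets and variables.

import pyomo.environ as pyo

model = pyo.ConcreteModel()
model.set_A = pyo.Set(initialize=["a1", "a2"])  # stand-in index set
model.var_capex_a = pyo.Var(model.set_A, domain=pyo.NonNegativeReals)
model.var_capex = pyo.Var(domain=pyo.NonNegativeReals)

def rule_capex(m):
    # the auxiliary capex variable must cover the sum of the individual
    # capex terms, mirroring the constr_capex rule reformatted above
    return sum(m.var_capex_a[a] for a in m.set_A) <= m.var_capex

model.constr_capex_sketch = pyo.Constraint(rule=rule_capex)

Minimising var_capex then makes the inequality tight, which is why the model can use an epigraph-style bound instead of an equality.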
# ************************************************************************* - + # flow equilibrium equation - - def rule_node_balance(m,g,l,q,k): + + def rule_node_balance(m, g, l, q, k): return ( - m.param_v_base_glqk[(g,l,q,k)], + m.param_v_base_glqk[(g, l, q, k)], # incoming arcs, default direction sum( sum( - m.param_eta_glljqk[(g,l2,l,j,q,k)]* - m.var_v_glljqk[(g,l2,l,j,q,k)] - for j in m.set_J[(g,l2,l)] - ) - for l2 in m.set_L[g]-m.set_L_exp[g] if l!=l2 + m.param_eta_glljqk[(g, l2, l, j, q, k)] + * m.var_v_glljqk[(g, l2, l, j, q, k)] + for j in m.set_J[(g, l2, l)] ) + for l2 in m.set_L[g] - m.set_L_exp[g] + if l != l2 + ) + # incoming undirected arcs, reverse direction sum( sum( - m.param_eta_glljqk[(g,l2,l,j,q,k)]* - m.var_v_glljqk[(g,l2,l,j,q,k)] - for j in m.set_J_und[(g,l,l2)] - ) - for l2 in m.set_L[g]-m.set_L_exp[g]-m.set_L_imp[g] if l!=l2 + m.param_eta_glljqk[(g, l2, l, j, q, k)] + * m.var_v_glljqk[(g, l2, l, j, q, k)] + for j in m.set_J_und[(g, l, l2)] ) + for l2 in m.set_L[g] - m.set_L_exp[g] - m.set_L_imp[g] + if l != l2 + ) - # outgoing directed arcs, default direction sum( - sum( - m.var_v_glljqk[(g,l,l2,j,q,k)] - for j in m.set_J[(g,l,l2)] - ) - for l2 in m.set_L[g]-m.set_L_imp[g] if l!=l2 - ) + sum(m.var_v_glljqk[(g, l, l2, j, q, k)] for j in m.set_J[(g, l, l2)]) + for l2 in m.set_L[g] - m.set_L_imp[g] + if l != l2 + ) - # outgoing directed arcs, reverse direction sum( sum( - m.var_v_glljqk[(g,l,l2,j,q,k)] - for j in m.set_J_und[(g,l2,l)] - ) - for l2 in m.set_L[g]-m.set_L_imp[g]-m.set_L_exp[g] if l!=l2 + m.var_v_glljqk[(g, l, l2, j, q, k)] for j in m.set_J_und[(g, l2, l)] ) + for l2 in m.set_L[g] - m.set_L_imp[g] - m.set_L_exp[g] + if l != l2 + ) # static losses placed downstream for new directed incoming arcs - - - sum( + - sum( sum( - m.param_eta_glljqk[(g,l2,l,j,q,k)]*( - m.var_w_glljqk[(g,l2,l,j,q,k)] - if j not in m.set_J_pre[(g,l2,l)] else - m.param_w_pre_glljqk[(g,l2,l,j,q,k)]) - for j in m.set_J_stt_arr[(g,l2,l)] - if j not in m.set_J_und[(g,l2,l)] # directed arcs only + m.param_eta_glljqk[(g, l2, l, j, q, k)] + * ( + m.var_w_glljqk[(g, l2, l, j, q, k)] + if j not in m.set_J_pre[(g, l2, l)] + else m.param_w_pre_glljqk[(g, l2, l, j, q, k)] ) - for l2 in m.set_L[g]-m.set_L_exp[g] if l!=l2 + for j in m.set_J_stt_arr[(g, l2, l)] + if j not in m.set_J_und[(g, l2, l)] # directed arcs only ) + for l2 in m.set_L[g] - m.set_L_exp[g] + if l != l2 + ) # static losses placed upstream for new directed outgoing arcs - - - sum( + - sum( sum( - (m.var_w_glljqk[(g,l,l2,j,q,k)] - if j not in m.set_J_pre[(g,l,l2)] else - m.param_w_pre_glljqk[(g,l,l2,j,q,k)]) - for j in m.set_J_stt[(g,l,l2)] - if j not in m.set_J_stt_arr[(g,l,l2)] - if j not in m.set_J_und[(g,l,l2)] # directed arcs only + ( + m.var_w_glljqk[(g, l, l2, j, q, k)] + if j not in m.set_J_pre[(g, l, l2)] + else m.param_w_pre_glljqk[(g, l, l2, j, q, k)] ) - for l2 in m.set_L[g]-m.set_L_imp[g] if l!=l2 + for j in m.set_J_stt[(g, l, l2)] + if j not in m.set_J_stt_arr[(g, l, l2)] + if j not in m.set_J_und[(g, l, l2)] # directed arcs only ) + for l2 in m.set_L[g] - m.set_L_imp[g] + if l != l2 + ) # static losses for undirected arcs, incoming (a and ds) # static losses for undirected arcs, outgoing (a and us) - - - sum( + - sum( # nominal direction sum( - m.param_eta_glljqk[(g,l2,l,j,q,k)]* - m.var_w_sns_glljqk[(g,l2,l,j,q,k)] - for j in m.set_J_und[(g,l,l2)] - if (j in m.set_J_stt_dep[(g,l,l2)] or - j in m.set_J_stt_ds[(g,l,l2)]) + m.param_eta_glljqk[(g, l2, l, j, q, k)] + * m.var_w_sns_glljqk[(g, l2, l, j, q, k)] + for j in 
m.set_J_und[(g, l, l2)] + if ( + j in m.set_J_stt_dep[(g, l, l2)] + or j in m.set_J_stt_ds[(g, l, l2)] ) + ) + # reverse direction sum( - m.var_w_sns_glljqk[(g,l,l2,j,q,k)] - for j in m.set_J_und[(g,l,l2)] - if (j in m.set_J_stt_dep[(g,l,l2)] or - j in m.set_J_stt_us[(g,l,l2)]) + m.var_w_sns_glljqk[(g, l, l2, j, q, k)] + for j in m.set_J_und[(g, l, l2)] + if ( + j in m.set_J_stt_dep[(g, l, l2)] + or j in m.set_J_stt_us[(g, l, l2)] ) - for l2 in m.set_L[g]-m.set_L_exp[g]-m.set_L_imp[g] if l!=l2 ) + for l2 in m.set_L[g] - m.set_L_exp[g] - m.set_L_imp[g] + if l != l2 + ) # static losses for undirected arcs, incoming (b and ds) # static losses for undirected arcs, outgoing (b and us) - - - sum( + - sum( # nominal direction sum( - m.param_eta_glljqk[(g,l2,l,j,q,k)]* - m.var_w_sns_glljqk[(g,l2,l,j,q,k)] - for j in m.set_J_und[(g,l2,l)] - if (j in m.set_J_stt_arr[(g,l2,l)] or - j in m.set_J_stt_ds[(g,l2,l)]) + m.param_eta_glljqk[(g, l2, l, j, q, k)] + * m.var_w_sns_glljqk[(g, l2, l, j, q, k)] + for j in m.set_J_und[(g, l2, l)] + if ( + j in m.set_J_stt_arr[(g, l2, l)] + or j in m.set_J_stt_ds[(g, l2, l)] ) + ) + # reverse direction sum( - m.var_w_sns_glljqk[(g,l,l2,j,q,k)] - for j in m.set_J_und[(g,l2,l)] - if (j in m.set_J_stt_arr[(g,l2,l)] or - j in m.set_J_stt_us[(g,l2,l)]) + m.var_w_sns_glljqk[(g, l, l2, j, q, k)] + for j in m.set_J_und[(g, l2, l)] + if ( + j in m.set_J_stt_arr[(g, l2, l)] + or j in m.set_J_stt_us[(g, l2, l)] ) - for l2 in m.set_L[g]-m.set_L_exp[g]-m.set_L_imp[g] if l!=l2 ) + for l2 in m.set_L[g] - m.set_L_exp[g] - m.set_L_imp[g] + if l != l2 + ) # converters - + - sum(m.var_y_irqk[(i,r,q,k)]* - m.param_a_nw_glirqk[(g,l,i,r,q,k)] + + sum( + m.var_y_irqk[(i, r, q, k)] * m.param_a_nw_glirqk[(g, l, i, r, q, k)] for (i, r) in m.set_IR - ) - + - sum(m.var_u_imqk[(i,m_i,q,k)]* - m.param_a_nw_glimqk[(g,l,i,m_i,q,k)] + ) + + sum( + m.var_u_imqk[(i, m_i, q, k)] * m.param_a_nw_glimqk[(g, l, i, m_i, q, k)] for (i, m_i) in m.set_IM - ), - m.param_v_base_glqk[(g,l,q,k)]) - model.constr_node_balance = pyo.Constraint(model.set_GL_not_exp_imp, - model.set_QK, - rule=rule_node_balance) - - # ************************************************************************* - + ), + m.param_v_base_glqk[(g, l, q, k)], + ) + + model.constr_node_balance = pyo.Constraint( + model.set_GL_not_exp_imp, model.set_QK, rule=rule_node_balance + ) + + # ************************************************************************* + # limit number of directed arcs per direction - - def rule_constr_limited_parallel_arcs_per_direction(m,g,l1,l2): - + + def rule_constr_limited_parallel_arcs_per_direction(m, g, l1, l2): # cases: # 1) the number of options is lower than or equal to the limit (skip) - # 2) the number of preexisting and new mandatory arcs exceeds + # 2) the number of preexisting and new mandatory arcs exceeds # the limit (infeasible: pyo.Constraint.Infeasible) # 3) all other cases (constraint) - - - + # number of preexisting arcs going from l1 to l2 - - number_arcs_pre_nom = len(m.set_J_pre[(g,l1,l2)] - ) if (g,l1,l2) in m.set_J_pre else 0 - + + number_arcs_pre_nom = ( + len(m.set_J_pre[(g, l1, l2)]) if (g, l1, l2) in m.set_J_pre else 0 + ) + number_arcs_pre_rev = ( - sum(1 for j in m.set_J_pre[(g,l2,l1)] - if j in m.set_J_und[(g,l2,l1)]) - if (g,l2,l1) in m.set_J_pre else 0 - ) - + sum(1 for j in m.set_J_pre[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)]) + if (g, l2, l1) in m.set_J_pre + else 0 + ) + # number of mandatory arcs going from l1 to l2 - - number_arcs_mdt_nom = len(m.set_J_mdt[(g,l1,l2)] - ) if 
(g,l1,l2) in m.set_J_mdt else 0 - + + number_arcs_mdt_nom = ( + len(m.set_J_mdt[(g, l1, l2)]) if (g, l1, l2) in m.set_J_mdt else 0 + ) + number_arcs_mdt_rev = ( - sum(1 for j in m.set_J_mdt[(g,l2,l1)] - if j in m.set_J_und[(g,l2,l1)]) - if (g,l2,l1) in m.set_J_mdt else 0 - ) - + sum(1 for j in m.set_J_mdt[(g, l2, l1)] if j in m.set_J_und[(g, l2, l1)]) + if (g, l2, l1) in m.set_J_mdt + else 0 + ) + # number of optional arcs going from l1 to l2 - + number_arcs_opt_nom = ( - sum(1 - for j in m.set_J[(g,l1,l2)] - if j not in m.set_J_pre[(g,l1,l2)] - if j not in m.set_J_mdt[(g,l1,l2)]) - if (g,l1,l2) in m.set_J else 0 + sum( + 1 + for j in m.set_J[(g, l1, l2)] + if j not in m.set_J_pre[(g, l1, l2)] + if j not in m.set_J_mdt[(g, l1, l2)] ) - + if (g, l1, l2) in m.set_J + else 0 + ) + number_arcs_opt_rev = ( - sum(1 - for j in m.set_J[(g,l2,l1)] - if j not in m.set_J_pre[(g,l2,l1)] - if j not in m.set_J_mdt[(g,l2,l1)] - if j in m.set_J_und[(g,l2,l1)]) - if (g,l2,l1) in m.set_J else 0 + sum( + 1 + for j in m.set_J[(g, l2, l1)] + if j not in m.set_J_pre[(g, l2, l1)] + if j not in m.set_J_mdt[(g, l2, l1)] + if j in m.set_J_und[(g, l2, l1)] ) - + if (g, l2, l1) in m.set_J + else 0 + ) + # build the constraints - - if (number_arcs_mdt_nom+number_arcs_mdt_rev+ - number_arcs_pre_nom+number_arcs_pre_rev > - m.param_max_number_parallel_arcs[(g,l1,l2)]): - + + if ( + number_arcs_mdt_nom + + number_arcs_mdt_rev + + number_arcs_pre_nom + + number_arcs_pre_rev + > m.param_max_number_parallel_arcs[(g, l1, l2)] + ): # the number of unavoidable arcs already exceeds the limit - + return pyo.Constraint.Infeasible - - elif (number_arcs_opt_nom+number_arcs_opt_rev+ - number_arcs_mdt_nom+number_arcs_mdt_rev+ - number_arcs_pre_nom+number_arcs_pre_rev > - m.param_max_number_parallel_arcs[(g,l1,l2)]): - + + elif ( + number_arcs_opt_nom + + number_arcs_opt_rev + + number_arcs_mdt_nom + + number_arcs_mdt_rev + + number_arcs_pre_nom + + number_arcs_pre_rev + > m.param_max_number_parallel_arcs[(g, l1, l2)] + ): # the number of potential arcs exceeds the limit: cannot be skipped - + return ( # preexisting arcs - number_arcs_pre_nom+number_arcs_pre_rev - + + number_arcs_pre_nom + number_arcs_pre_rev + # mandatory arcs - number_arcs_mdt_nom+number_arcs_mdt_rev - + + number_arcs_mdt_nom + number_arcs_mdt_rev + # arcs within an (optional) group that uses interfaces sum( - (sum(1 - for j in m.set_J_col[(g,l1,l2)] - if (g,l1,l2,j) in m.set_GLLJ_col_t[t] - ) if (g,l1,l2) in m.set_J_col else 0 - + - sum(1 - for j in m.set_J_col[(g,l2,l1)] - if j in m.set_J_und[(g,l2,l1)] - if (g,l2,l1,j) in m.set_GLLJ_col_t[t] - ) if ((g,l2,l1) in m.set_J_col and - (g,l2,l1) in m.set_J_und) else 0 - )*m.var_xi_arc_inv_t[t] - for t in m.set_T_int + ( + sum( + 1 + for j in m.set_J_col[(g, l1, l2)] + if (g, l1, l2, j) in m.set_GLLJ_col_t[t] + ) + if (g, l1, l2) in m.set_J_col + else 0 + + sum( + 1 + for j in m.set_J_col[(g, l2, l1)] + if j in m.set_J_und[(g, l2, l1)] + if (g, l2, l1, j) in m.set_GLLJ_col_t[t] + ) + if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und) + else 0 ) + * m.var_xi_arc_inv_t[t] + for t in m.set_T_int + ) + # arcs within an (optional) group that does not use interfaces sum( - (sum(1 - for j in m.set_J_col[(g,l1,l2)] - if (g,l1,l2,j) in m.set_GLLJ_col_t[t] - ) if (g,l1,l2) in m.set_J_col else 0 - + - sum(1 - for j in m.set_J_col[(g,l2,l1)] - if j in m.set_J_und[(g,l2,l1)] - if (g,l2,l1,j) in m.set_GLLJ_col_t[t] - ) if ((g,l2,l1) in m.set_J_col and - (g,l2,l1) in m.set_J_und) else 0 - )*sum(m.var_delta_arc_inv_th[(t,h)] 
- for h in m.set_H_t[t]) - for t in m.set_T # new - if t not in m.set_T_mdt # optional - if t not in m.set_T_int # not interfaced + ( + sum( + 1 + for j in m.set_J_col[(g, l1, l2)] + if (g, l1, l2, j) in m.set_GLLJ_col_t[t] + ) + if (g, l1, l2) in m.set_J_col + else 0 + + sum( + 1 + for j in m.set_J_col[(g, l2, l1)] + if j in m.set_J_und[(g, l2, l1)] + if (g, l2, l1, j) in m.set_GLLJ_col_t[t] + ) + if ((g, l2, l1) in m.set_J_col and (g, l2, l1) in m.set_J_und) + else 0 ) + * sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t]) + for t in m.set_T # new + if t not in m.set_T_mdt # optional + if t not in m.set_T_int # not interfaced + ) + # optional individual arcs using interfaces, nominal direction - sum(m.var_xi_arc_inv_gllj[(g,l1,l2,j)] - for j in m.set_J_int[(g,l1,l2)] # interfaced - if j not in m.set_J_col[(g,l1,l2)] # individual - ) if (g,l1,l2) in m.set_J_int else 0 - + + sum( + m.var_xi_arc_inv_gllj[(g, l1, l2, j)] + for j in m.set_J_int[(g, l1, l2)] # interfaced + if j not in m.set_J_col[(g, l1, l2)] # individual + ) + if (g, l1, l2) in m.set_J_int + else 0 + # optional individual arcs using interfaces, reverse direction - sum(m.var_xi_arc_inv_gllj[(g,l2,l1,j)] - for j in m.set_J_int[(g,l2,l1)] # interfaced - if j in m.set_J_und[(g,l2,l1)] # undirected - if j not in m.set_J_col[(g,l1,l2)] # individual - ) if ((g,l2,l1) in m.set_J_int and - (g,l2,l1) in m.set_J_und) else 0 - + + sum( + m.var_xi_arc_inv_gllj[(g, l2, l1, j)] + for j in m.set_J_int[(g, l2, l1)] # interfaced + if j in m.set_J_und[(g, l2, l1)] # undirected + if j not in m.set_J_col[(g, l1, l2)] # individual + ) + if ((g, l2, l1) in m.set_J_int and (g, l2, l1) in m.set_J_und) + else 0 + # optional individual arcs not using interfaces, nominal dir. sum( - sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)] - ) - for j in m.set_J[(g,l1,l2)] - if j not in m.set_J_pre[(g,l1,l2)] # not preexisting - if j not in m.set_J_mdt[(g,l1,l2)] # not mandatory - if j not in m.set_J_int[(g,l1,l2)] # not interfaced - if j not in m.set_J_col[(g,l1,l2)] # individual - ) if (g,l1,l2) in m.set_J else 0 - + + sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + for j in m.set_J[(g, l1, l2)] + if j not in m.set_J_pre[(g, l1, l2)] # not preexisting + if j not in m.set_J_mdt[(g, l1, l2)] # not mandatory + if j not in m.set_J_int[(g, l1, l2)] # not interfaced + if j not in m.set_J_col[(g, l1, l2)] # individual + ) + if (g, l1, l2) in m.set_J + else 0 + # optional individual arcs not using interfaces, reverse dir. 
sum( - sum(m.var_delta_arc_inv_glljh[(g,l2,l1,j,h)] - for h in m.set_H_gllj[(g,l2,l1,j)] - ) - for j in m.set_J_opt[(g,l2,l1)] - if j in m.set_J_und[(g,l2,l1)] - if j not in m.set_J_pre[(g,l2,l1)] # not preexisting - if j not in m.set_J_mdt[(g,l2,l1)] # not mandatory - if j not in m.set_J_int[(g,l2,l1)] # not interfaced - if j not in m.set_J_col[(g,l2,l1)] # individual - ) if (g,l2,l1) in m.set_J else 0 - <= m.param_max_number_parallel_arcs[(g,l1,l2)] + sum( + m.var_delta_arc_inv_glljh[(g, l2, l1, j, h)] + for h in m.set_H_gllj[(g, l2, l1, j)] + ) + for j in m.set_J_opt[(g, l2, l1)] + if j in m.set_J_und[(g, l2, l1)] + if j not in m.set_J_pre[(g, l2, l1)] # not preexisting + if j not in m.set_J_mdt[(g, l2, l1)] # not mandatory + if j not in m.set_J_int[(g, l2, l1)] # not interfaced + if j not in m.set_J_col[(g, l2, l1)] # individual ) - - else: # the number of options is lower than or equal to the limit: skip - + if (g, l2, l1) in m.set_J + else 0 <= m.param_max_number_parallel_arcs[(g, l1, l2)] + ) + + else: # the number of options is lower than or equal to the limit: skip return pyo.Constraint.Skip - + model.constr_limited_parallel_arcs_per_direction = pyo.Constraint( - model.set_GLL_arc_max, - rule=rule_constr_limited_parallel_arcs_per_direction) - + model.set_GLL_arc_max, rule=rule_constr_limited_parallel_arcs_per_direction + ) + # ************************************************************************* - + # there can only be one incoming arc at most, if there are no outgoing arcs - - def rule_constr_max_incoming_directed_arcs(m,g,l): - + + def rule_constr_max_incoming_directed_arcs(m, g, l): # check if the constraint should be ignored - + if l not in m.set_L_max_in_g[g]: - return pyo.Constraint.Skip - + # max number of directed incoming arcs - - n_max_dir_in = sum( - sum(1 - for j in m.set_J[(g,l_line,l)] - if j not in m.set_J_und[(g,l_line,l)]) # directed + + n_max_dir_in = sum( + sum( + 1 + for j in m.set_J[(g, l_line, l)] + if j not in m.set_J_und[(g, l_line, l)] + ) # directed for l_line in m.set_L[g] if l_line != l if l_line not in m.set_L_imp[g] - if (g,l_line,l) in m.set_J - ) - + if (g, l_line, l) in m.set_J + ) + # check the maximum number of incoming arcs - + if n_max_dir_in <= 1: - # there can only be one incoming arc at most: redundant constraint - + return pyo.Constraint.Skip - - else: # more than one incoming arc is possible - + + else: # more than one incoming arc is possible # ***************************************************************** - + # number of (new) incoming directed arcs in a group - + # ***************************************************************** - + b_max_in_gl = 0 - + # the big m - - M_gl = n_max_dir_in-1 # has to be positive since n_max_dir_in > 1 - + + M_gl = n_max_dir_in - 1 # has to be positive since n_max_dir_in > 1 + temp_constr = ( sum( # ********************************************************* # interfaced groups sum( - sum(1 - for j in m.set_J_col[(g,l_circ,l)] # part of group - if j not in m.set_J_und[(g,l_circ,l)] # directed - if (g,l_circ,l,j) in m.set_GLLJ_col_t[t])* # in t - m.var_xi_arc_inv_t[t] - for t in m.set_T_int + sum( + 1 + for j in m.set_J_col[(g, l_circ, l)] # part of group + if j not in m.set_J_und[(g, l_circ, l)] # directed + if (g, l_circ, l, j) in m.set_GLLJ_col_t[t] ) + * m.var_xi_arc_inv_t[t] # in t + for t in m.set_T_int + ) + # ********************************************************* # optional non-interfaced groups sum( sum( - sum(1 - for j in m.set_J_col[(g,l_circ,l)] # part of group - if j not in
m.set_J_und[(g,l_circ,l)] # directed - if (g,l_circ,l,j) in m.set_GLLJ_col_t[t])* - m.var_delta_arc_inv_th[(t,h)] - for h in m.set_H_t[t]) - for t in m.set_T - if t not in m.set_T_mdt # optional - if t not in m.set_T_int # not interfaced + sum( + 1 + for j in m.set_J_col[(g, l_circ, l)] # part of group + if j not in m.set_J_und[(g, l_circ, l)] # directed + if (g, l_circ, l, j) in m.set_GLLJ_col_t[t] + ) + * m.var_delta_arc_inv_th[(t, h)] + for h in m.set_H_t[t] ) + for t in m.set_T + if t not in m.set_T_mdt # optional + if t not in m.set_T_int # not interfaced + ) + # ********************************************************* # interfaced arcs sum( - m.var_xi_arc_inv_gllj[(g,l_circ,l,j_circ)] - for j_circ in m.set_J[(g,l_circ,l)] - if j_circ not in m.set_J_und[(g,l_circ,l)] # directed - if j_circ in m.set_J_int[(g,l_circ,l)] # interfaced - if j_circ not in m.set_J_col[(g,l_circ,l)] # individual - ) if (g,l_circ,l) in m.set_J else 0 - + + m.var_xi_arc_inv_gllj[(g, l_circ, l, j_circ)] + for j_circ in m.set_J[(g, l_circ, l)] + if j_circ not in m.set_J_und[(g, l_circ, l)] # directed + if j_circ in m.set_J_int[(g, l_circ, l)] # interfaced + if j_circ not in m.set_J_col[(g, l_circ, l)] # individual + ) + if (g, l_circ, l) in m.set_J + else 0 + # ********************************************************* # optional non-interfaced arcs sum( - sum(m.var_delta_arc_inv_glljh[(g,l_circ,l,j_dot,h_dot)] - for h_dot in m.set_H_gllj[(g,l_circ,l,j_dot)]) - for j_dot in m.set_J[(g,l_circ,l)] - if j_dot not in m.set_J_und[(g,l_circ,l)] # directed - if j_dot not in m.set_J_int[(g,l_circ,l)] # not interfaced - if j_dot not in m.set_J_col[(g,l_circ,l)] # individual - if j_dot not in m.set_J_mdt[(g,l_circ,l)] # optional - ) if (g,l_circ,l) in m.set_J else 0 - + + sum( + m.var_delta_arc_inv_glljh[(g, l_circ, l, j_dot, h_dot)] + for h_dot in m.set_H_gllj[(g, l_circ, l, j_dot)] + ) + for j_dot in m.set_J[(g, l_circ, l)] + if j_dot not in m.set_J_und[(g, l_circ, l)] # directed + if j_dot not in m.set_J_int[(g, l_circ, l)] # not interfaced + if j_dot not in m.set_J_col[(g, l_circ, l)] # individual + if j_dot not in m.set_J_mdt[(g, l_circ, l)] # optional + ) + if (g, l_circ, l) in m.set_J + else 0 + # ********************************************************* # preexisting directed arcs sum( 1 - for j_pre_dir in m.set_J_pre[(g,l_circ,l)] # preexisting - if j_pre_dir not in m.set_J_und[(g,l_circ,l)] # directed - ) if (g,l_circ,l) in m.set_J_pre else 0 - + + for j_pre_dir in m.set_J_pre[(g, l_circ, l)] # preexisting + if j_pre_dir not in m.set_J_und[(g, l_circ, l)] # directed + ) + if (g, l_circ, l) in m.set_J_pre + else 0 + # ********************************************************* # mandatory directed arcs sum( 1 - for j_mdt_dir in m.set_J_mdt[(g,l_circ,l)] - if j_mdt_dir not in m.set_J_und[(g,l_circ,l)] # directed - ) if (g,l_circ,l) in m.set_J_mdt else 0 + for j_mdt_dir in m.set_J_mdt[(g, l_circ, l)] + if j_mdt_dir not in m.set_J_und[(g, l_circ, l)] # directed + ) + if (g, l_circ, l) in m.set_J_mdt + else 0 # ********************************************************* for l_circ in m.set_L[g] if l_circ not in m.set_L_exp[g] if l_circ != l - ) <= 1 #+ + ) + <= 1 # + # M_gl*sum( # # ********************************************************* # # outgoing arcs in interfaced groups, nominal direction @@ -2856,12 +2823,12 @@ def create_model(name: str, # #if j in m.set_J_int[(g,l,l_diamond)] # if (g,l,l_diamond,j) in m.set_GLLJ_col_t[t] # )*sum( - # m.var_delta_arc_inv_th[(t,h)] + # m.var_delta_arc_inv_th[(t,h)] # for h in 
m.set_H_t[t] # ) # for t in m.set_T # if t not in m.set_T_mdt - # if t not in m.set_T_int + # if t not in m.set_T_int # ) if (g,l,l_diamond) in m.set_J_col else 0 # + # # TODO: outgoing arcs in non-interfaced optional groups, reverse @@ -2871,12 +2838,12 @@ def create_model(name: str, # if j in m.set_J_und[(g,l_diamond,l)] # if (g,l_diamond,l,j) in m.set_GLLJ_col_t[t] # )*sum( - # m.var_delta_arc_inv_th[(t,h)] + # m.var_delta_arc_inv_th[(t,h)] # for h in m.set_H_t[t] # ) # for t in m.set_T # if t not in m.set_T_mdt - # if t not in m.set_T_int + # if t not in m.set_T_int # ) if (g,l_diamond,l) in m.set_J_col else 0 # + # # ********************************************************* @@ -2895,7 +2862,7 @@ def create_model(name: str, # ) if (g,l_diamond,l) in m.set_J_und else 0 # + # # ********************************************************* - # # outgoing non-interfaced individual optional arcs + # # outgoing non-interfaced individual optional arcs # sum( # sum(m.var_delta_arc_inv_glljh[(g,l,l_diamond,j,h)] # for h in m.set_H_gllj[(g,l,l_diamond,j)]) @@ -2944,120 +2911,116 @@ def create_model(name: str, # if l_diamond not in m.set_L_imp[g] # if l_diamond != l # ) - ) - + ) + if temp_constr == True: - return pyo.Constraint.Feasible - + elif temp_constr == False: - return pyo.Constraint.Infeasible - + else: - return temp_constr - + model.constr_max_incoming_directed_arcs = pyo.Constraint( - model.set_GL_not_exp_imp, - rule=rule_constr_max_incoming_directed_arcs - ) - + model.set_GL_not_exp_imp, rule=rule_constr_max_incoming_directed_arcs + ) + # ************************************************************************* - + # def rule_constr_max_outgoing_directed_arcs(m, g, l): - + # pass - + # model.constr_max_outgoing_directed_arcs = pyo.Constraint( # model.set_GL_not_exp_imp, # rule=rule_constr_max_outgoing_directed_arcs # ) - -# # ************************************************************************* - + + # # ************************************************************************* + # # there can only be one outgoing arc at most, if there are no incoming arcs - + # def rule_constr_max_outgoing_arcs(m,g,l): - + # # the number of predefined incoming arcs - + # n_in_pre = sum( # len(m.set_J_pre[(g,l_star,l)]) # = n_in_pre # for l_star in m.set_L[g] # if l_star not in m.set_L_exp[g] # if l_star != l # ) - + # # if there is at least one predefined incoming arc, skip constraint - + # if n_in_pre >= 1: - + # return pyo.Constraint.Skip - + # # the number of non-predefined incoming arcs - + # n_in_opt = sum( # len(m.set_J_new[(g,l_star,l)]) # = n_in_pre # for l_star in m.set_L[g] # if l_star not in m.set_L_exp[g] # if l_star != l # ) - + # n_in_max = n_in_pre + n_in_opt - + # # the number of predefined outgoing arcs - + # n_out_pre = sum( # len(m.set_J_pre[(g,l,l_line)]) # for l_line in m.set_L[g] # if l_line not in m.set_L_imp[g] # if l_line != l # ) - - # # the constraint is infeasible if the maximum number of incoming arcs + + # # the constraint is infeasible if the maximum number of incoming arcs # # is zero and the number of predefined outgoing arcs is bigger than 1 - + # if n_in_max == 0 and n_out_pre >= 2: - + # return pyo.Constraint.Infeasible - + # # DONE: it is also infeasible if the maximum number of incoming arcs is # # zero and the number of predefined outgoing arcs is one and the poten- # # tial outgoing arcs include mandatory arcs (i.e.
sum(...)=1 ) - + # n_out_fcd = sum( # len(m.set_J_mdt[(g,l,l_line)]) # for l_line in m.set_L[g] # if l_line not in m.set_L_imp[g] # if l_line != l # ) - + # if n_in_max == 0 and n_out_pre == 1 and n_out_fcd >= 1: - + # return pyo.Constraint.Infeasible - + # # the number of non-predefined outgoing arcs - + # n_out_opt = sum( # len(m.set_J_new[(g,l,l_line)]) # for l_line in m.set_L[g] # if l_line not in m.set_L_imp[g] # if l_line != l # ) - + # n_out_max = n_out_pre + n_out_opt - + # if n_out_max <= 1: - + # # there can only be one outgoing arc at most: redundant constraint - + # return pyo.Constraint.Skip - + # else: # more than one outgoing arc is possible - + # M_gl = n_out_max - 1 - + # return ( # sum( # sum( @@ -3084,109 +3047,118 @@ def create_model(name: str, # if l_star != l # )+n_in_pre # ) - + # model.constr_max_outgoing_arcs = pyo.Constraint( # model.set_GL_not_exp_imp, # rule=rule_constr_max_outgoing_arcs) - + # ************************************************************************* # ************************************************************************* - + # arcs # ************************************************************************* # ************************************************************************* - + # maximum flow amplitude per time interval - + # nominal direction, directed and undirected arcs - - def rule_constr_arc_max_flow_time_nom(m, g, l1, l2, j, q, k): - return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] <= - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.var_v_amp_gllj[(g,l1,l2,j)] - - - (m.var_w_glljqk[(g,l1,l2,j,q,k)] - if # j in m.set_J_stt[(g,l1,l2)] and - (j in m.set_J_stt_dep[(g,l1,l2)] or - j in m.set_J_stt_us[(g,l1,l2)]) else 0) + + def rule_constr_arc_max_flow_time_nom(m, g, l1, l2, j, q, k): + return m.var_v_glljqk[(g, l1, l2, j, q, k)] <= m.param_f_amp_v_glljqk[ + (g, l1, l2, j, q, k) + ] * m.var_v_amp_gllj[(g, l1, l2, j)] - ( + m.var_w_glljqk[(g, l1, l2, j, q, k)] + if ( # j in m.set_J_stt[(g,l1,l2)] and + j in m.set_J_stt_dep[(g, l1, l2)] or j in m.set_J_stt_us[(g, l1, l2)] ) + else 0 + ) + model.constr_arc_max_flow_time_nom = pyo.Constraint( - model.set_GLLJ_sgl, # new directed and undirected arcs - model.set_QK, # once per time interval - rule=rule_constr_arc_max_flow_time_nom) - + model.set_GLLJ_sgl, # new directed and undirected arcs + model.set_QK, # once per time interval + rule=rule_constr_arc_max_flow_time_nom, + ) + # reverse direction, for undirected arcs - - def rule_constr_arc_max_flow_time_rev(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: + + def rule_constr_arc_max_flow_time_rev(m, g, l1, l2, j, q, k): + if j in m.set_J_col[(g, l1, l2)]: # group arc # TODO: remove this part return pyo.Constraint.Skip - else: # individual arc - return ( - m.var_v_glljqk[(g,l2,l1,j,q,k)] <= - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.var_v_amp_gllj[(g,l1,l2,j)] - - - (m.var_w_glljqk[(g,l1,l2,j,q,k)] - if # j in m.set_J_stt[(g,l1,l2)] and - (j in m.set_J_stt_arr[(g,l1,l2)] or - j in m.set_J_stt_us[(g,l1,l2)]) else 0) + else: # individual arc + return m.var_v_glljqk[(g, l2, l1, j, q, k)] <= m.param_f_amp_v_glljqk[ + (g, l1, l2, j, q, k) + ] * m.var_v_amp_gllj[(g, l1, l2, j)] - ( + m.var_w_glljqk[(g, l1, l2, j, q, k)] + if ( # j in m.set_J_stt[(g,l1,l2)] and + j in m.set_J_stt_arr[(g, l1, l2)] + or j in m.set_J_stt_us[(g, l1, l2)] ) + else 0 + ) + model.constr_arc_max_flow_time_rev = pyo.Constraint( - model.set_GLLJ_und_new, # new and undirected - model.set_QK, # once per time interval - rule=rule_constr_arc_max_flow_time_rev) - + 
model.set_GLLJ_und_new, # new and undirected + model.set_QK, # once per time interval + rule=rule_constr_arc_max_flow_time_rev, + ) + # ************************************************************************* - + # maximum nominal flow amplitude (applies to both directions) - + def rule_constr_max_nominal_flow_amplitude(m, g, l1, l2, j): - return ( - m.var_v_amp_gllj[(g,l1,l2,j)] <= - sum( - m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)]* - m.param_v_amp_max_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)] - ) - ) + return m.var_v_amp_gllj[(g, l1, l2, j)] <= sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + * m.param_v_amp_max_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + model.constr_max_nominal_flow_amplitude = pyo.Constraint( - model.set_GLLJ_sgl, # directed and undirected arcs, new only - rule=rule_constr_max_nominal_flow_amplitude) - + model.set_GLLJ_sgl, # directed and undirected arcs, new only + rule=rule_constr_max_nominal_flow_amplitude, + ) + # ************************************************************************* - + # one option per arc # note: one constraint per (undirected) arc - + def rule_constr_one_arc_option_per_arc(m, g, l1, l2, j): - if j in m.set_J_mdt[(g,l1,l2)]: + if j in m.set_J_mdt[(g, l1, l2)]: return ( - sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)]) - == 1) # one arc is mandatory - elif j in m.set_J_int[(g,l1,l2)]: + sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + == 1 + ) # one arc is mandatory + elif j in m.set_J_int[(g, l1, l2)]: # not mandatory and interfaced + return m.var_xi_arc_inv_gllj[(g, l1, l2, j)] == sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + else: # optional and not interfaced return ( - m.var_xi_arc_inv_gllj[(g,l1,l2,j)] == - sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)]) + sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] ) - else: # optional and not interfaced - return ( - sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)]) - <= 1) # arcs are optional + <= 1 + ) # arcs are optional + model.constr_one_arc_option_per_arc = pyo.Constraint( - model.set_GLLJ_sgl, # directed and undirected arcs, new only - rule=rule_constr_one_arc_option_per_arc) - + model.set_GLLJ_sgl, # directed and undirected arcs, new only + rule=rule_constr_one_arc_option_per_arc, + ) + # ************************************************************************* - + # # interface equations (unnecessary if the domain is UnitInterval) - + # def rule_constr_single_arc_interfaces(m, g, l1, l2, j): # return ( # m.var_xi_arc_inv_gllj[(g,l1,l2,j)] <= 1 @@ -3194,851 +3166,875 @@ def create_model(name: str, # model.constr_single_arc_interfaces = pyo.Constraint( # model.set_GLLJ_int, # rule=rule_constr_single_arc_interfaces) - + # ************************************************************************* - + # SOS1 constraints for arc selection - + model.constr_arc_sos1 = pyo.SOSConstraint( - model.set_GLLJ_arc_inv_sos1, # for (directed and undirected) new arcs - var=model.var_delta_arc_inv_glljh, # set_GLLJH_sgl indexes the variables - index=model.set_GLLJH_arc_inv_sos1_gllj, # key: GLLJ; value: GLLJH - weights=model.param_arc_inv_sos1_weights_glljh, # key: GLLJH; alue: weight - sos=1) - - # ************************************************************************* - + model.set_GLLJ_arc_inv_sos1, # for 
(directed and undirected) new arcs + var=model.var_delta_arc_inv_glljh, # set_GLLJH_sgl indexes the variables + index=model.set_GLLJH_arc_inv_sos1_gllj, # key: GLLJ; value: GLLJH + weights=model.param_arc_inv_sos1_weights_glljh, # key: GLLJH; value: weight + sos=1, + ) + + # ************************************************************************* + # one flow direction per time interval in undirected preexisting arcs + def rule_constr_one_sns_per_time_interval(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: - return pyo.Constraint.Skip # TODO: remove this part - if j in m.set_J_int[(g,l1,l2)]: + if j in m.set_J_col[(g, l1, l2)]: + return pyo.Constraint.Skip # TODO: remove this part + if j in m.set_J_int[(g, l1, l2)]: # using interfaces return ( - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]+ - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] - == m.var_xi_arc_inv_gllj[(g,l1,l2,j)] - ) - elif j in m.set_J_mdt[(g,l1,l2)] or j in m.set_J_pre[(g,l1,l2)]: + m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + + m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] + == m.var_xi_arc_inv_gllj[(g, l1, l2, j)] + ) + elif j in m.set_J_mdt[(g, l1, l2)] or j in m.set_J_pre[(g, l1, l2)]: # mandatory or pre-existing return ( - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]+ - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] + m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + + m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] == 1 - ) + ) else: # optional - return ( - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]+ - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] - == sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)] - for h in m.set_H_gllj[(g,l1,l2,j)]) - ) + return m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + m.var_zeta_sns_glljqk[ + (g, l2, l1, j, q, k) + ] == sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + model.constr_one_sns_per_time_interval = pyo.Constraint( - model.set_GLLJ_und, # once per undirected arc - model.set_QK, # once per time interval - rule=rule_constr_one_sns_per_time_interval) - + model.set_GLLJ_und, # once per undirected arc + model.set_QK, # once per time interval + rule=rule_constr_one_sns_per_time_interval, + ) + # ************************************************************************* - + # no flow except in the flow direction, for new undirected arcs - + # nominal direction - + def rule_constr_no_flow_except_in_sns_nom(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: - return pyo.Constraint.Skip # TODO: remove this - if j in m.set_J_pre[(g,l1,l2)]: # pre-existing arc + if j in m.set_J_col[(g, l1, l2)]: + return pyo.Constraint.Skip # TODO: remove this + if j in m.set_J_pre[(g, l1, l2)]: # pre-existing arc return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] <= - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]* - m.param_v_ub_glljqk[(g,l1,l2,j,q,k)] - ) - else: # new arc - return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] <= - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]*( - max( - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.param_v_amp_max_glljh[(g,l1,l2,j,h)] - - - (m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - if (((g,l1,l2) in m.set_J_stt_dep and - j in m.set_J_stt_dep[(g,l1,l2)]) or - ((g,l1,l2) in m.set_J_stt_us and - j in m.set_J_stt_us[(g,l1,l2)])) else 0) - for h in m.set_H_gllj[(g,l1,l2,j)] - ) + m.var_v_glljqk[(g, l1, l2, j, q, k)] + <= m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + * m.param_v_ub_glljqk[(g, l1, l2, j, q, k)] + ) + else: # new arc + return m.var_v_glljqk[(g, l1, l2, j, q, k)] <= m.var_zeta_sns_glljqk[ + (g, l1, l2, j, q, k) + ] * ( + max( + m.param_f_amp_v_glljqk[(g, l1, l2, j, q, k)] + * 
m.param_v_amp_max_glljh[(g, l1, l2, j, h)] + - ( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + if ( + ( + (g, l1, l2) in m.set_J_stt_dep + and j in m.set_J_stt_dep[(g, l1, l2)] + ) + or ( + (g, l1, l2) in m.set_J_stt_us + and j in m.set_J_stt_us[(g, l1, l2)] + ) + ) + else 0 ) - ) + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + ) + model.constr_no_flow_except_in_sns_nom = pyo.Constraint( - model.set_GLLJ_und, # new undirected arcs - model.set_QK, # once per time interval - rule=rule_constr_no_flow_except_in_sns_nom) - + model.set_GLLJ_und, # new undirected arcs + model.set_QK, # once per time interval + rule=rule_constr_no_flow_except_in_sns_nom, + ) + # reverse direction def rule_constr_no_flow_except_in_sns_rev(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: - return pyo.Constraint.Skip # TODO: remove this - if j in m.set_J_pre[(g,l1,l2)]: # pre-existing arc + if j in m.set_J_col[(g, l1, l2)]: + return pyo.Constraint.Skip # TODO: remove this + if j in m.set_J_pre[(g, l1, l2)]: # pre-existing arc return ( - m.var_v_glljqk[(g,l2,l1,j,q,k)] <= - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)]* - m.param_v_ub_glljqk[(g,l2,l1,j,q,k)] - ) - else: # new arc - return ( - m.var_v_glljqk[(g,l2,l1,j,q,k)] <= - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)]*( - max( - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.param_v_amp_max_glljh[(g,l1,l2,j, h)] - - - (m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - if (((g,l1,l2) in m.set_J_stt_arr and - j in m.set_J_stt_arr[(g,l1,l2)]) or - ((g,l1,l2) in m.set_J_stt_us and - j in m.set_J_stt_us[(g,l1,l2)])) else 0) - for h in m.set_H_gllj[(g,l1,l2,j)] - ) + m.var_v_glljqk[(g, l2, l1, j, q, k)] + <= m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] + * m.param_v_ub_glljqk[(g, l2, l1, j, q, k)] + ) + else: # new arc + return m.var_v_glljqk[(g, l2, l1, j, q, k)] <= m.var_zeta_sns_glljqk[ + (g, l2, l1, j, q, k) + ] * ( + max( + m.param_f_amp_v_glljqk[(g, l1, l2, j, q, k)] + * m.param_v_amp_max_glljh[(g, l1, l2, j, h)] + - ( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + if ( + ( + (g, l1, l2) in m.set_J_stt_arr + and j in m.set_J_stt_arr[(g, l1, l2)] + ) + or ( + (g, l1, l2) in m.set_J_stt_us + and j in m.set_J_stt_us[(g, l1, l2)] + ) + ) + else 0 ) - ) + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + ) + model.constr_no_flow_except_in_sns_rev = pyo.Constraint( - model.set_GLLJ_und, # new undirected arcs - model.set_QK, # once per time interval - rule=rule_constr_no_flow_except_in_sns_rev) - + model.set_GLLJ_und, # new undirected arcs + model.set_QK, # once per time interval + rule=rule_constr_no_flow_except_in_sns_rev, + ) + # ************************************************************************* - + # SOS1 constraints for flow sense determination (undirected arcs) - + model.constr_sns_sos1 = pyo.SOSConstraint( - model.set_GLLJ_und, # one constraint per undirected arc - model.set_QK, # and time interval - var=model.var_zeta_sns_glljqk, # set_GLLJ_und_red and set_K - index=model.set_GLLJQK_und_sns_sos1_red_gllj, # + model.set_GLLJ_und, # one constraint per undirected arc + model.set_QK, # and time interval + var=model.var_zeta_sns_glljqk, # set_GLLJ_und_red and set_K + index=model.set_GLLJQK_und_sns_sos1_red_gllj, # weights=model.param_arc_sns_sos1_weights_glljqk, - sos=1) - + sos=1, + ) + # ************************************************************************* - + # static losses - + # static losses only exist if the arc exists - + def rule_constr_static_losses_existence(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: # group - return ( - 
m.var_w_glljqk[(g,l1,l2,j,q,k)] == - sum(m.var_delta_arc_inv_th[(t,h)]* - m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for t in m.set_T - if (g,l1,l2,j) in m.set_GLLJ_col_t[t] - for h in m.set_H_t[t]) - ) - else: # individual - return ( - m.var_w_glljqk[(g,l1,l2,j,q,k)] == - sum(m.var_delta_arc_inv_glljh[(g,l1,l2,j,h)]* - m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for h in m.set_H_gllj[(g,l1,l2,j)]) - ) + if j in m.set_J_col[(g, l1, l2)]: # group + return m.var_w_glljqk[(g, l1, l2, j, q, k)] == sum( + m.var_delta_arc_inv_th[(t, h)] + * m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for t in m.set_T + if (g, l1, l2, j) in m.set_GLLJ_col_t[t] + for h in m.set_H_t[t] + ) + else: # individual + return m.var_w_glljqk[(g, l1, l2, j, q, k)] == sum( + m.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] + * m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + model.constr_static_losses_existence = pyo.Constraint( model.set_GLLJ_static_new, model.set_QK, - rule=rule_constr_static_losses_existence) - + rule=rule_constr_static_losses_existence, + ) + # ************************************************************************* - + # static losses placed downstream - + # nominal direction - + def rule_constr_static_losses_downstream_nom(m, g, l1, l2, j, q, k): - if (j not in m.set_J_stt_arr[(g,l1,l2)] and - j not in m.set_J_stt_ds[(g,l1,l2)]): + if ( + j not in m.set_J_stt_arr[(g, l1, l2)] + and j not in m.set_J_stt_ds[(g, l1, l2)] + ): return pyo.Constraint.Skip - - if j in m.set_J_und[(g,l1,l2)]: - + + if j in m.set_J_und[(g, l1, l2)]: # undirected arc - + # TODO: adjust if losses for pre. undirected arcs are handled diff. - + # arrival node or downstream - + return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] >= - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] + m.var_v_glljqk[(g, l1, l2, j, q, k)] + >= m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] + ) + + else: # directed arc, arrival + if j in m.set_J_stt_ds[(g, l1, l2)]: + raise ValueError( + "Static losses for directed arcs should be mo" + + "delled as being in the departure or in the" + + " arrival nodes." 
) - - else: # directed arc, arrival - - if j in m.set_J_stt_ds[(g,l1,l2)]: - - raise ValueError('Static losses for directed arcs should be mo' - +'delled as being in the departure or in the'+ - ' arrival nodes.') - - if j in m.set_J_pre[(g,l1,l2)]: - + + if j in m.set_J_pre[(g, l1, l2)]: # pre-existing arc - + return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] >= - m.param_w_pre_glljqk[(g,l1,l2,j,q,k)] - ) - + m.var_v_glljqk[(g, l1, l2, j, q, k)] + >= m.param_w_pre_glljqk[(g, l1, l2, j, q, k)] + ) + else: - # new arc - + return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] >= - m.var_w_glljqk[(g,l1,l2,j,q,k)] - ) + m.var_v_glljqk[(g, l1, l2, j, q, k)] + >= m.var_w_glljqk[(g, l1, l2, j, q, k)] + ) + model.constr_static_losses_downstream_nom = pyo.Constraint( - model.set_GLLJ_static, + model.set_GLLJ_static, model.set_QK, - rule=rule_constr_static_losses_downstream_nom) - + rule=rule_constr_static_losses_downstream_nom, + ) + # reverse direction - + def rule_constr_static_losses_downstream_und_rev(m, g, l1, l2, j, q, k): - if (j in m.set_J_stt_ds[(g,l2,l1)] or - j in m.set_J_stt_dep[(g,l2,l1)]): - + if j in m.set_J_stt_ds[(g, l2, l1)] or j in m.set_J_stt_dep[(g, l2, l1)]: # departure node or downstream - + return ( - m.var_v_glljqk[(g,l2,l1,j,q,k)] >= - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] - ) - + m.var_v_glljqk[(g, l2, l1, j, q, k)] + >= m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] + ) + else: - return pyo.Constraint.Skip - + model.constr_static_losses_downstream_und_rev = pyo.Constraint( model.set_GLLJ_static_und, model.set_QK, - rule=rule_constr_static_losses_downstream_und_rev) - + rule=rule_constr_static_losses_downstream_und_rev, + ) + # ************************************************************************* - + # static losses modulated by flow sense for pre-existing arcs - + # TODO: replace this constraint by reformulating the node balance - + def rule_constr_static_losses_sense_pre_nom(m, g, l1, l2, j, q, k): return ( - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] == - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]* - m.param_w_pre_glljqk[(g,l1,l2,j,q,k)] - ) + m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] + == m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + * m.param_w_pre_glljqk[(g, l1, l2, j, q, k)] + ) + model.constr_static_losses_sense_pre_nom = pyo.Constraint( model.set_GLLJ_static_und_pre, model.set_QK, - rule=rule_constr_static_losses_sense_pre_nom) - + rule=rule_constr_static_losses_sense_pre_nom, + ) + def rule_constr_static_losses_sense_pre_rev(m, g, l1, l2, j, q, k): return ( - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] == - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)]* - m.param_w_pre_glljqk[(g,l1,l2,j,q,k)] - ) + m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] + == m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] + * m.param_w_pre_glljqk[(g, l1, l2, j, q, k)] + ) + model.constr_static_losses_sense_pre_rev = pyo.Constraint( model.set_GLLJ_static_und_pre, model.set_QK, - rule=rule_constr_static_losses_sense_pre_rev) - + rule=rule_constr_static_losses_sense_pre_rev, + ) + # static losses modulated by the flow sense for new undirected arcs - + # general upper bound - + def rule_constr_static_losses_sense_new_ub_nom(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: - + if j in m.set_J_col[(g, l1, l2)]: return ( - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] <= - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] + m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] + <= max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] for t, h in m.set_TH - if (g,l1,l2,j) in m.set_GLLJ_col_t[t])* - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)] + if (g, l1, l2, j) in 
m.set_GLLJ_col_t[t] ) - + * m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + ) + else: - return ( - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] <= - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for h in m.set_H_gllj[(g,l1,l2,j)])* - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)] + m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] + <= max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for h in m.set_H_gllj[(g, l1, l2, j)] ) + * m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + ) + model.constr_static_losses_sense_new_ub_nom = pyo.Constraint( model.set_GLLJ_static_und_new, model.set_QK, - rule=rule_constr_static_losses_sense_new_ub_nom) - + rule=rule_constr_static_losses_sense_new_ub_nom, + ) + def rule_constr_static_losses_sense_new_ub_rev(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: + if j in m.set_J_col[(g, l1, l2)]: # group arc return ( - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] <= - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] + m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] + <= max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] for t, h in m.set_TH - if (g,l1,l2,j) in m.set_GLLJ_col_t[t])* - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] + if (g, l1, l2, j) in m.set_GLLJ_col_t[t] ) - else: # individual arc + * m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] + ) + else: # individual arc return ( - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] <= - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for h in m.set_H_gllj[(g,l1,l2,j)])* - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] + m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] + <= max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for h in m.set_H_gllj[(g, l1, l2, j)] ) + * m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] + ) + model.constr_static_losses_sense_new_ub_rev = pyo.Constraint( model.set_GLLJ_static_und_new, model.set_QK, - rule=rule_constr_static_losses_sense_new_ub_rev) - + rule=rule_constr_static_losses_sense_new_ub_rev, + ) + # equation, part 1 - + def rule_constr_static_losses_sense_new_eq1_nom(m, g, l1, l2, j, q, k): return ( - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] <= m.var_w_glljqk[(g,l1,l2,j,q,k)] - ) + m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] + <= m.var_w_glljqk[(g, l1, l2, j, q, k)] + ) + model.constr_static_losses_sense_new_eq1_nom = pyo.Constraint( model.set_GLLJ_static_und_new, model.set_QK, - rule=rule_constr_static_losses_sense_new_eq1_nom) - + rule=rule_constr_static_losses_sense_new_eq1_nom, + ) + def rule_constr_static_losses_sense_new_eq1_rev(m, g, l1, l2, j, q, k): return ( - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] <= m.var_w_glljqk[(g,l1,l2,j,q,k)] - ) + m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] + <= m.var_w_glljqk[(g, l1, l2, j, q, k)] + ) + model.constr_static_losses_sense_new_eq1_rev = pyo.Constraint( model.set_GLLJ_static_und_new, model.set_QK, - rule=rule_constr_static_losses_sense_new_eq1_rev) - + rule=rule_constr_static_losses_sense_new_eq1_rev, + ) + # equation, part 2 - + def rule_constr_static_losses_sense_new_eq2_nom(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: + if j in m.set_J_col[(g, l1, l2)]: # arc group - return ( - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] >= - m.var_w_glljqk[(g,l1,l2,j,q,k)]- - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)]* - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for t, h in m.set_TH - if (g,l1,l2,j) in m.set_GLLJ_col_t[t]) - ) - else: # individual - return ( - m.var_w_sns_glljqk[(g,l1,l2,j,q,k)] >= - m.var_w_glljqk[(g,l1,l2,j,q,k)]- - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)]* - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for h in m.set_H_gllj[(g,l1,l2,j)]) - ) + return m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] >= 
m.var_w_glljqk[ + (g, l1, l2, j, q, k) + ] - m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] * max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for t, h in m.set_TH + if (g, l1, l2, j) in m.set_GLLJ_col_t[t] + ) + else: # individual + return m.var_w_sns_glljqk[(g, l1, l2, j, q, k)] >= m.var_w_glljqk[ + (g, l1, l2, j, q, k) + ] - m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] * max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + model.constr_static_losses_sense_new_eq2_nom = pyo.Constraint( model.set_GLLJ_static_und_new, model.set_QK, - rule=rule_constr_static_losses_sense_new_eq2_nom) - + rule=rule_constr_static_losses_sense_new_eq2_nom, + ) + def rule_constr_static_losses_sense_new_eq2_rev(m, g, l1, l2, j, q, k): - if j in m.set_J_col[(g,l1,l2)]: + if j in m.set_J_col[(g, l1, l2)]: # arc group - return ( - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] >= - m.var_w_glljqk[(g,l1,l2,j,q,k)]- - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]* - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for t, h in m.set_TH - if (g,l1,l2,j) in m.set_GLLJ_col_t[t]) - ) - else: # individual group - return ( - m.var_w_sns_glljqk[(g,l2,l1,j,q,k)] >= - m.var_w_glljqk[(g,l1,l2,j,q,k)]- - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]* - max(m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - for h in m.set_H_gllj[(g,l1,l2,j)]) - ) + return m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] >= m.var_w_glljqk[ + (g, l1, l2, j, q, k) + ] - m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] * max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for t, h in m.set_TH + if (g, l1, l2, j) in m.set_GLLJ_col_t[t] + ) + else: # individual group + return m.var_w_sns_glljqk[(g, l2, l1, j, q, k)] >= m.var_w_glljqk[ + (g, l1, l2, j, q, k) + ] - m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] * max( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + for h in m.set_H_gllj[(g, l1, l2, j)] + ) + model.constr_static_losses_sense_new_eq2_rev = pyo.Constraint( model.set_GLLJ_static_und_new, model.set_QK, - rule=rule_constr_static_losses_sense_new_eq2_rev) - + rule=rule_constr_static_losses_sense_new_eq2_rev, + ) + # ************************************************************************* # ************************************************************************* - + # groups of arcs # ************************************************************************* # ************************************************************************* - + # maximum flow amplitude per time interval, for arcs in arc groups - + # nominal direction, directed and undirected arcs - - def rule_constr_arc_group_max_flow_time_nom(m, t, g, l1, l2, j, q, k): - return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] <= - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.var_v_amp_t[t] - - - (m.var_w_glljqk[(g,l1,l2,j,q,k)] - if # j in m.set_J_stt[(g,l1,l2)] and - (j in m.set_J_stt_dep[(g,l1,l2)] or - j in m.set_J_stt_us[(g,l1,l2)]) else 0) + + def rule_constr_arc_group_max_flow_time_nom(m, t, g, l1, l2, j, q, k): + return m.var_v_glljqk[(g, l1, l2, j, q, k)] <= m.param_f_amp_v_glljqk[ + (g, l1, l2, j, q, k) + ] * m.var_v_amp_t[t] - ( + m.var_w_glljqk[(g, l1, l2, j, q, k)] + if ( # j in m.set_J_stt[(g,l1,l2)] and + j in m.set_J_stt_dep[(g, l1, l2)] or j in m.set_J_stt_us[(g, l1, l2)] ) + else 0 + ) + model.constr_arc_group_max_flow_time_nom = pyo.Constraint( - model.set_TGLLJ, # new directed and undirected arcs - model.set_QK, # all time intervals - rule=rule_constr_arc_group_max_flow_time_nom) - + model.set_TGLLJ, # new directed and undirected arcs + model.set_QK, # all time intervals + 
rule=rule_constr_arc_group_max_flow_time_nom, + ) + # reverse direction, for undirected arcs - + def rule_constr_arc_group_max_flow_time_rev(m, t, g, l1, l2, j, q, k): - if j in m.set_J_und[(g,l1,l2)]: - return ( - m.var_v_glljqk[(g,l2,l1,j,q,k)] <= - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.var_v_amp_t[t] - - - (m.var_w_glljqk[(g,l1,l2,j,q,k)] - if # j in m.set_J_stt[(g,l1,l2)] and - (j in m.set_J_stt_arr[(g,l1,l2)] or - j in m.set_J_stt_us[(g,l1,l2)]) else 0) + if j in m.set_J_und[(g, l1, l2)]: + return m.var_v_glljqk[(g, l2, l1, j, q, k)] <= m.param_f_amp_v_glljqk[ + (g, l1, l2, j, q, k) + ] * m.var_v_amp_t[t] - ( + m.var_w_glljqk[(g, l1, l2, j, q, k)] + if ( # j in m.set_J_stt[(g,l1,l2)] and + j in m.set_J_stt_arr[(g, l1, l2)] + or j in m.set_J_stt_us[(g, l1, l2)] ) + else 0 + ) else: return pyo.Constraint.Skip + model.constr_arc_group_max_flow_time_rev = pyo.Constraint( - model.set_TGLLJ, # new and undirected - model.set_QK, # all time intervals - rule=rule_constr_arc_group_max_flow_time_rev) - + model.set_TGLLJ, # new and undirected + model.set_QK, # all time intervals + rule=rule_constr_arc_group_max_flow_time_rev, + ) + # ************************************************************************* - + # maximum nominal flow amplitude for arc groups - + def rule_constr_arc_group_max_nominal_flow_amplitude(m, t): - return ( - m.var_v_amp_t[t] <= - sum( - m.var_delta_arc_inv_th[(t,h)]* - m.param_v_amp_max_th[(t,h)] - for h in m.set_H_t[t] - ) - ) + return m.var_v_amp_t[t] <= sum( + m.var_delta_arc_inv_th[(t, h)] * m.param_v_amp_max_th[(t, h)] + for h in m.set_H_t[t] + ) + model.constr_arc_group_max_nominal_flow_amplitude = pyo.Constraint( - model.set_T, # all arc groups - rule=rule_constr_arc_group_max_nominal_flow_amplitude) - + model.set_T, # all arc groups + rule=rule_constr_arc_group_max_nominal_flow_amplitude, + ) + # ************************************************************************* - + # one option per arc group - + def rule_constr_one_arc_option_per_arc_group(m, t): if t in m.set_T_mdt: return ( - sum(m.var_delta_arc_inv_th[(t,h)] - for h in m.set_H_t[t]) - == 1) # the arcs are all mandatory + sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t]) == 1 + ) # the arcs are all mandatory else: return ( - sum(m.var_delta_arc_inv_th[(t,h)] - for h in m.set_H_t[t]) - <= 1) # the arcs are all optional + sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t]) <= 1 + ) # the arcs are all optional + model.constr_one_arc_option_per_arc_group = pyo.Constraint( - model.set_T, # all arc groups - rule=rule_constr_one_arc_option_per_arc_group) - + model.set_T, rule=rule_constr_one_arc_option_per_arc_group # all arc groups + ) + # ************************************************************************* - + # SOS1 constraints for arc group selection - + model.constr_arc_group_sos1 = pyo.SOSConstraint( - model.set_T_sos1, # for all groups using sos1 - var=model.var_delta_arc_inv_th, # set_TH indexes the variables - index=model.set_TH_arc_inv_sos1_t, # key: t; value: TH - weights=model.param_arc_inv_sos1_weights_th, # key: TH; value: weight - sos=1) - - # ************************************************************************* - + model.set_T_sos1, # for all groups using sos1 + var=model.var_delta_arc_inv_th, # set_TH indexes the variables + index=model.set_TH_arc_inv_sos1_t, # key: t; value: TH + weights=model.param_arc_inv_sos1_weights_th, # key: TH; value: weight + sos=1, + ) + + # ************************************************************************* + # one flow direction 
per time interval, for undirected arcs within groups - + def rule_constr_one_sns_per_time_interval_group(m, t, g, l1, l2, j, q, k): - if j in m.set_J_int[(g,l1,l2)]: + if j in m.set_J_int[(g, l1, l2)]: # using interface return ( - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]+ - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] + m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + + m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] == m.var_xi_arc_inv_t[t] - ) - elif j in m.set_J_mdt[(g,l1,l2)]: + ) + elif j in m.set_J_mdt[(g, l1, l2)]: # mandatory group return ( - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]+ - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] + m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + + m.var_zeta_sns_glljqk[(g, l2, l1, j, q, k)] == 1 - ) + ) else: # optional and not using interface - return ( - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]+ - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)] - == sum(m.var_delta_arc_inv_th[(t,h)] - for h in m.set_H_t[t]) - ) + return m.var_zeta_sns_glljqk[(g, l1, l2, j, q, k)] + m.var_zeta_sns_glljqk[ + (g, l2, l1, j, q, k) + ] == sum(m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t]) + model.constr_one_sns_per_time_interval_group = pyo.Constraint( - model.set_TGLLJ_und, # once per undirected arc - model.set_QK, # once per time interval - rule=rule_constr_one_sns_per_time_interval_group) - + model.set_TGLLJ_und, # once per undirected arc + model.set_QK, # once per time interval + rule=rule_constr_one_sns_per_time_interval_group, + ) + # ************************************************************************* - + # no flow except in the flow direction, for new undirected arcs in groups - + # nominal direction - + def rule_constr_no_flow_except_in_sns_group_nom(m, t, g, l1, l2, j, q, k): - return ( - m.var_v_glljqk[(g,l1,l2,j,q,k)] <= - m.var_zeta_sns_glljqk[(g,l1,l2,j,q,k)]*( - max( - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.param_v_amp_max_th[(t,h)] - - - (m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - if (((g,l1,l2) in m.set_J_stt_dep and - j in m.set_J_stt_dep[(g,l1,l2)]) or - ((g,l1,l2) in m.set_J_stt_us and - j in m.set_J_stt_us[(g,l1,l2)])) else 0) - for h in m.set_H_t[t] - ) + return m.var_v_glljqk[(g, l1, l2, j, q, k)] <= m.var_zeta_sns_glljqk[ + (g, l1, l2, j, q, k) + ] * ( + max( + m.param_f_amp_v_glljqk[(g, l1, l2, j, q, k)] + * m.param_v_amp_max_th[(t, h)] + - ( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + if ( + ( + (g, l1, l2) in m.set_J_stt_dep + and j in m.set_J_stt_dep[(g, l1, l2)] + ) + or ( + (g, l1, l2) in m.set_J_stt_us + and j in m.set_J_stt_us[(g, l1, l2)] + ) + ) + else 0 ) - ) + for h in m.set_H_t[t] + ) + ) + model.constr_no_flow_except_in_sns_group_nom = pyo.Constraint( - model.set_TGLLJ_und, # new undirected arcs in groups - model.set_QK, # once per time interval - rule=rule_constr_no_flow_except_in_sns_group_nom) - + model.set_TGLLJ_und, # new undirected arcs in groups + model.set_QK, # once per time interval + rule=rule_constr_no_flow_except_in_sns_group_nom, + ) + # reverse direction def rule_constr_no_flow_except_in_sns_group_rev(m, t, g, l1, l2, j, q, k): - return ( - m.var_v_glljqk[(g,l2,l1,j,q,k)] <= - m.var_zeta_sns_glljqk[(g,l2,l1,j,q,k)]*( - max( - m.param_f_amp_v_glljqk[(g,l1,l2,j,q,k)]* - m.param_v_amp_max_th[(t,h)] - - - (m.param_w_new_glljhqk[(g,l1,l2,j,h,q,k)] - if (((g,l1,l2) in m.set_J_stt_arr and - j in m.set_J_stt_arr[(g,l1,l2)]) or - ((g,l1,l2) in m.set_J_stt_us and - j in m.set_J_stt_us[(g,l1,l2)])) else 0) - for h in m.set_H_t[t] - ) + return m.var_v_glljqk[(g, l2, l1, j, q, k)] <= m.var_zeta_sns_glljqk[ + (g, l2, l1, j, q, k) + ] * ( + 
max( + m.param_f_amp_v_glljqk[(g, l1, l2, j, q, k)] + * m.param_v_amp_max_th[(t, h)] + - ( + m.param_w_new_glljhqk[(g, l1, l2, j, h, q, k)] + if ( + ( + (g, l1, l2) in m.set_J_stt_arr + and j in m.set_J_stt_arr[(g, l1, l2)] + ) + or ( + (g, l1, l2) in m.set_J_stt_us + and j in m.set_J_stt_us[(g, l1, l2)] + ) + ) + else 0 ) - ) + for h in m.set_H_t[t] + ) + ) + model.constr_no_flow_except_in_sns_group_rev = pyo.Constraint( - model.set_TGLLJ_und, # new undirected arcs in groups - model.set_QK, # once per time interval - rule=rule_constr_no_flow_except_in_sns_group_rev) - + model.set_TGLLJ_und, # new undirected arcs in groups + model.set_QK, # once per time interval + rule=rule_constr_no_flow_except_in_sns_group_rev, + ) + # ************************************************************************* - + # interface equations for arc groups - + def rule_constr_arc_group_interfaces(m, t): - return ( - m.var_xi_arc_inv_t[t] == - sum(m.var_delta_arc_inv_th[(t,h)] - for h in m.set_H_t[t]) - ) + return m.var_xi_arc_inv_t[t] == sum( + m.var_delta_arc_inv_th[(t, h)] for h in m.set_H_t[t] + ) + model.constr_arc_group_interfaces = pyo.Constraint( - model.set_T_int, - rule=rule_constr_arc_group_interfaces) - + model.set_T_int, rule=rule_constr_arc_group_interfaces + ) + # ************************************************************************* # ************************************************************************* - + # converters - + # ************************************************************************* # ************************************************************************* - + # input signal limits for dimensionable inputs - - def rule_constr_u_limit_dim(m,i,m_i,q,k): - + + def rule_constr_u_limit_dim(m, i, m_i, q, k): return ( - m.var_u_imqk[(i,m_i,q,k)] <= - m.var_u_amp_im[(i,m_i)]* - m.param_f_amp_u_imqk[(i,m_i,q,k)] - ) - + m.var_u_imqk[(i, m_i, q, k)] + <= m.var_u_amp_im[(i, m_i)] * m.param_f_amp_u_imqk[(i, m_i, q, k)] + ) + model.constr_u_limit_dim = pyo.Constraint( - model.set_IM_dim, - model.set_QK, - rule=rule_constr_u_limit_dim) - + model.set_IM_dim, model.set_QK, rule=rule_constr_u_limit_dim + ) + # nominal input amplitude limit for dimensionable inputs - - def rule_constr_u_amp_ub(m,i,m_i): - + + def rule_constr_u_amp_ub(m, i, m_i): return ( - m.var_u_amp_im[(i,m_i)] <= - m.var_cvt_inv_i[i]* - m.param_u_amp_max_im[(i,m_i)] - ) - - model.constr_u_amp_ub = pyo.Constraint( - model.set_IM_dim, - rule=rule_constr_u_amp_ub) - + m.var_u_amp_im[(i, m_i)] + <= m.var_cvt_inv_i[i] * m.param_u_amp_max_im[(i, m_i)] + ) + + model.constr_u_amp_ub = pyo.Constraint(model.set_IM_dim, rule=rule_constr_u_amp_ub) + # fixed upper limits - + def rule_constr_u_fix_limits(m, i, m_i, q, k): - # if we need to know the lim input signal (e.g., for the obj. func.) 
- + if i in m.set_I_new: - # new converter - - return ( - m.var_u_imqk[(i,m_i,q,k)] <= - m.param_u_ub_imqk[(i,m_i,q,k)]* - m.var_cvt_inv_i[i] - ) - - + return ( - m.var_u_imqk[(i,m_i,q,k)] <= m.var_cvt_inv_i[i] - ) - + m.var_u_imqk[(i, m_i, q, k)] + <= m.param_u_ub_imqk[(i, m_i, q, k)] * m.var_cvt_inv_i[i] + ) + + return m.var_u_imqk[(i, m_i, q, k)] <= m.var_cvt_inv_i[i] + else: - # pre-existing - - return ( - m.var_u_imqk[(i,m_i,q,k)] <= - m.param_u_ub_imqk[(i,m_i,q,k)] - ) - - model.constr_u_fix_limits = pyo.Constraint(model.set_IM_fix, - model.set_QK, - rule=rule_constr_u_fix_limits) - + + return m.var_u_imqk[(i, m_i, q, k)] <= m.param_u_ub_imqk[(i, m_i, q, k)] + + model.constr_u_fix_limits = pyo.Constraint( + model.set_IM_fix, model.set_QK, rule=rule_constr_u_fix_limits + ) + # input limits for binary inputs - + def rule_constr_u_bin_limits(m, i, m_i, q, k): - if i in m.set_I_new: - # binary variables - - return ( - m.var_u_imqk[(i,m_i,q,k)] <= m.var_cvt_inv_i[i] - ) - + + return m.var_u_imqk[(i, m_i, q, k)] <= m.var_cvt_inv_i[i] + else: - return pyo.Constraint.Skip - - model.constr_u_bin_limits = pyo.Constraint(model.set_IM_bin, - model.set_QK, - rule=rule_constr_u_bin_limits) - - # ************************************************************************* - - # outputs - + + model.constr_u_bin_limits = pyo.Constraint( + model.set_IM_bin, model.set_QK, rule=rule_constr_u_bin_limits + ) + + # ************************************************************************* + + # outputs + # output equations - - def rule_constr_output_equations(m, i, r, q, k): + + def rule_constr_output_equations(m, i, r, q, k): return ( - m.var_y_irqk[(i,r,k)] == - sum(m.param_c_eq_y_irnqk[(i,r,n_i,q,k)]*m.var_x_inqk[(i,n_i,q,k)] - for n_i in m.set_N[i]) - + - sum(m.param_d_eq_y_irmqk[(i,r,m_i,q,k)]*m.var_u_imqk[(i,m_i,q,k)] - for m_i in m.set_M[i]) - + - m.param_e_eq_y_irqk[(i,r,q,k)] + m.var_y_irqk[(i, r, q, k)] + == sum( + m.param_c_eq_y_irnqk[(i, r, n_i, q, k)] * m.var_x_inqk[(i, n_i, q, k)] + for n_i in m.set_N[i] ) + + sum( + m.param_d_eq_y_irmqk[(i, r, m_i, q, k)] * m.var_u_imqk[(i, m_i, q, k)] + for m_i in m.set_M[i] + ) + + m.param_e_eq_y_irqk[(i, r, q, k)] + ) + model.constr_output_equations = pyo.Constraint( - model.set_IR, - model.set_QK, - rule=rule_constr_output_equations) - + model.set_IR, model.set_QK, rule=rule_constr_output_equations + ) + # positive amplitude limit for output variables - + def rule_constr_y_vars_have_pos_amp_limits(m, i, r, q, k): - return ( - m.var_y_irqk[(i,r,q,k)] <= ( - m.var_y_amp_pos_ir[(i,r)]*m.param_f_amp_y_pos_irqk[(i,r,q,k)] - ) - ) + return m.var_y_irqk[(i, r, q, k)] <= ( + m.var_y_amp_pos_ir[(i, r)] * m.param_f_amp_y_pos_irqk[(i, r, q, k)] + ) + model.constr_y_vars_have_pos_amp_limits = pyo.Constraint( - model.set_IR_dim_pos, - model.set_QK, - rule=rule_constr_y_vars_have_pos_amp_limits) - + model.set_IR_dim_pos, model.set_QK, rule=rule_constr_y_vars_have_pos_amp_limits + ) + # negative amplitude limit for output variables - - def rule_constr_y_vars_have_neg_amp_limits(m, i, r, q, k): - return ( - m.var_y_irqk[(i,r,q,k)] >= ( - -m.var_y_amp_neg_ir[(i,r)]*m.param_f_amp_y_neg_irqk[(i,r,q,k)] - ) - ) + + def rule_constr_y_vars_have_neg_amp_limits(m, i, r, q, k): + return m.var_y_irqk[(i, r, q, k)] >= ( + -m.var_y_amp_neg_ir[(i, r)] * m.param_f_amp_y_neg_irqk[(i, r, q, k)] + ) + model.constr_y_vars_have_neg_amp_limits = pyo.Constraint( - model.set_IR_dim_neg, model.set_QK,
rule=rule_constr_y_vars_have_neg_amp_limits + ) + # positive amplitude limit must be zero unless the system is installed - - def rule_constr_y_amp_pos_zero_if_cvt_not_selected(m, i, r): - return ( - m.var_y_amp_pos_ir[(i,r)] <= ( - m.var_cvt_inv_i[i]*m.param_y_amp_pos_ir[(i,r)] - ) - ) + + def rule_constr_y_amp_pos_zero_if_cvt_not_selected(m, i, r): + return m.var_y_amp_pos_ir[(i, r)] <= ( + m.var_cvt_inv_i[i] * m.param_y_amp_pos_ir[(i, r)] + ) + model.constr_y_amp_pos_zero_if_cvt_not_selected = pyo.Constraint( - model.set_IR_dim_pos, - rule=rule_constr_y_amp_pos_zero_if_cvt_not_selected) - + model.set_IR_dim_pos, rule=rule_constr_y_amp_pos_zero_if_cvt_not_selected + ) + # negative amplitude limit must be zero unless the system is installed - + def rule_constr_y_amp_neg_zero_if_cvt_not_selected(m, i, r): - return ( - m.var_y_amp_neg_ir[(i,r)] <= ( - m.var_cvt_inv_i[i]*m.param_y_amp_neg_ir[(i,r)] - ) - ) + return m.var_y_amp_neg_ir[(i, r)] <= ( + m.var_cvt_inv_i[i] * m.param_y_amp_neg_ir[(i, r)] + ) + model.constr_y_amp_neg_zero_if_cvt_not_selected = pyo.Constraint( - model.set_IR_dim_neg, - rule=rule_constr_y_amp_neg_zero_if_cvt_not_selected) - + model.set_IR_dim_neg, rule=rule_constr_y_amp_neg_zero_if_cvt_not_selected + ) + # the positive and negative amplitudes must match - + def rule_constr_y_amp_pos_neg_match(m, i, r): - return ( - m.var_y_amp_pos_ir[(i,r)] == m.var_y_amp_neg_ir[(i,r)] - ) + return m.var_y_amp_pos_ir[(i, r)] == m.var_y_amp_neg_ir[(i, r)] + model.constr_y_amp_pos_neg_match = pyo.Constraint( - model.set_IR_dim_eq, - rule=rule_constr_y_amp_pos_neg_match) - + model.set_IR_dim_eq, rule=rule_constr_y_amp_pos_neg_match + ) + # ************************************************************************* - - # states - - def rule_constr_state_equations(m, i, n, q, k): + # states + + def rule_constr_state_equations(m, i, n, q, k): return ( - m.var_x_inqk[(i,n,q,k)] == - sum(m.param_a_eq_x_innqk[(i,n,n_star,q,k)]* - (m.var_x_inqk[(i,n_star,q,k-1)] if k != 0 else - m.param_x_inq0[(i,n,q)]) - for n_star in m.set_N[i]) - + - sum(m.param_b_eq_x_inmqk[(i,n,m_i,q,k)]*m.var_u_imqk[(i,m_i,q,k)] - for m_i in m.set_M[i]) - + - m.param_e_eq_x_inqk[(i,n,q,k)] + m.var_x_inqk[(i, n, q, k)] + == sum( + m.param_a_eq_x_innqk[(i, n, n_star, q, k)] + * ( + m.var_x_inqk[(i, n_star, q, k - 1)] + if k != 0 + else m.param_x_inq0[(i, n, q)] + ) + for n_star in m.set_N[i] ) - + + sum( + m.param_b_eq_x_inmqk[(i, n, m_i, q, k)] * m.var_u_imqk[(i, m_i, q, k)] + for m_i in m.set_M[i] + ) + + m.param_e_eq_x_inqk[(i, n, q, k)] + ) + model.constr_state_equations = pyo.Constraint( - model.set_IN, - model.set_QK, - rule=rule_constr_state_equations) - + model.set_IN, model.set_QK, rule=rule_constr_state_equations + ) + # positive amplitude limit for state variables - - def rule_constr_x_vars_have_pos_amp_limits(m, i, n, q, k): - return ( - m.var_x_inqk[(i,n,q,k)] <= ( - m.var_x_amp_pos_in[(i,n)]*m.param_f_amp_x_pos_inqk[(i,n,q,k)] - ) - ) + + def rule_constr_x_vars_have_pos_amp_limits(m, i, n, q, k): + return m.var_x_inqk[(i, n, q, k)] <= ( + m.var_x_amp_pos_in[(i, n)] * m.param_f_amp_x_pos_inqk[(i, n, q, k)] + ) + model.constr_x_vars_have_pos_amp_limits = pyo.Constraint( - model.set_IN_dim_pos, - model.set_QK, - rule=rule_constr_x_vars_have_pos_amp_limits) - + model.set_IN_dim_pos, model.set_QK, rule=rule_constr_x_vars_have_pos_amp_limits + ) + # negative amplitude limit for state variables - - def rule_constr_x_vars_have_neg_amp_limits(m, i, n, q, k): - return ( - m.var_x_inqk[(i,n,q,k)] >= ( -
-m.var_y_amp_neg_in[(i,n)]*m.param_f_amp_x_neg_inqk[(i,n,q,k)] - ) - ) + + def rule_constr_x_vars_have_neg_amp_limits(m, i, n, q, k): + return m.var_x_inqk[(i, n, q, k)] >= ( + -m.var_x_amp_neg_in[(i, n)] * m.param_f_amp_x_neg_inqk[(i, n, q, k)] + ) + model.constr_x_vars_have_neg_amp_limits = pyo.Constraint( - model.set_IN_dim_neg, - model.set_QK, - rule=rule_constr_x_vars_have_neg_amp_limits) - + model.set_IN_dim_neg, model.set_QK, rule=rule_constr_x_vars_have_neg_amp_limits + ) + # positive amplitude limit must be zero unless the system is installed - - def rule_constr_x_amp_pos_zero_if_cvt_not_selected(m, i, n): - return ( - m.var_x_amp_pos_in[(i,n)] <= ( - m.var_cvt_inv_i[i]*m.param_x_amp_pos_in[(i,n)] - ) - ) + + def rule_constr_x_amp_pos_zero_if_cvt_not_selected(m, i, n): + return m.var_x_amp_pos_in[(i, n)] <= ( + m.var_cvt_inv_i[i] * m.param_x_amp_pos_in[(i, n)] + ) + model.constr_x_amp_pos_zero_if_cvt_not_selected = pyo.Constraint( - model.set_IN_dim_pos, - rule=rule_constr_x_amp_pos_zero_if_cvt_not_selected) - + model.set_IN_dim_pos, rule=rule_constr_x_amp_pos_zero_if_cvt_not_selected + ) + # negative amplitude limit must be zero unless the system is installed - + def rule_constr_x_amp_neg_zero_if_cvt_not_selected(m, i, n): - - return ( - m.var_x_amp_neg_in[(i,n)] <= ( - m.var_cvt_inv_i[i]*m.param_x_amp_neg_in[(i,n)] - ) - ) - + return m.var_x_amp_neg_in[(i, n)] <= ( + m.var_cvt_inv_i[i] * m.param_x_amp_neg_in[(i, n)] + ) + model.constr_x_amp_neg_zero_if_cvt_not_selected = pyo.Constraint( - model.set_IN_dim_neg, - rule=rule_constr_x_amp_neg_zero_if_cvt_not_selected) - + model.set_IN_dim_neg, rule=rule_constr_x_amp_neg_zero_if_cvt_not_selected + ) + # the positive and negative amplitudes must match - + def rule_constr_x_amp_pos_neg_match(m, i, n): - - return ( - m.var_x_amp_pos_in[(i,n)] == m.var_x_amp_neg_in[(i,n)] - ) - + return m.var_x_amp_pos_in[(i, n)] == m.var_x_amp_neg_in[(i, n)] + model.constr_x_amp_pos_neg_match = pyo.Constraint( - model.set_IN_dim_eq, - rule=rule_constr_x_amp_pos_neg_match) - + model.set_IN_dim_eq, rule=rule_constr_x_amp_pos_neg_match + ) + # ************************************************************************* # ************************************************************************* - + return model - + # ************************************************************************* # ************************************************************************* + +# ***************************************************************************** # ***************************************************************************** # ***************************************************************************** # ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/network.py b/src/topupopt/problems/esipp/network.py index e42b934..78c8160 100644 --- a/src/topupopt/problems/esipp/network.py +++ b/src/topupopt/problems/esipp/network.py @@ -22,20 +22,22 @@ from .resource import are_prices_time_invariant # ***************************************************************************** # ***************************************************************************** + class Arcs: """A class for arc technologies in a network.""" - - def __init__(self, - name: str, # human-readable name - efficiency: dict, # keys: (q, k) -
efficiency_reverse: dict, # keys: (q, k) - static_loss: dict, # keys: (h, q, k) - capacity: tuple, # one value per option (h) - minimum_cost: tuple, # one value per option (h) - specific_capacity_cost: float, # one value - capacity_is_instantaneous: bool, # one value - validate: bool): # one number - + + def __init__( + self, + name: str, # human-readable name + efficiency: dict, # keys: (q, k) + efficiency_reverse: dict, # keys: (q, k) + static_loss: dict, # keys: (h, q, k) + capacity: tuple, # one value per option (h) + minimum_cost: tuple, # one value per option (h) + specific_capacity_cost: float, # one value + capacity_is_instantaneous: bool, # one value + validate: bool, + ): # one number # initialise self.name = name self.efficiency = efficiency @@ -45,101 +47,92 @@ class Arcs: self.minimum_cost = minimum_cost self.specific_capacity_cost = specific_capacity_cost self.capacity_is_instantaneous = capacity_is_instantaneous - + # results (for simulation or post-optimisation analysis) self.options_selected = [False for element in self.capacity] - + # validate if validate: Arcs.validate(self) - + # ************************************************************************* # ************************************************************************* - + def number_options(self): "Return the number of arc options." - + return len(self.minimum_cost) - + # ************************************************************************* # ************************************************************************* - - def is_isotropic(self, - reverse_none_means_isotropic: bool = True) -> bool: + + def is_isotropic(self, reverse_none_means_isotropic: bool = True) -> bool: """Returns True if the efficiency values do not change with sense.""" - + # both efficiency and efficiency_reverse are None - + if type(self.efficiency) == type(None): - # the efficiency for the nominal sense is None - # this means it is 1 every time and that the efficiency for the + # this means it is 1 every time and that the efficiency for the # reverse sense can only be 1 too (efficiency_reverse == None) - - return True # does not change with the flow sense - + + return True # does not change with the flow sense + elif type(self.efficiency_reverse) == type(None): - # the efficiency for the nominal sense is not None - # the efficiency for the reverse sense is None: - + # the efficiency for the reverse sense is None: + # two possible interpretations - + if reverse_none_means_isotropic: - # None means no information and therefore it is isotropic - + return True - - else: # None means that there are no proportional losses - + + else: # None means that there are no proportional losses # therefore, it is only isotropic if there are no proportional # losses for the nominal flow sense too - - return ( - tuple(self.efficiency.values()) == - tuple(1 for _ in range(len(self.efficiency))) - ) - + + return tuple(self.efficiency.values()) == tuple( + 1 for _ in range(len(self.efficiency)) + ) + else: - # neither efficiency nor efficiency_reverse are None: all values # must match in either sense for it to be isotropic - + for key, value in self.efficiency.items(): - if self.efficiency[key] != self.efficiency_reverse[key]: - # one pair does not match: not isotropic - + return False - + # all pairs match: it is isotropic (they must have the same size) - + return True - + # ************************************************************************* # ************************************************************************* - + def has_been_selected(self)
-> bool: """Returns True if an option has been selected and False otherwise.""" - + return True in self.options_selected - + # ************************************************************************* # ************************************************************************* - + def is_infinite_capacity(self) -> bool: """Returns True if there is one capacity and it is infinite.""" - + return len(self.capacity) == 1 and self.capacity == (inf,) - + # ************************************************************************* # ************************************************************************* - + def has_constant_efficiency(self) -> bool: """Returns True if the arc has a constant efficiency.""" - + if self.has_proportional_losses(): # proportional losses if self.is_isotropic(): @@ -150,514 +143,466 @@ class Arcs: else: # is isotropic but does not have constant eta = False return False - else: # is not isotropic - return False # not isotropic = efficiency can change + else: # is not isotropic + return False # not isotropic = efficiency can change else: # no proportional losses: always equal to 1 = has constant eta return True # ************************************************************************* # ************************************************************************* - + def has_proportional_losses(self) -> bool: """Returns False if the efficiency is always one, otherwise True.""" - + if type(self.efficiency) == type(None): - # None means the efficiency is always one (default) - - return False # no proportional losses - - else: # efficiency is not None, reverse_efficiency may or not be too - + + return False # no proportional losses + + else: # efficiency is not None, reverse_efficiency may or not be too if type(self.efficiency_reverse) == type(None): - # Return True if at least one efficiency value is not one - - return ( - tuple(self.efficiency.values()) != - tuple(1 for _ in range(len(self.efficiency))) - ) - + + return tuple(self.efficiency.values()) != tuple( + 1 for _ in range(len(self.efficiency)) + ) + else: - # Return True if at least one efficiency value is not one # Return False if all efficiency values are one - - if (tuple(self.efficiency.values()) != - tuple(1 for _ in range(len(self.efficiency)))): - + + if tuple(self.efficiency.values()) != tuple( + 1 for _ in range(len(self.efficiency)) + ): # the efficiency values are different than one: False - + return True - - else: # the efficiency values are one - + + else: # the efficiency values are one # if the reverse efficiency values are not one, return True - - return ( - tuple(self.efficiency_reverse.values()) != - tuple(1 for _ in range(len(self.efficiency_reverse))) - ) - + + return tuple(self.efficiency_reverse.values()) != tuple( + 1 for _ in range(len(self.efficiency_reverse)) + ) + # ************************************************************************* # ************************************************************************* - + def has_static_losses(self) -> bool: """Returns False if the static losses are always zero, otherwise True.""" - + if type(self.static_loss) == type(None): - # None means the static losses are always zero (default) - - return False # no static losses - + + return False # no static losses + else: - # Return True if at least one static loss value is not zero - - return ( - tuple(self.static_loss.values()) != - tuple(0 for _ in range(len(self.static_loss))) - ) - + + return tuple(self.static_loss.values()) != tuple( + 0 for _ in range(len(self.static_loss)) + ) + # 
************************************************************************* # ************************************************************************* - - def validate_sizes(self, - number_options: int, - number_scenarios: int, - number_intervals: list): - + + def validate_sizes( + self, number_options: int, number_scenarios: int, number_intervals: list + ): # min cost and capacity: one per option - - if (len(self.minimum_cost) != number_options or - len(self.capacity) != number_options): - + + if ( + len(self.minimum_cost) != number_options + or len(self.capacity) != number_options + ): raise ValueError( - 'The minimum cost or the capacity values are inconsistent with' - +' the number of options.' - ) - + "The minimum cost or the capacity values are inconsistent with" + + " the number of options." + ) + # efficiency: scenario and interval - + if type(self.efficiency) != type(None): - if len(self.efficiency) != sum(number_intervals): - raise ValueError( - 'The efficiency values are inconsistent with the number of' - +' scenarios and intervals.' - ) - + "The efficiency values are inconsistent with the number of" + + " scenarios and intervals." + ) + # reverse efficiency: scenario and interval - + if type(self.efficiency_reverse) != type(None): - if len(self.efficiency_reverse) != sum(number_intervals): - raise ValueError( - 'The reverse efficiency values are inconsistent with the '+ - 'number of scenarios and intervals.' - ) - + "The reverse efficiency values are inconsistent with the " + + "number of scenarios and intervals." + ) + # static loss: option, scenario and interval - + if type(self.static_loss) != type(None): - - if len(self.static_loss) != sum(number_intervals)*number_options: - + if len(self.static_loss) != sum(number_intervals) * number_options: raise ValueError( - 'The static loss values are inconsistent with the number '+ - 'of options, scenarios and intervals.' - ) + "The static loss values are inconsistent with the number " + + "of options, scenarios and intervals." + ) # ************************************************************************* # ************************************************************************* - + def validate(self): - # ********************************************************************* - + # check the types - + # the name should be hashable - + try: - assert type(hash(self.name)) == int - + except TypeError: - # not hashable - - raise TypeError('The name attribute is not hashable.') - + + raise TypeError("The name attribute is not hashable.") + # efficiency - - if (type(self.efficiency) != dict and - type(self.efficiency) != type(None)): - + + if type(self.efficiency) != dict and type(self.efficiency) != type(None): # efficiency is not a dict nor a None - - raise TypeError( - 'The efficiency should be given as a dict or None.') - + + raise TypeError("The efficiency should be given as a dict or None.") + elif type(self.efficiency) == type(None): - # efficiency is a None: the reverse efficiency must be too - + if type(self.efficiency_reverse) != type(None): - # must be None if normal efficiency is None - + raise TypeError( - 'The reverse efficiency has to match the nominal'+ - ' one when there are no proportional losses.' - ) - + "The reverse efficiency has to match the nominal" + + " one when there are no proportional losses." + ) + else: - # efficiency is dict: the reverse eff. 
has to be a None or dict - + # must be given as dict or None - - if (type(self.efficiency_reverse) != dict and - type(self.efficiency_reverse) != type(None)): - + + if type(self.efficiency_reverse) != dict and type( + self.efficiency_reverse + ) != type(None): raise TypeError( - 'The reverse efficiency should be given as a dict or None.' - ) - + "The reverse efficiency should be given as a dict or None." + ) + # static loss - - if (type(self.static_loss) != dict and - type(self.static_loss) != type(None)): - - raise TypeError( - 'The static losses should be given as a dict or None.') - + + if type(self.static_loss) != dict and type(self.static_loss) != type(None): + raise TypeError("The static losses should be given as a dict or None.") + # capacity - + if type(self.capacity) != tuple: - - raise TypeError( - 'The capacity should be given as a tuple.') - + raise TypeError("The capacity should be given as a tuple.") + # minimum cost - - if (type(self.minimum_cost) != tuple): - - raise TypeError( - 'The minimum cost values should be given as a tuple.') - + + if type(self.minimum_cost) != tuple: + raise TypeError("The minimum cost values should be given as a tuple.") + # specific capacity cost - + if not isinstance(self.specific_capacity_cost, Real): - raise TypeError( - 'The specific capacity cost was not given as a numeric type.') - + "The specific capacity cost was not given as a numeric type." + ) + # capacity_is_instantaneous - + if type(self.capacity_is_instantaneous) != bool: - raise TypeError( - 'The information about capacities being instantaneous or not'+ - ' should be given as a boolean variable.') - + "The information about capacities being instantaneous or not" + + " should be given as a boolean variable." + ) + # ********************************************************************* - + # the number of techs. in capacity and minimum_cost should be the same - + if len(self.capacity) != len(self.minimum_cost): - raise ValueError( - 'The number of capacity and minimum cost entries must match.') - + "The number of capacity and minimum cost entries must match." + ) + # there should be at least one option - + if len(self.capacity) == 0: - raise ValueError( - 'No capacity and minimum cost values were provided. At le' - +'ast one option should be provided.') - + "No capacity and minimum cost values were provided. At le" + + "ast one option should be provided." + ) + # ********************************************************************* - + # if efficiency is not None (i.e., the arc has proportional losses) if type(self.efficiency) != type(None): - # efficiency is a dict - + if len(self.efficiency) == 0: - raise ValueError( - 'No efficiency values were provided. There should be one'+ - ' value per scenario and time interval.') - + "No efficiency values were provided. There should be one" + + " value per scenario and time interval." + ) + for key, value in self.efficiency.items(): - if type(key) != tuple: - raise TypeError( - 'The efficiency dict keys must be (scenario, interval)' - +' tuples.') - + "The efficiency dict keys must be (scenario, interval)" + + " tuples." + ) + if len(key) != 2: - raise ValueError( - 'The efficiency dict keys must be tuples of size 2.') - + "The efficiency dict keys must be tuples of size 2." + ) + if not isinstance(value, Real): - raise TypeError( - 'Efficiency values must be provided as numeric types.') - + "Efficiency values must be provided as numeric types." 
+ ) + if value <= 0: - - raise ValueError( - 'Efficiency values must be positive.') - + raise ValueError("Efficiency values must be positive.") + # check if the reverse efficiency is not None - + if type(self.efficiency_reverse) != type(None): - # it is not None, check the reverse efficiency too - + if len(self.efficiency_reverse) == 0: - raise ValueError( - 'No efficiency values were provided. There should be '+ - 'one value per scenario and time interval.') - + "No efficiency values were provided. There should be " + + "one value per scenario and time interval." + ) + for key, value in self.efficiency_reverse.items(): - if key not in self.efficiency.keys(): - raise ValueError( - 'The keys for the efficiency dicts do not match.' - ) - + "The keys for the efficiency dicts do not match." + ) + # if so, the key is a tuple of size 2 (checked before) - + # if type(key) != tuple: - + # raise TypeError( # 'The efficiency dict keys must be (scenario, '+ # 'interval) tuples.') - + # if len(key) != 2: - + # raise ValueError( - # 'The efficiency dict keys must be tuples of '+ + # 'The efficiency dict keys must be tuples of '+ # 'size 2.') - + if not isinstance(value, Real): - raise TypeError( - 'Efficiency values must be provided as numeric '+ - 'types.') - + "Efficiency values must be provided as numeric " + "types." + ) + if value <= 0: - - raise ValueError( - 'Efficiency values must be positive.') - + raise ValueError("Efficiency values must be positive.") + # if static loss is not None (i.e., the arc has static losses) - + if type(self.static_loss) != type(None): - # static loss is a dict - + if len(self.static_loss) == 0: - raise ValueError( - 'No static loss values were provided. There should be one'+ - ' value per option, scenario and time interval.') - + "No static loss values were provided. There should be one" + + " value per option, scenario and time interval." + ) + for key, value in self.static_loss.items(): - if type(key) != tuple: - raise TypeError( - 'The static loss dict keys must be (option, scenario,'+ - ' interval) tuples.' - ) - + "The static loss dict keys must be (option, scenario," + + " interval) tuples." + ) + if len(key) != 3: - raise ValueError( - 'The static loss dict keys must be tuples of size 3.') - + "The static loss dict keys must be tuples of size 3." + ) + if not isinstance(value, Real): - raise TypeError( - 'Static loss values must be provided as numeric types.' - ) - + "Static loss values must be provided as numeric types." + ) + if value < 0: - - raise ValueError( - 'Static loss values cannot be negative.' - ) - + raise ValueError("Static loss values cannot be negative.") + # capacity is a tuple - + for element in self.capacity: - if not isinstance(element, Real): - - raise TypeError( - 'Capacity values must be provided as numeric types.') - + raise TypeError("Capacity values must be provided as numeric types.") + if element <= 0: - - raise ValueError( - 'Capacity values must be positive.') - + raise ValueError("Capacity values must be positive.") + # minimum_cost is a tuple - + for element in self.minimum_cost: - if not isinstance(element, Real): - raise TypeError( - 'Minimum cost values must be provided as numeric types.') - + "Minimum cost values must be provided as numeric types." 
+ ) + if element < 0: - - raise ValueError( - 'Minimum cost values must be positive or zero.') - + raise ValueError("Minimum cost values must be positive or zero.") + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** + class ArcsWithoutProportionalLosses(Arcs): """A class for arc technologies without proportional losses.""" - - def __init__(self, - name: str, # human-readable name - static_loss: dict, # keys: (h, q, k) - capacity: tuple, # one value per option (h) - minimum_cost: tuple, # one value per option (h) - specific_capacity_cost: float, # one value - capacity_is_instantaneous: bool, # one value - validate: bool = False): # one number - + + def __init__( + self, + name: str, # human-readable name + static_loss: dict, # keys: (h, q, k) + capacity: tuple, # one value per option (h) + minimum_cost: tuple, # one value per option (h) + specific_capacity_cost: float, # one value + capacity_is_instantaneous: bool, # one value + validate: bool = False, + ): # one number Arcs.__init__( - self, - name=name, - efficiency=None, + self, + name=name, + efficiency=None, efficiency_reverse=None, - static_loss=static_loss, - capacity=capacity, - minimum_cost=minimum_cost, - specific_capacity_cost=specific_capacity_cost, + static_loss=static_loss, + capacity=capacity, + minimum_cost=minimum_cost, + specific_capacity_cost=specific_capacity_cost, capacity_is_instantaneous=capacity_is_instantaneous, - validate=validate - ) + validate=validate, + ) + # ***************************************************************************** # ***************************************************************************** + class ArcsWithoutStaticLosses(Arcs): """A class for arc technologies without static losses.""" - - def __init__(self, - name: str, # human-readable name - efficiency: dict, # keys: (q, k) - efficiency_reverse: dict, # keys: (q, k) - capacity: tuple, # one value per option (h) - minimum_cost: tuple, # one value per option (h) - specific_capacity_cost: float, # one value - capacity_is_instantaneous: bool, # one value - validate: bool = False): # one number - + + def __init__( + self, + name: str, # human-readable name + efficiency: dict, # keys: (q, k) + efficiency_reverse: dict, # keys: (q, k) + capacity: tuple, # one value per option (h) + minimum_cost: tuple, # one value per option (h) + specific_capacity_cost: float, # one value + capacity_is_instantaneous: bool, # one value + validate: bool = False, + ): # one number Arcs.__init__( - self, - name=name, - efficiency=efficiency, + self, + name=name, + efficiency=efficiency, efficiency_reverse=efficiency_reverse, - static_loss=None, - capacity=capacity, - minimum_cost=minimum_cost, - specific_capacity_cost=specific_capacity_cost, + static_loss=None, + capacity=capacity, + minimum_cost=minimum_cost, + specific_capacity_cost=specific_capacity_cost, capacity_is_instantaneous=capacity_is_instantaneous, - validate=validate - ) - + validate=validate, + ) + + # ***************************************************************************** # ***************************************************************************** + class ArcsWithoutLosses(Arcs): 
"""A class for arc technologies without losses.""" - - def __init__(self, - name: str, # human-readable name - capacity: tuple, # one value per option (h) - minimum_cost: tuple, # one value per option (h) - specific_capacity_cost: float, # one value - capacity_is_instantaneous: bool, # one value - validate: bool = False): # one number - + + def __init__( + self, + name: str, # human-readable name + capacity: tuple, # one value per option (h) + minimum_cost: tuple, # one value per option (h) + specific_capacity_cost: float, # one value + capacity_is_instantaneous: bool, # one value + validate: bool = False, + ): # one number Arcs.__init__( - self, - name=name, - efficiency=None, + self, + name=name, + efficiency=None, efficiency_reverse=None, - static_loss=None, - capacity=capacity, - minimum_cost=minimum_cost, - specific_capacity_cost=specific_capacity_cost, + static_loss=None, + capacity=capacity, + minimum_cost=minimum_cost, + specific_capacity_cost=specific_capacity_cost, capacity_is_instantaneous=capacity_is_instantaneous, - validate=validate - ) + validate=validate, + ) + # ***************************************************************************** # ***************************************************************************** - + + class Network(nx.MultiDiGraph): - # directed arcs represent streets # the nodes are the start and end points of those streets - - - KEY_NODE_TYPE = 'node_type' - KEY_NODE_TYPE_IMP = 'imp' - KEY_NODE_TYPE_EXP = 'exp' - KEY_NODE_TYPE_SOURCE_SINK = 'source_sink' - KEY_NODE_TYPE_WAY = 'way' - KEY_NODE_TYPES = [KEY_NODE_TYPE_IMP, - KEY_NODE_TYPE_EXP, - KEY_NODE_TYPE_SOURCE_SINK, - KEY_NODE_TYPE_WAY] - KEY_NODE_PRICES = 'prices' - #KEY_NODE_PRICES = 'prices' - #KEY_NODE_VOLUMES = 'volumes' - KEY_NODE_BASE_FLOW = 'base_flow' - KEY_NODE_PRICES_TIME_INVARIANT = 'prices_invariant' - - - KEY_ARC_TECH = 'technology' - KEY_ARC_UND = 'undirected' - - KEY_ARC_TECH_EFFICIENCY = 'efficiency' - KEY_ARC_TECH_EFFICIENCY_REVERSE = 'efficiency_reverse' - KEY_ARC_TECH_CAPACITY = 'capacity' - KEY_ARC_TECH_MIN_COST = 'minimum_cost' - KEY_ARC_TECH_SPEC_CAP_COST = 'specific_capacity_cost' - KEY_ARC_TECH_OPTIONS_SELECTED = 'options_selected' - KEY_ARC_TECH_CAPACITY_INSTANTANEOUS = 'capacity_is_instantaneous' - KEY_ARC_TECH_STATIC_LOSS = 'static_loss' - + + KEY_NODE_TYPE = "node_type" + KEY_NODE_TYPE_IMP = "imp" + KEY_NODE_TYPE_EXP = "exp" + KEY_NODE_TYPE_SOURCE_SINK = "source_sink" + KEY_NODE_TYPE_WAY = "way" + KEY_NODE_TYPES = [ + KEY_NODE_TYPE_IMP, + KEY_NODE_TYPE_EXP, + KEY_NODE_TYPE_SOURCE_SINK, + KEY_NODE_TYPE_WAY, + ] + KEY_NODE_PRICES = "prices" + # KEY_NODE_PRICES = 'prices' + # KEY_NODE_VOLUMES = 'volumes' + KEY_NODE_BASE_FLOW = "base_flow" + KEY_NODE_PRICES_TIME_INVARIANT = "prices_invariant" + + KEY_ARC_TECH = "technology" + KEY_ARC_UND = "undirected" + + KEY_ARC_TECH_EFFICIENCY = "efficiency" + KEY_ARC_TECH_EFFICIENCY_REVERSE = "efficiency_reverse" + KEY_ARC_TECH_CAPACITY = "capacity" + KEY_ARC_TECH_MIN_COST = "minimum_cost" + KEY_ARC_TECH_SPEC_CAP_COST = "specific_capacity_cost" + KEY_ARC_TECH_OPTIONS_SELECTED = "options_selected" + KEY_ARC_TECH_CAPACITY_INSTANTANEOUS = "capacity_is_instantaneous" + KEY_ARC_TECH_STATIC_LOSS = "static_loss" + KEY_ARC_TECH_ATTR = ( KEY_ARC_TECH_EFFICIENCY, KEY_ARC_TECH_EFFICIENCY_REVERSE, @@ -667,110 +612,80 @@ class Network(nx.MultiDiGraph): KEY_ARC_TECH_OPTIONS_SELECTED, KEY_ARC_TECH_CAPACITY_INSTANTANEOUS, KEY_ARC_TECH_STATIC_LOSS, - ) - + ) + def __init__(self, incoming_graph_data=None, **attr): - # run base class init routine - - 
nx.MultiDiGraph.__init__(self, - incoming_graph_data=incoming_graph_data, - **attr) - + + nx.MultiDiGraph.__init__(self, incoming_graph_data=incoming_graph_data, **attr) + # identify node types - + self.identify_node_types() - + # declare variables for the nodes without directed arc limitations - + self.nodes_wo_in_dir_arc_limitations = [] - + self.nodes_wo_out_dir_arc_limitations = [] # ************************************************************************* # ************************************************************************* - + # add a new import node - - def add_import_node(self, - node_key, - prices: dict): - + + def add_import_node(self, node_key, prices: dict): node_dict = { self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_IMP, self.KEY_NODE_PRICES: prices, - self.KEY_NODE_PRICES_TIME_INVARIANT: ( - are_prices_time_invariant(prices) - ) - } - - self.add_node( - node_key, - **node_dict) - + self.KEY_NODE_PRICES_TIME_INVARIANT: (are_prices_time_invariant(prices)), + } + + self.add_node(node_key, **node_dict) + # ************************************************************************* # ************************************************************************* - + # add a new export node - - def add_export_node(self, - node_key, - prices: dict): - + + def add_export_node(self, node_key, prices: dict): node_dict = { self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_EXP, self.KEY_NODE_PRICES: prices, - self.KEY_NODE_PRICES_TIME_INVARIANT: ( - are_prices_time_invariant(prices) - ) - } - - self.add_node( - node_key, - **node_dict) - + self.KEY_NODE_PRICES_TIME_INVARIANT: (are_prices_time_invariant(prices)), + } + + self.add_node(node_key, **node_dict) + # ************************************************************************* # ************************************************************************* - + # add a new supply/demand node - - def add_source_sink_node(self, - node_key, - base_flow: dict): - + + def add_source_sink_node(self, node_key, base_flow: dict): node_dict = { self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_SOURCE_SINK, - self.KEY_NODE_BASE_FLOW: base_flow - } - - self.add_node( - node_key, - **node_dict) - + self.KEY_NODE_BASE_FLOW: base_flow, + } + + self.add_node(node_key, **node_dict) + # ************************************************************************* # ************************************************************************* - + # add a new waypoint node - - def add_waypoint_node(self, - node_key): - - node_dict = { - self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_WAY - } - - self.add_node( - node_key, - **node_dict) - + + def add_waypoint_node(self, node_key): + node_dict = {self.KEY_NODE_TYPE: self.KEY_NODE_TYPE_WAY} + + self.add_node(node_key, **node_dict) + # ************************************************************************* # ************************************************************************* - + # modify an existing network node - - def modify_network_node(self, - node_key, - node_data: dict): + + def modify_network_node(self, node_key, node_data: dict): """ Modifies a node in the network object. @@ -793,294 +708,256 @@ class Network(nx.MultiDiGraph): None. 
""" - + if self.has_node(node_key): - # check if there will be changes to the type of node - - if (self.KEY_NODE_TYPE in node_data and - self.KEY_NODE_TYPE in self.nodes[node_key]): - - if node_data[self.KEY_NODE_TYPE] != self.nodes[ - node_key][self.KEY_NODE_TYPE]: - + + if ( + self.KEY_NODE_TYPE in node_data + and self.KEY_NODE_TYPE in self.nodes[node_key] + ): + if ( + node_data[self.KEY_NODE_TYPE] + != self.nodes[node_key][self.KEY_NODE_TYPE] + ): # the node type changed: check if final node is imp./exp. - + # to export nodes - + if node_data[self.KEY_NODE_TYPE] == self.KEY_NODE_TYPE_EXP: - # export nodes cannot have outgoing arcs # check if there are outgoing arcs involving this node - + number_out_arcs = len( tuple( - arc_key for arc_key in self.edges(keys=True) - if arc_key[0] == node_key # is source - ) + arc_key + for arc_key in self.edges(keys=True) + if arc_key[0] == node_key # is source ) - + ) + if number_out_arcs > 0: - raise ValueError( - 'A node with outgoing arcs cannot be changed'+ - ' into an export node, since export nodes '+ - ' cannot have outgoing arcs.') - + "A node with outgoing arcs cannot be changed" + + " into an export node, since export nodes " + + " cannot have outgoing arcs." + ) + # to import nodes - + if node_data[self.KEY_NODE_TYPE] == self.KEY_NODE_TYPE_IMP: - # import nodes cannot have incoming arcs # check if there are incoming arcs involving this node - + number_in_arcs = len( tuple( - arc_key for arc_key in self.edges(keys=True) - if arc_key[1] == node_key # is destination - ) + arc_key + for arc_key in self.edges(keys=True) + if arc_key[1] == node_key # is destination ) - + ) + if number_in_arcs > 0: - raise ValueError( - 'A node with incoming arcs cannot be changed'+ - ' into an import node, since import nodes '+ - ' cannot have incoming arcs.') - + "A node with incoming arcs cannot be changed" + + " into an import node, since import nodes " + + " cannot have incoming arcs." 
+ ) + # all good - - self.add_node(node_key, - **node_data) - + + self.add_node(node_key, **node_data) + else: - - raise ValueError('No such node was found.') - + raise ValueError("No such node was found.") + # ************************************************************************* # ************************************************************************* - + # identify importing nodes - + def identify_import_nodes(self): - self.import_nodes = tuple( node_key for node_key in self.nodes - if self.KEY_NODE_TYPE in self.nodes[node_key] - if (self.nodes[node_key][self.KEY_NODE_TYPE] - == self.KEY_NODE_TYPE_IMP) - ) - + if self.KEY_NODE_TYPE in self.nodes[node_key] + if (self.nodes[node_key][self.KEY_NODE_TYPE] == self.KEY_NODE_TYPE_IMP) + ) + # ************************************************************************* # ************************************************************************* - + # identify exporting nodes - + def identify_export_nodes(self): - self.export_nodes = tuple( node_key for node_key in self.nodes - if self.KEY_NODE_TYPE in self.nodes[node_key] - if (self.nodes[node_key][self.KEY_NODE_TYPE] - == self.KEY_NODE_TYPE_EXP) - ) - + if self.KEY_NODE_TYPE in self.nodes[node_key] + if (self.nodes[node_key][self.KEY_NODE_TYPE] == self.KEY_NODE_TYPE_EXP) + ) + # ************************************************************************* # ************************************************************************* - + # identify waypoint nodes - + def identify_waypoint_nodes(self): - self.waypoint_nodes = tuple( node_key for node_key in self.nodes - if self.KEY_NODE_TYPE in self.nodes[node_key] - if (self.nodes[node_key][self.KEY_NODE_TYPE] - == self.KEY_NODE_TYPE_WAY) - ) - + if self.KEY_NODE_TYPE in self.nodes[node_key] + if (self.nodes[node_key][self.KEY_NODE_TYPE] == self.KEY_NODE_TYPE_WAY) + ) + # ************************************************************************* # ************************************************************************* - + # identify source sink nodes - + def identify_source_sink_nodes(self): - self.source_sink_nodes = tuple( node_key for node_key in self.nodes - if self.KEY_NODE_TYPE in self.nodes[node_key] - if (self.nodes[node_key][self.KEY_NODE_TYPE] - == self.KEY_NODE_TYPE_SOURCE_SINK) + if self.KEY_NODE_TYPE in self.nodes[node_key] + if ( + self.nodes[node_key][self.KEY_NODE_TYPE] + == self.KEY_NODE_TYPE_SOURCE_SINK ) - + ) + # ************************************************************************* # ************************************************************************* - - # verify if everything is alright - + + # verify if everything is alright + def validate(self): - # check each arc - + for arc_key in self.edges(keys=True): - # directed arcs cannot start in an export node - + if arc_key[0] in self.export_nodes: - - raise ValueError( - 'Directed arcs cannot start in an export node.' - ) - + raise ValueError("Directed arcs cannot start in an export node.") + # directed arcs cannot end on an import node - + if arc_key[1] in self.import_nodes: - - raise ValueError( - 'Directed arcs cannot end in import node.' 
- ) - + raise ValueError("Directed arcs cannot end in import node.") + # import-export nodes cannot have static losses - - if (arc_key[0] in self.import_nodes and - arc_key[1] in self.export_nodes and - self.edges[arc_key][self.KEY_ARC_TECH].has_static_losses()): - + + if ( + arc_key[0] in self.import_nodes + and arc_key[1] in self.export_nodes + and self.edges[arc_key][self.KEY_ARC_TECH].has_static_losses() + ): raise ValueError( - 'Directed arcs between import and export nodes cannot have' - +' static losses.' - ) - + "Directed arcs between import and export nodes cannot have" + + " static losses." + ) + # undirected arcs cannot involve import nor export nodes - + if self.arc_is_undirected(arc_key): - - if (arc_key[0] in self.import_nodes or - arc_key[1] in self.export_nodes): - + if arc_key[0] in self.import_nodes or arc_key[1] in self.export_nodes: raise ValueError( - 'Undirected arcs cannot involve import nor '+ - 'export nodes.' - ) - + "Undirected arcs cannot involve import nor " + "export nodes." + ) + # ************************************************************************* # ************************************************************************* - + # identify node types - + def identify_node_types(self): "Identifies the node type for each node in the network objects." - + # identify import nodes - + self.identify_import_nodes() - + # identify export nodes - + self.identify_export_nodes() - + # identify source/sink nodes - + self.identify_source_sink_nodes() - + # identify waypoint nodes - + self.identify_waypoint_nodes() - + # validate - + self.validate() - + # ************************************************************************* # ************************************************************************* - - def add_directed_arc( - self, - node_key_a, - node_key_b, - arcs: Arcs): - + + def add_directed_arc(self, node_key_a, node_key_b, arcs: Arcs): # check if the arc ends in an import node - + if node_key_b in self.import_nodes: - - raise ValueError('Directed arcs cannot end in an import node.') - + raise ValueError("Directed arcs cannot end in an import node.") + # check if the arc starts in an export node - + if node_key_a in self.export_nodes: - - raise ValueError('Directed arcs cannot start in an export node.') - + raise ValueError("Directed arcs cannot start in an export node.") + # check the arc is between import and export nodes - - if (node_key_a in self.import_nodes and - node_key_b in self.export_nodes): - + + if node_key_a in self.import_nodes and node_key_b in self.export_nodes: # it is between import and export nodes - + # check if it involves static losses - + if arcs.has_static_losses(): - raise ValueError( - 'Arcs between import and export nodes cannot have static '+ - 'losses.' - ) - + "Arcs between import and export nodes cannot have static " + + "losses." 
+ ) + # add a new arc - + return self.add_edge( - node_key_a, - node_key_b, - **{self.KEY_ARC_TECH: arcs, - self.KEY_ARC_UND: False} - ) + node_key_a, node_key_b, **{self.KEY_ARC_TECH: arcs, self.KEY_ARC_UND: False} + ) # ************************************************************************* # ************************************************************************* - - def add_undirected_arc( - self, - node_key_a, - node_key_b, - arcs: Arcs): - + + def add_undirected_arc(self, node_key_a, node_key_b, arcs: Arcs): # check if the arc links import or export nodes - - if (node_key_a in self.import_nodes or - node_key_a in self.export_nodes or - node_key_b in self.import_nodes or - node_key_b in self.export_nodes): - - raise ValueError( - 'Undirected arcs cannot involve import or export nodes.' - ) - + + if ( + node_key_a in self.import_nodes + or node_key_a in self.export_nodes + or node_key_b in self.import_nodes + or node_key_b in self.export_nodes + ): + raise ValueError("Undirected arcs cannot involve import or export nodes.") + # add a new arc (undirected arcs require unique keys) - + return self.add_edge( - node_key_a, + node_key_a, node_key_b, - key=self.get_pseudo_unique_arc_key(node_key_a, node_key_b), - **{self.KEY_ARC_TECH: arcs, - self.KEY_ARC_UND: True} - ) + key=self.get_pseudo_unique_arc_key(node_key_a, node_key_b), + **{self.KEY_ARC_TECH: arcs, self.KEY_ARC_UND: True} + ) + # ************************************************************************* # ************************************************************************* - - def get_pseudo_unique_arc_key(self, - node_key_start, - node_key_end, - max_iterations: int = 10): + + def get_pseudo_unique_arc_key( + self, node_key_start, node_key_end, max_iterations: int = 10 + ): """ Generate a pseudo-unique arc key for a pair of nodes. - + The key cannot exist in either direction. If such a key cannot be found, an error will be raised. @@ -1098,59 +975,47 @@ class Network(nx.MultiDiGraph): hashable-type The pseudo-unique arc key.
- """ + """ try: keys_ab = self._adj[node_key_start][node_key_end].keys() except KeyError: # no arcs in the ab direction - keys_ab = (), + keys_ab = ((),) try: keys_ba = self._adj[node_key_end][node_key_start].keys() except KeyError: # no arcs in the ba direction - keys_ba = (), - - # try to generate a unique key + keys_ba = ((),) + + # try to generate a unique key iteration = 0 - while iteration < max_iterations: - new_key = str(uuid.uuid4()) - if new_key not in keys_ab and new_key not in keys_ba: - return new_key - iteration += 1 - raise ValueError('No unique arc key could be produced.') - + while iteration < max_iterations: + new_key = str(uuid.uuid4()) + if new_key not in keys_ab and new_key not in keys_ba: + return new_key + iteration += 1 + raise ValueError("No unique arc key could be produced.") + # ************************************************************************* # ************************************************************************* - - def modify_network_arc(self, - node_key_a, - node_key_b, - arc_key_ab, - data_dict: dict): + + def modify_network_arc(self, node_key_a, node_key_b, arc_key_ab, data_dict: dict): # TODO: raise an error if the arc becomes inconsistent with the nodes it links and the data it has # a directed arc can only change to undirected if it does not involve import or export nodes - + # modify the arc - - return self.add_edge( - node_key_a, - node_key_b, - key=arc_key_ab, - **data_dict - ) - + + return self.add_edge(node_key_a, node_key_b, key=arc_key_ab, **data_dict) + # ************************************************************************* # ************************************************************************* - + def add_infinite_capacity_arc( - self, - node_key_a, - node_key_b, - efficiency: dict, - static_loss: dict): + self, node_key_a, node_key_b, efficiency: dict, static_loss: dict + ): """ Adds an infinite capacity arc, which must be directed and pre-existing. - + Pre-existing arcs are not subject to optimisation but constrain it. Parameters @@ -1160,7 +1025,7 @@ class Network(nx.MultiDiGraph): node_key_b : hashable-type The node key of the arc's end node. efficiency : dict - A dict containing the arc efficiency values for each interval and + A dict containing the arc efficiency values for each interval and scenario using (scenario, interval) tuples as keys. static_loss: dict A dict containing the static loss values for each option, interval @@ -1171,46 +1036,44 @@ class Network(nx.MultiDiGraph): None. 
""" - + # prepare the arc technology object - + arcs = Arcs( - name=str(node_key_a)+str(node_key_b), - efficiency=efficiency, - efficiency_reverse=None, # should be ignored downstream + name=str(node_key_a) + str(node_key_b), + efficiency=efficiency, + efficiency_reverse=None, # should be ignored downstream static_loss=static_loss, - capacity=[inf], # one capacity only, infinity - minimum_cost=[0], # pre-existing arcs have no costs - specific_capacity_cost=0, # no specific capacity costs + capacity=[inf], # one capacity only, infinity + minimum_cost=[0], # pre-existing arcs have no costs + specific_capacity_cost=0, # no specific capacity costs capacity_is_instantaneous=False, - validate=False) - + validate=False, + ) + # identify it as having been preselected - + arcs.options_selected[0] = True - + # add the arc - - return self.add_directed_arc( - node_key_a, - node_key_b, - arcs=arcs - ) - + + return self.add_directed_arc(node_key_a, node_key_b, arcs=arcs) + # ************************************************************************* # ************************************************************************* - + def add_preexisting_directed_arc( - self, - node_key_a, - node_key_b, - efficiency: dict, - static_loss: dict, - capacity: float or int, - capacity_is_instantaneous: bool): + self, + node_key_a, + node_key_b, + efficiency: dict, + static_loss: dict, + capacity: float or int, + capacity_is_instantaneous: bool, + ): """ Adds a pre-existing directed arc to the network. - + re-existing arcs are not subject to optimisation but constrain it. Parameters @@ -1220,7 +1083,7 @@ class Network(nx.MultiDiGraph): node_key_b : hashable-type The node key of the arc's end node. efficiency : dict - A dict containing the arc efficiency values for each interval and + A dict containing the arc efficiency values for each interval and scenario using (scenario, interval) tuples as keys. static_loss: dict A dict containing the static loss values for each option, interval @@ -1237,47 +1100,45 @@ class Network(nx.MultiDiGraph): None. 
""" - + # prepare the arc technology object - + arcs = Arcs( - name=str(node_key_a)+str(node_key_b), - efficiency=efficiency, - efficiency_reverse=None, # should be ignored downstream + name=str(node_key_a) + str(node_key_b), + efficiency=efficiency, + efficiency_reverse=None, # should be ignored downstream static_loss=static_loss, - capacity=[capacity], # one capacity only - minimum_cost=[0], # pre-existing arcs have no costs - specific_capacity_cost=0, # no specific capacity costs + capacity=[capacity], # one capacity only + minimum_cost=[0], # pre-existing arcs have no costs + specific_capacity_cost=0, # no specific capacity costs capacity_is_instantaneous=capacity_is_instantaneous, - validate=False) - + validate=False, + ) + # identify it as having been preselected - + arcs.options_selected[0] = True - + # add the arc - - return self.add_directed_arc( - node_key_a, - node_key_b, - arcs=arcs - ) - + + return self.add_directed_arc(node_key_a, node_key_b, arcs=arcs) + # ************************************************************************* # ************************************************************************* - + def add_preexisting_undirected_arc( - self, - node_key_a, - node_key_b, - efficiency: dict, - efficiency_reverse: dict, - static_loss: dict, - capacity: float or int, - capacity_is_instantaneous: bool): + self, + node_key_a, + node_key_b, + efficiency: dict, + efficiency_reverse: dict, + static_loss: dict, + capacity: float or int, + capacity_is_instantaneous: bool, + ): """ Adds a pre-existing undirected arc. - + Pre-existing arcs are not subject to optimisation but constrain it. The difference with regard to directed arcs is that undirected arcs cannot have an infinite capacity. @@ -1289,10 +1150,10 @@ class Network(nx.MultiDiGraph): node_key_b : hashable-type The node key of the arc's end node. efficiency : dict - A dict containing the arc efficiency values for each interval and + A dict containing the arc efficiency values for each interval and scenario using (scenario, interval) tuples as keys. efficiency_reverse : dict - A dict containing the reverse arc efficiency values for each + A dict containing the reverse arc efficiency values for each interval and scenario using (scenario, interval) tuples as keys. static_loss: dict A dict containing the static loss values for each option, interval @@ -1308,61 +1169,57 @@ class Network(nx.MultiDiGraph): None. """ - + # prepare the arc technology object - + arcs = Arcs( - name=str(node_key_a)+str(node_key_b), - efficiency=efficiency, + name=str(node_key_a) + str(node_key_b), + efficiency=efficiency, efficiency_reverse=efficiency_reverse, static_loss=static_loss, - capacity=[capacity], - minimum_cost=[0], - specific_capacity_cost=0, + capacity=[capacity], + minimum_cost=[0], + specific_capacity_cost=0, capacity_is_instantaneous=False, - validate=False) - + validate=False, + ) + # identify it as having been preselected - + arcs.options_selected = [True] - + # add an undirected arc - + return self.add_undirected_arc( - node_key_a=node_key_a, - node_key_b=node_key_b, - arcs=arcs) - + node_key_a=node_key_a, node_key_b=node_key_b, arcs=arcs + ) + # ************************************************************************* # ************************************************************************* - + def arc_is_undirected(self, arc_key) -> bool: "Returns True if the arc is undirected and False otherwise." 
- + if self.has_edge(*arc_key): - if self.KEY_ARC_UND in self.edges[arc_key]: - # arc has undirected attribute - + return self.edges[arc_key][self.KEY_ARC_UND] - - else: # arc has no undirected attribute - + + else: # arc has no undirected attribute return False - - else: # arc does not exist - + + else: # arc does not exist return False - + # ************************************************************************* # ************************************************************************* - + def has_tree_topology(self) -> bool: """ Returns True if the network has a tree topology and False otherwise. It only considers arcs that are selected and connected nodes. - + It relies on networkx.is_tree method. Returns @@ -1371,34 +1228,35 @@ class Network(nx.MultiDiGraph): If True, the network has a tree topology. If False, it does not. """ - + # obtain network view - + network_view = self.copy(as_view=False) - + # remove arcs that were not selected - + network_view.remove_edges_from( - [arc_key - for arc_key in self.edges(keys=True) - if True not in self.edges[arc_key]['technology'].options_selected] - ) + [ + arc_key + for arc_key in self.edges(keys=True) + if True not in self.edges[arc_key]["technology"].options_selected + ] + ) # remove unconnected nodes - + network_view.remove_nodes_from(find_unconnected_nodes(network_view)) - + if network_view.number_of_nodes() == 0: - # trivial solution - + return False - + else: - # use networkx method for tree detection - + return nx.is_tree(network_view) + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/problem.py b/src/topupopt/problems/esipp/problem.py index 92d08ca..5f013ff 100644 --- a/src/topupopt/problems/esipp/problem.py +++ b/src/topupopt/problems/esipp/problem.py @@ -15,19 +15,20 @@ from ...data.finance.invest import discount_factor from .network import Network, Arcs from .system import EnergySystem from .resource import ResourcePrice - + # ***************************************************************************** # ***************************************************************************** # TODO: allow users to define how fixed components in the objective function # are handled (using a variable equal to one or by excluding them altogether) + class InfrastructurePlanningProblem(EnergySystem): """A class for optimisation of infrastructure planning problems.""" - + # ************************************************************************* # ************************************************************************* - + SOS1_ARC_WEIGHTS_NONE = None SOS1_ARC_WEIGHTS_COST = 1 SOS1_ARC_WEIGHTS_CAP = 2 @@ -38,465 +39,428 @@ class InfrastructurePlanningProblem(EnergySystem): SOS1_ARC_WEIGHTS_COST, SOS1_ARC_WEIGHTS_CAP, SOS1_ARC_WEIGHTS_SPEC_COST, - SOS1_ARC_WEIGHTS_SPEC_CAP - ) - + SOS1_ARC_WEIGHTS_SPEC_CAP, + ) + SOS1_SENSE_OFFSET = 1 SOS1_SENSE_WEIGHT_NOMINAL_HIGHER = 1 SOS1_SENSE_WEIGHT_REVERSE_HIGHER = 2 SOS1_SENSE_WEIGHTS = [ SOS1_SENSE_WEIGHT_NOMINAL_HIGHER, - SOS1_SENSE_WEIGHT_REVERSE_HIGHER - ] - + SOS1_SENSE_WEIGHT_REVERSE_HIGHER, + ] + STATIC_LOSS_MODE_DEP = 1 STATIC_LOSS_MODE_ARR = 2 STATIC_LOSS_MODE_US = 3 STATIC_LOSS_MODE_DS = 4 - + STATIC_LOSS_MODES = ( STATIC_LOSS_MODE_DEP, STATIC_LOSS_MODE_ARR, STATIC_LOSS_MODE_US, - STATIC_LOSS_MODE_DS - ) - + STATIC_LOSS_MODE_DS, + ) + 
# ************************************************************************* # ************************************************************************* - - def __init__(self, - name: str, - discount_rates: dict, # key: assessment; value: list - reporting_periods: dict, # key: assessment; value: periods - time_intervals: dict, # key: assessment; value: intervals - time_weights: dict = None, # key: assessment, period, interval; value: weight - normalised_time_interval_duration: dict = None, - assessment_weights: dict = None, - networks: dict = None, - converters: dict = None, - prepare_model: bool = True, - validate_inputs: bool = True): # TODO: switch to False when everything is more mature - + + def __init__( + self, + name: str, + discount_rates: dict, # key: assessment; value: list + reporting_periods: dict, # key: assessment; value: periods + time_intervals: dict, # key: assessment; value: intervals + time_weights: dict = None, # key: assessment, period, interval; value: weight + normalised_time_interval_duration: dict = None, + assessment_weights: dict = None, + networks: dict = None, + converters: dict = None, + prepare_model: bool = True, + validate_inputs: bool = True, + ): # TODO: switch to False when everything is more mature # ********************************************************************* - - if validate_inputs: - + + if validate_inputs: # validate the inputs - - (self.assessment_keys, - self.number_assessments, - self.number_reporting_periods, - self.number_time_intervals) = self._validate_inputs( - discount_rates=discount_rates, - reporting_periods=reporting_periods, - time_intervals=time_intervals, - time_weights=time_weights, - normalised_time_interval_duration=( - normalised_time_interval_duration - ), - assessment_weights=assessment_weights - ) - - else: # skip validation - + + ( + self.assessment_keys, + self.number_assessments, + self.number_reporting_periods, + self.number_time_intervals, + ) = self._validate_inputs( + discount_rates=discount_rates, + reporting_periods=reporting_periods, + time_intervals=time_intervals, + time_weights=time_weights, + normalised_time_interval_duration=(normalised_time_interval_duration), + assessment_weights=assessment_weights, + ) + + else: # skip validation self.assessment_keys = tuple(discount_rates.keys()) - + self.number_assessments = len(self.assessment_keys) - + self.number_reporting_periods = { - q: len(reporting_periods[q]) - for q in self.assessment_keys - } - + q: len(reporting_periods[q]) for q in self.assessment_keys + } + self.number_time_intervals = { - q: len(time_intervals[q]) - for q in self.assessment_keys - } - + q: len(time_intervals[q]) for q in self.assessment_keys + } + # initialise - + self.discount_rates = dict(discount_rates) - + self.reporting_periods = dict(reporting_periods) - + self.time_intervals = dict(time_intervals) - + self.average_time_interval = { - q: mean(self.time_intervals[q]) - for q in self.assessment_keys - } - + q: mean(self.time_intervals[q]) for q in self.assessment_keys + } + self.normalised_time_interval_duration = { - (q,k): duration/self.average_time_interval[q] + (q, k): duration / self.average_time_interval[q] for q in self.assessment_keys for k, duration in enumerate(self.time_intervals[q]) - } - + } + # relation between reporting periods and time intervals - + if type(time_weights) != dict: - - self.time_weights = None # default values will be used - + self.time_weights = None # default values will be used + # self.time_weights = { # (q,p,k): 1 # for q in self.assessment_keys 
# for p in self.reporting_periods[q] # for k in self.time_intervals[q] # } - - else: - + + else: # TODO: validate - + # non-default values - + self.time_weights = dict(time_weights) - + # weight of each assessment - + if type(assessment_weights) != dict: - self.assessment_weights = None - + else: - # TODO: validate - + self.assessment_weights = dict(assessment_weights) - + # ********************************************************************* - + # set the name - + self.name = name - + # identify the type of problem - + # TODO: develop method to automatically identify the type of problem - + self.optimisation_problem_type = SolverInterface.PROBLEM_MILP - + # ********************************************************************* - + # initialise dynamic systems and networks objects - - EnergySystem.__init__( - self, - networks=networks, - converters=converters - ) - + + EnergySystem.__init__(self, networks=networks, converters=converters) + # ********************************************************************* - + # modelling options - + # dict to indicate which arcs should be decided upon using SOS1 # keys: (g,u,v,j) tuples for selectable arcs using sos # values: sos weights for each arc option # default outcome (by leaving it empty): do not use them/False - + self.use_sos1_arc_inv = {} - + # list to flag if real arc invest. variables should be used if possible # elements: (g,u,v,j) tuples for selectable arcs using sos # default outcome (by leaving it empty): do not use them/False - + self.use_real_arc_inv_variables_if_possible = [] - + # dict to indicate which arcs should have their sense decided via SOS1 # keys: (g,u,v,j) tuples for undirected arcs using sos # values: sos weights for each arc option # default outcome (by leaving it empty): do not use them/False - + self.use_sos1_flow_sense = {} - + # list to flag if real flow sense variables should be used if possible # elements: (g,u,v,j) tuples for undirected arcs # default outcome (by leaving it empty): do not use them/False - + self.use_real_sense_variables_if_possible = [] - + # ... to indicate which arcs are to be modelled using interface variab. # elements: (g,u,v,j) tuples for selectable arcs using sos # default outcome (by leaving it empty): do not use them/False - + self.use_arc_interface_variables = [] - + # ********************************************************************* - + # groups - + # dict for groups of arcs # keys: arc group key - # content: GLLJ tuples for the groups + # content: GLLJ tuples for the groups # default outcome (by leaving it empty): no groups - + self.arc_groups = {} - + # number of options for each group - + self.groups_number_options = {} - + # mandatory groups - + self.groups_mdt = {} - + # groups using interfaces - + self.groups_int = {} - + # groups using sos for arc selection - + self.groups_arc_sos1 = {} - + # groups using nnr for arc selection - + self.groups_arc_nnr = {} - + # ********************************************************************* - + # static losses - + # dict to list the arcs whose static losses are placed in the start no. 
-
+
        self.static_losses_departure_node = {}
-
+
        # dict to list the arcs whose static losses are placed in the end node
-
+
        self.static_losses_arrival_node = {}
-
+
        # dict to list the arcs whose static losses are placed upstream
-
+
        self.static_losses_upstream = {}
-
+
        # dict to list the arcs whose static losses are placed downstream
-
+
        self.static_losses_downstream = {}
-
+
        # *********************************************************************
-
+
        if prepare_model:
-
            self.prepare()

    # *************************************************************************
    # *************************************************************************
-
+
    def _validate_inputs(
-            self,
-            discount_rates: dict, # key: assessment; value: list
-            reporting_periods: dict, # key: assessment; value: periods
-            time_intervals: dict, # key: assessment; value: intervals
-            time_weights: dict, # key: assessment, period, interval; value: weight
-            normalised_time_interval_duration: dict, # key: assessment, interval; value: ratio
-            assessment_weights: dict, # key: assessment; value: weight
-            **kwargs) -> tuple:
-
+        self,
+        discount_rates: dict,  # key: assessment; value: list
+        reporting_periods: dict,  # key: assessment; value: periods
+        time_intervals: dict,  # key: assessment; value: intervals
+        time_weights: dict,  # key: assessment, period, interval; value: weight
+        normalised_time_interval_duration: dict,  # key: assessment, interval; value: ratio
+        assessment_weights: dict,  # key: assessment; value: weight
+        **kwargs
+    ) -> tuple:
        # types
-
        if type(discount_rates) != dict:
-
            raise TypeError(
-                'Discount rates must be provided as a dictionary '+
-                'whose keys represent assessments.'
-                )
-
+                "Discount rates must be provided as a dictionary "
+                + "whose keys represent assessments."
+            )
+
        if type(reporting_periods) != dict:
-
            raise TypeError(
-                'Reporting periods must be provided as a dictionary '+
-                'whose keys represent assessments.'
-                )
-
+                "Reporting periods must be provided as a dictionary "
+                + "whose keys represent assessments."
+            )
+
        if type(time_intervals) != dict:
-
            raise TypeError(
-                'Time intervals must be provided as a dictionary '+
-                'whose keys represent assessments.'
-                )
-
+                "Time intervals must be provided as a dictionary "
+                + "whose keys represent assessments."
+            )
+
        # sizes
-
        assessment_keys = tuple(discount_rates.keys())
-
        number_assessments = len(assessment_keys)
-
        if number_assessments == 0:
-
            raise ValueError(
-                'The problem only makes sense with at least one assessment.'
-                )
-
-        # use the first assessment key to determine the number of reporting p.
-
-        total_number_reporting_periods = len(
-            discount_rates[assessment_keys[0]]
+                "The problem only makes sense with at least one assessment."
            )
-
+
+        # use the first assessment key to determine the number of reporting p.
+
+        total_number_reporting_periods = len(discount_rates[assessment_keys[0]])
+
        for q in assessment_keys:
-
            i_qk = discount_rates[q]
-
+
            if type(i_qk) != tuple:
-
-                raise TypeError('The discount rates for a given assesssment '+
-                                'should be provided as a tuple.')
-
+                raise TypeError(
+                    "The discount rates for a given assessment "
+                    + "should be provided as a tuple."
+                )
+
            number_i = len(i_qk)
-
+
            if number_i == 0:
-
-                raise ValueError('There should be at least one discount rate '+
-                                 'per assessment.')
-
+                raise ValueError(
+                    "There should be at least one discount rate " "per assessment."
+                )
+
            # the number of discount rates should be the same
-
            if number_i != total_number_reporting_periods:
-
-                raise ValueError('The discount rates provided are inconsistent'
-                                 ' among the different assessments.')
-
+                raise ValueError(
+                    "The discount rates provided are inconsistent"
+                    " among the different assessments."
+                )
+
        # check if all the keys for reporting_periods are for assessments
-
+
        for key, value in reporting_periods.items():
-
            if key not in assessment_keys:
-
-                raise ValueError('Unknown assessment key: '+str(key)+'.')
-
+                raise ValueError("Unknown assessment key: " + str(key) + ".")
+
            if type(value) != tuple:
-
-                raise TypeError('The reporting periods for a given assessment '
-                                +'have to provided as a tuple.')
-
-        # check if all the keys for time_intervals are for assessments
-
+                raise TypeError(
+                    "The reporting periods for a given assessment "
+                    + "have to be provided as a tuple."
+                )
+
+        # check if all the keys for time_intervals are for assessments
+
        for key, value in time_intervals.items():
-
            if key not in assessment_keys:
-
-                raise ValueError('Unknown assessment key: '+str(key)+'.')
-
+                raise ValueError("Unknown assessment key: " + str(key) + ".")
+
            if type(value) != tuple:
-
-                raise TypeError('The time intervals for a given assessment '
-                                +'have to provided as a tuple.')
-
+                raise TypeError(
+                    "The time intervals for a given assessment "
+                    + "have to be provided as a tuple."
+                )
+
            # time interval durations have to be positive reals
-
            for duration in value:
-
                if isinstance(duration, Real):
-
                    # time intervals (durations) must be positive
-
+
                    if duration <= 0:
-
-                        raise ValueError(
-                            'Time interval durations have to be positive.'
-                            )
-
+                        raise ValueError("Time interval durations have to be positive.")
+
                else:
-
-                    raise ValueError(
-                        'Time interval durations have to be real-valued.'
-                        )
-
+                    raise ValueError("Time interval durations have to be real-valued.")
+
        # all assessments must be included in reporting_periods and time_interv
-
+
        # checking the size will suffice, since the keys are valid by now
-
        if len(reporting_periods.keys()) != number_assessments:
-
-            raise ValueError('There is missing data about (some) assessments.')
-
+            raise ValueError("There is missing data about (some) assessments.")
+
        if len(time_intervals.keys()) != number_assessments:
-
-            raise ValueError('There is missing data about (some) assessments.')
-
+            raise ValueError("There is missing data about (some) assessments.")
+
        # number of reporting periods and time intervals
-
        number_reporting_periods = {
-            q: len(reporting_periods[q])
-            for q in assessment_keys
-            }
-
-        number_time_intervals = {
-            q: len(time_intervals[q])
-            for q in assessment_keys
-            }
-
+            q: len(reporting_periods[q]) for q in assessment_keys
+        }
+
+        number_time_intervals = {q: len(time_intervals[q]) for q in assessment_keys}
+
        # all reporting periods have to be covered by the assessments
-
        set_reporting_periods = set()
-
        for q in assessment_keys:
-
-            set_reporting_periods = set_reporting_periods.union(
-                reporting_periods[q]
-                )
-
+            set_reporting_periods = set_reporting_periods.union(reporting_periods[q])
+
        if len(set_reporting_periods) != total_number_reporting_periods:
-
            raise ValueError(
-                'Not all reporting periods for which there are discount rates'+
-                ' are covered.')
+                "Not all reporting periods for which there are discount rates"
+                + " are covered."
+            )
+
        # reporting periods have to form a sequence (among all assessments)
-
+
        # intervals have to be sequential (within each scenario)
-
-
-
+
        # return useful data
-
-        return (
-            assessment_keys,
-            number_assessments,
-            number_reporting_periods,
-            number_time_intervals
-            )
+
+        return (
+            assessment_keys,
+            number_assessments,
+            number_reporting_periods,
+            number_time_intervals,
+        )

    # *************************************************************************
    # *************************************************************************
-
+
    def reset_arc_groups(self):
        "Clears the dictionaries for the groups of arcs under consideration."
-
+
        # groups
-
+
        self.arc_groups = {}
-
+
        # number of options for each group
-
+
        self.groups_number_options = {}
-
+
        # mandatory groups
-
+
        self.groups_mdt = {}
-
+
        # groups using interfaces
-
+
        self.groups_int = {}
-
+
        # groups using sos1 for arc selection
-
+
        self.groups_arc_sos1 = {}
-
+
        # groups using nnr for arc selection
-
+
        self.groups_arc_nnr = {}

    # *************************************************************************
    # *************************************************************************
-
-    def create_arc_group(self,
-                         gllj_tuples: tuple,
-                         mandatory: bool = False,
-                         use_sos1: bool = False,
-                         use_interface: bool = False,
-                         use_nnr_variables_if_possible: bool = False) -> int:
+
+    def create_arc_group(
+        self,
+        gllj_tuples: tuple,
+        mandatory: bool = False,
+        use_sos1: bool = False,
+        use_interface: bool = False,
+        use_nnr_variables_if_possible: bool = False,
+    ) -> int:
        """
        Create a group of arcs whose investment is to be decided collectively.

@@ -516,92 +480,87 @@ class InfrastructurePlanningProblem(EnergySystem):
        Returns the key that identifies the arc group, an integer.

        """
-
+
        # make sure there are at least two arcs
-
+
        if len(gllj_tuples) < 2:
-
            raise ValueError(
-                'At least two arcs need to be identified to create a group.'
-                )
-
+                "At least two arcs need to be identified to create a group."
+            )
+
        for arc_number, gllj in enumerate(gllj_tuples):
-
            # does the network exist?
-
+
            if gllj[0] in self.networks:
-
                # does the arc exist?
-
+
                if self.networks[gllj[0]].has_edge(*gllj[1:]):
-
                    # the arc exists, check if it has the same number of arc
                    # options as previous arcs
-
+
                    if arc_number == 0:
-
                        # first iteration, determine the number of options
-
-                        number_options = self.networks[
-                            gllj[0]].edges[gllj[1:]][
-                                Network.KEY_ARC_TECH].number_options()
-
+
+                        number_options = (
+                            self.networks[gllj[0]]
+                            .edges[gllj[1:]][Network.KEY_ARC_TECH]
+                            .number_options()
+                        )
+
                        continue
-
-                    if number_options != self.networks[
-                            gllj[0]].edges[gllj[1:]][
-                                Network.KEY_ARC_TECH].number_options():
-
+
+                    if (
+                        number_options
+                        != self.networks[gllj[0]]
+                        .edges[gllj[1:]][Network.KEY_ARC_TECH]
+                        .number_options()
+                    ):
                        raise ValueError(
-                            'The number of options is not consistent among the'
-                            +' arcs.'
-                            )
-
+                            "The number of options is not consistent among the"
+                            + " arcs."
+                        )
+
                else:
-
                    # the arc does not exist
-
-                    raise ValueError('The arc '+str(gllj)+' does not exist.')
-
-            else: # no, it does not
-
-                raise ValueError(
-                    'The network '+str(gllj[0])+' does not exist.'
- ) - - # everything checks out: add the group - + + raise ValueError("The arc " + str(gllj) + " does not exist.") + + else: # no, it does not + raise ValueError("The network " + str(gllj[0]) + " does not exist.") + + # everything checks out: add the group + new_t = len(self.arc_groups) - + self.arc_groups[new_t] = tuple(gllj_tuples) - + # number of options for each group - + self.groups_number_options[new_t] = number_options - + # mandatory groups - + self.groups_mdt[new_t] = mandatory - + # groups using interfaces - + self.groups_int[new_t] = use_interface - + # groups using sos1 for arc selection - + self.groups_arc_sos1[new_t] = use_sos1 - + # groups using nnr for arc selection - + self.groups_arc_nnr[new_t] = use_nnr_variables_if_possible - + # return the key - + return new_t - + # ************************************************************************* # ************************************************************************* - + def clear_static_loss_configurations(self): """ Clears all configurations for arcs with static losses. @@ -611,23 +570,23 @@ class InfrastructurePlanningProblem(EnergySystem): None. """ - + self.static_losses_arrival_node = {} - + self.static_losses_departure_node = {} - + self.static_losses_downstream = {} - + self.static_losses_upstream = {} # ************************************************************************* # ************************************************************************* - + def place_static_losses(self, network_key, arc_key, mode): """ Defines the mode according to which a given arc with static losses is to have them represented. - + There are four options available: 1) in the flow departure node (directed or undirected arcs); 2) in the flow arrival node (directed or undirected arcs); @@ -652,7 +611,7 @@ class InfrastructurePlanningProblem(EnergySystem): ------ ValueError This error is raised if the arc or the network are not recognised, - if the arc is not directed or involves an export node, or if the + if the arc is not directed or involves an export node, or if the static loss modelling mode is not recognised. Returns @@ -660,191 +619,180 @@ class InfrastructurePlanningProblem(EnergySystem): None. 
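
        Examples
        --------
        A minimal sketch with hypothetical keys, assuming ``problem`` is
        an InfrastructurePlanningProblem whose network 'grid' holds a
        directed arc ('a', 'b', 0) with static losses:

        >>> problem.place_static_losses(
        ...     network_key='grid',
        ...     arc_key=('a', 'b', 0),
        ...     mode=problem.STATIC_LOSS_MODE_DEP,
        ... )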
""" - + if mode not in self.STATIC_LOSS_MODES: - - raise ValueError('Unknown static loss modelling mode.') - + raise ValueError("Unknown static loss modelling mode.") + if network_key in self.networks and len(arc_key) == 3: - # check if the arc exists - + if self.networks[network_key].has_edge(*arc_key): - # the arc exists, now check if it has static losses - - if not self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_static_losses(): - + + if ( + not self.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_static_losses() + ): # no static losses, do nothing - + return - + # ************************************************************* - + # departure node - + if mode == self.STATIC_LOSS_MODE_DEP: - # if it exists, it cannot link import and export nodes - + # if it exists, it cannot start on an import node - + if arc_key[0] in self.networks[network_key].import_nodes: - # raise ValueError( # 'Import nodes cannot be used to place losses in.') - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + # if everything is okay, update the dict - - if ((network_key,*arc_key[0:2]) not in - self.static_losses_departure_node): - + + if ( + network_key, + *arc_key[0:2], + ) not in self.static_losses_departure_node: # previous entries exist - + self.static_losses_departure_node[ - (network_key,*arc_key[0:2])] = [arc_key[2]] - - else: # new entry - + (network_key, *arc_key[0:2]) + ] = [arc_key[2]] + + else: # new entry self.static_losses_departure_node[ - (network_key,*arc_key[0:2])].append(arc_key[2]) - + (network_key, *arc_key[0:2]) + ].append(arc_key[2]) + # ************************************************************* - + # arrival node - + if mode == self.STATIC_LOSS_MODE_ARR: - # if it exists, it cannot link import and export nodes - + # if it exists, it cannot end on an export node - + if arc_key[1] in self.networks[network_key].export_nodes: - # raise ValueError( # 'Export nodes cannot be used to place losses in.') - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + # if everything is okay, update the dict - - if ((network_key,*arc_key[0:2]) not in - self.static_losses_arrival_node): - + + if ( + network_key, + *arc_key[0:2], + ) not in self.static_losses_arrival_node: # previous entries exist - + self.static_losses_arrival_node[ - (network_key,*arc_key[0:2])] = [arc_key[2]] - - else: # new entry - + (network_key, *arc_key[0:2]) + ] = [arc_key[2]] + + else: # new entry self.static_losses_arrival_node[ - (network_key,*arc_key[0:2])].append(arc_key[2]) - - + (network_key, *arc_key[0:2]) + ].append(arc_key[2]) + # ************************************************************* - + # upstream - + if mode == self.STATIC_LOSS_MODE_US: - # if it exists, it cannot be directed - - if not self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_UND]: - + + if not self.networks[network_key].edges[arc_key][ + Network.KEY_ARC_UND + ]: # if the arc is directed, use STATIC_LOSS_MODE_DEP mode - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - - return # done - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + + return # done + # if everything is okay, update the dict - - if ((network_key,*arc_key[0:2]) not in - self.static_losses_upstream): - + + if (network_key, *arc_key[0:2]) not in self.static_losses_upstream: # 
previous entries exist - - self.static_losses_upstream[ - (network_key,*arc_key[0:2])] = [arc_key[2]] - - else: # new entry - + + self.static_losses_upstream[(network_key, *arc_key[0:2])] = [ + arc_key[2] + ] + + else: # new entry self.static_losses_upstream[ - (network_key,*arc_key[0:2])].append(arc_key[2]) - + (network_key, *arc_key[0:2]) + ].append(arc_key[2]) + # ************************************************************* - + # downstream - + if mode == self.STATIC_LOSS_MODE_DS: - # if it exists, it cannot be directed - - if not self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_UND]: - + + if not self.networks[network_key].edges[arc_key][ + Network.KEY_ARC_UND + ]: # if the arc is directed, use STATIC_LOSS_MODE_ARR mode - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - - return # done - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + + return # done + # if everything is okay, update the dict - - if ((network_key,*arc_key[0:2]) not in - self.static_losses_downstream): - + + if ( + network_key, + *arc_key[0:2], + ) not in self.static_losses_downstream: # previous entries exist - - self.static_losses_downstream[ - (network_key,*arc_key[0:2])] = [arc_key[2]] - - else: # new entry - + + self.static_losses_downstream[(network_key, *arc_key[0:2])] = [ + arc_key[2] + ] + + else: # new entry self.static_losses_downstream[ - (network_key,*arc_key[0:2])].append(arc_key[2]) - + (network_key, *arc_key[0:2]) + ].append(arc_key[2]) + # ************************************************************* - - else: # the arc does not exist - + + else: # the arc does not exist raise ValueError( - 'The arc key used does not match any arc in the network.') - - else: # something is up with the network key or arc key length - + "The arc key used does not match any arc in the network." + ) + + else: # something is up with the network key or arc key length raise ValueError( - 'Either the network key provided is incorrect or the arc key '+ - 'lacks the proper size.') - + "Either the network key provided is incorrect or the arc key " + + "lacks the proper size." + ) + # ************************************************************************* # ************************************************************************* - + def place_static_losses_departure_node(self): """ Configures the problem instances so that arcs will have their static - losses placed in the departure node or, if not possible, in the arrival + losses placed in the departure node or, if not possible, in the arrival node. Returns @@ -852,70 +800,57 @@ class InfrastructurePlanningProblem(EnergySystem): None. 
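
        Examples
        --------
        A sketch of the intended usage with a hypothetical ``problem``
        object; the resulting placements can be inspected afterwards:

        >>> problem.place_static_losses_departure_node()
        >>> problem.static_losses_departure_node  # doctest: +SKIP
        {('grid', 'a', 'b'): [0]}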
""" - + # all arcs except those ending in export nodes: departure # arcs ending in export nodes: arrival - + # clear all configurations - + self.clear_static_loss_configurations() - + for network_key, network in self.networks.items(): - for arc_key in network.edges(keys=True): - # if it has static losses - - if network.edges[arc_key][ - Network.KEY_ARC_TECH].has_static_losses(): - + + if network.edges[arc_key][Network.KEY_ARC_TECH].has_static_losses(): # if the arc is undirected - + if network.arc_is_undirected(arc_key): - self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + elif arc_key[0] not in network.import_nodes: - # directed and the departure node is not an import one # >> departure - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + elif arc_key[1] not in network.export_nodes: - # directed and the arrival node is not an export one # though the departure node is an import one # >> arrival - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + else: - raise ValueError( - 'Static losses cannot exist directly between '+ - 'import and export nodes. Use an intermediate arc'+ - ' to obtain the same outcome.') - + "Static losses cannot exist directly between " + + "import and export nodes. Use an intermediate arc" + + " to obtain the same outcome." + ) + # ************************************************************************* # ************************************************************************* - + def place_static_losses_arrival_node(self): """ Configures the problem instances so that arcs will have their static - losses placed in the arrival node or, if not possible, in the departure + losses placed in the arrival node or, if not possible, in the departure node. Returns @@ -923,68 +858,55 @@ class InfrastructurePlanningProblem(EnergySystem): None. """ - + # all arcs except those starting in import nodes: arrival # arcs starting in import nodes: departure - + # clear all configurations - + self.clear_static_loss_configurations() - + for network_key, network in self.networks.items(): - for arc_key in network.edges(keys=True): - # if it has static losses - - if network.edges[arc_key][ - Network.KEY_ARC_TECH].has_static_losses(): - + + if network.edges[arc_key][Network.KEY_ARC_TECH].has_static_losses(): # if the arc is undirected - + if network.arc_is_undirected(arc_key): - # arrival - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + elif arc_key[1] not in network.export_nodes: - # directed and the arrival node is not an export one # >> arrival - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + elif arc_key[0] not in network.import_nodes: - # directed and the departure node is not an import one # though the arrival node is an export one # >> departure - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + else: - raise ValueError( - 'Static losses cannot exist directly between '+ - 'import and export nodes. 
Use an intermediate arc'+ - ' to obtain the same outcome.') - + "Static losses cannot exist directly between " + + "import and export nodes. Use an intermediate arc" + + " to obtain the same outcome." + ) + # ************************************************************************* # ************************************************************************* - + def place_static_losses_upstream(self): """ Configures the problem instances so that arcs will have their static @@ -997,65 +919,52 @@ class InfrastructurePlanningProblem(EnergySystem): None. """ - + # no import-export arcs with static losses are allowed - + # clear all configurations - + self.clear_static_loss_configurations() - + # for each arc - + for network_key, network in self.networks.items(): - for arc_key in network.edges(keys=True): - - if network.edges[arc_key][ - Network.KEY_ARC_TECH].has_static_losses(): - + if network.edges[arc_key][Network.KEY_ARC_TECH].has_static_losses(): # if the arc is undirected - + if network.arc_is_undirected(arc_key): - self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_US - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_US + ) + elif arc_key[0] not in network.import_nodes: - # directed and the departure node is not an import one # >> departure - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + elif arc_key[1] not in network.export_nodes: - # directed and the arrival node is not an export one # though the departure node is an import one # >> arrival - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + else: - raise ValueError( - 'Static losses cannot exist directly between '+ - 'import and export nodes. Use an intermediate arc'+ - ' to obtain the same outcome.') - + "Static losses cannot exist directly between " + + "import and export nodes. Use an intermediate arc" + + " to obtain the same outcome." + ) + # ************************************************************************* # ************************************************************************* - + def place_static_losses_downstream(self): """ Configures the problem instances so that arcs will have their static @@ -1068,62 +977,49 @@ class InfrastructurePlanningProblem(EnergySystem): None. 
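
        Examples
        --------
        A sketch with a hypothetical ``problem`` object; undirected arcs
        are registered for downstream placement, whereas directed arcs
        fall back to the arrival- or departure-node placements:

        >>> problem.place_static_losses_downstream()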
""" - + # clear all configurations - + self.clear_static_loss_configurations() - + # for each arc - + for network_key, network in self.networks.items(): - for arc_key in network.edges(keys=True): - - if network.edges[arc_key][ - Network.KEY_ARC_TECH].has_static_losses(): - + if network.edges[arc_key][Network.KEY_ARC_TECH].has_static_losses(): # if the arc is undirected - + if network.arc_is_undirected(arc_key): - self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DS - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DS + ) + elif arc_key[1] not in network.export_nodes: - # directed and the arrival node is not an export one # >> arrival - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_ARR - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_ARR + ) + elif arc_key[0] not in network.import_nodes: - # directed and the departure node is not an import one # though the arrival node is an export one # >> departure - + self.place_static_losses( - network_key, - arc_key, - mode=self.STATIC_LOSS_MODE_DEP - ) - + network_key, arc_key, mode=self.STATIC_LOSS_MODE_DEP + ) + else: - raise ValueError( - 'Static losses cannot exist directly between '+ - 'import and export nodes. Use an intermediate arc'+ - ' to obtain the same outcome.') - + "Static losses cannot exist directly between " + + "import and export nodes. Use an intermediate arc" + + " to obtain the same outcome." + ) + # # ************************************************************************* - + # def favour_placing_fixed_directed_arc_losses_upstream(self): # """ # Sets up the problem to place fixed arc losses upstream. @@ -1133,57 +1029,57 @@ class InfrastructurePlanningProblem(EnergySystem): # None. # """ - + # # initialise the object (if empty, they are all placed upstream) - + # self.place_fixed_arc_losses_downstream_if_possible = [] - + # # check for exceptions - + # # for each network - + # for network_key in self.networks: - + # # for each arc - + # for arc_key in self.networks[network_key].edges(keys=True): - + # # if undirected, continue - - # if self.networks[network_key].edges[arc_key][ + + # if self.networks[network_key].edges[arc_key][ # Network.KEY_ARC_UND]: - + # continue - + # # if directed, make sure it has fixed losses - + # if not self.networks[network_key].edges[arc_key][ # Network.KEY_ARC_TECH].has_static_losses(): - + # # if not isinstance(self.networks[network_key].edges[arc_key][ # # Network.KEY_ARC_TECH], # # ArcsWithStaticLosses): - + # # if it does not have fixed losses, continue - + # continue - + # # if directed and with fixed losses, check if it involves an # # import arc - + # if arc_key[0] in self.networks[network_key].import_nodes: - + # self.place_fixed_arc_losses_downstream_if_possible.append( # (network_key,*arc_key) # ) - + # # ************************************************************* - + # # ***************************************************************** # # ************************************************************************* # # ************************************************************************* - + # def do_not_place_fixed_losses_downstream(self, network_key, arc_key): # """ # Unflags an arc technology from having its fixed losses put downstream. @@ -1200,27 +1096,27 @@ class InfrastructurePlanningProblem(EnergySystem): # None. 
# """ - + # # check if the tuple exists - - # if ((network_key, *arc_key) in + + # if ((network_key, *arc_key) in # self.place_fixed_arc_losses_downstream_if_possible): - + # # if it does, pop it - + # self.place_fixed_arc_losses_downstream_if_possible.pop( # (network_key, *arc_key) # ) - + # # if it does not, ignore # # ************************************************************************* # # ************************************************************************* - + # def place_fixed_losses_downstream(self, network_key, arc_key): # """ # Flags an arc technology as having its fixed losses downstream. - + # The arc technology must be an instance of ArcsWithStaticLosses # and the end node cannot be an export node (export arcs are excluded). @@ -1242,65 +1138,63 @@ class InfrastructurePlanningProblem(EnergySystem): # None. # """ - + # # the keys must be in the correct format and point to existing objects # # is not possible with export arcs # # is not possible between import nodes and export nodes # # it will be redundant for import arcs, since there is no other way - + # if network_key in self.networks and len(arc_key) == 3: - + # # check if the arc exists - + # if self.networks[network_key].has_edge(*arc_key): - + # # if it exists, it cannot be undirected - - # if self.networks[network_key].edges[arc_key][ + + # if self.networks[network_key].edges[arc_key][ # Network.KEY_ARC_UND]: - + # raise ValueError( # 'The arc selected is not directed.') - + # # if it exists, it cannot be between import and export nodes - + # # if it exists, it cannot be an export arc (export node as end) - + # if arc_key[2] in self.networks[network_key].export_nodes: - + # raise ValueError( # 'Export nodes cannot be used to place losses in.') - + # # if everything is okay, add the (g,u,v,j) tuple to the list # # if it is not already there, otherwise ignore - - # if ((network_key,*arc_key) not in + + # if ((network_key,*arc_key) not in # self.place_fixed_arc_losses_downstream_if_possible): - + # self.place_fixed_arc_losses_downstream_if_possible.append( # (network_key,*arc_key) # ) - + # else: # the arc does not exist - + # raise ValueError( # 'The arc key used does not match any arc in the network.') - + # else: # something is up with the network key or arc key length - + # raise ValueError( # 'Either the network key provided is incorrect or the arc key '+ # 'lacks the proper size.') - + # ************************************************************************* # ************************************************************************* - def use_interface_variables_for_arc_selection(self, - network_key, - arc_key): + def use_interface_variables_for_arc_selection(self, network_key, arc_key): """ Flags a given arc on a given network as requiring interface variables. - + Interface variables are a device introduced to evaluate whether it is advantageous to use special ordered sets of type 1 to decide which arcs to install while simultaneously using another special ordered set of @@ -1318,164 +1212,151 @@ class InfrastructurePlanningProblem(EnergySystem): None. 
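
        Examples
        --------
        A minimal sketch with hypothetical keys; the arc must exist and
        must not have been preselected, otherwise a ValueError is raised:

        >>> problem.use_interface_variables_for_arc_selection(
        ...     network_key='grid',
        ...     arc_key=('a', 'b', 0),
        ... )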
""" - + # check if the network and arc exist, including the format - + if network_key in self.networks and len(arc_key) == 3: - # check if the arc exists - + if self.networks[network_key].has_edge(*arc_key): - # if it exists, it cannot be preselected - - if self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - - raise ValueError( - 'The arc selected has been preselected.') - + + if ( + self.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): + raise ValueError("The arc selected has been preselected.") + # if so, check if the tuple is already on the list - - if (network_key, - *arc_key) not in self.use_arc_interface_variables: - + + if (network_key, *arc_key) not in self.use_arc_interface_variables: # if it is not, add it - - self.use_arc_interface_variables.append( - (network_key, *arc_key) - ) - + + self.use_arc_interface_variables.append((network_key, *arc_key)) + # if it is, ignore - - else: # the arc does not exist - + + else: # the arc does not exist raise ValueError( - 'The arc key used does not match any arc in the network.') - - else: # something is up with the network key or arc key length - + "The arc key used does not match any arc in the network." + ) + + else: # something is up with the network key or arc key length raise ValueError( - 'Either the network key provided is incorrect or the arc key '+ - 'lacks the proper size.') + "Either the network key provided is incorrect or the arc key " + + "lacks the proper size." + ) # ************************************************************************* # ************************************************************************* - - def do_not_use_interface_variables_for_arc_selection(self, - network_key, - arc_key): + + def do_not_use_interface_variables_for_arc_selection(self, network_key, arc_key): """ - Sets up the problem to not use interface variables on a given arc. - + Sets up the problem to not use interface variables on a given arc. + Parameters ---------- network_key : hashable-type The key to the network object. arc_key : hashable-type The key to the arc in the specified network. - + Returns ------- None. - + """ - + # check if the tuple exists - + if (network_key, *arc_key) in self.use_arc_interface_variables: - # if it does, pop it self.use_arc_interface_variables.remove((network_key, *arc_key)) - + # if it does not, ignore - + # ************************************************************************* # ************************************************************************* - - def do_not_use_sos1_for_arc_selection(self, - network_key, - arc_key): + + def do_not_use_sos1_for_arc_selection(self, network_key, arc_key): """ Sets up the problem to not use SOS1 on a given arc\'s selection. - + Parameters ---------- network_key : hashable-type The key to the network object. arc_key : hashable-type The key to the arc in the specified network. - + Returns ------- None. - + """ - + # check if the tuple exists - + if (network_key, *arc_key) in self.use_sos1_arc_inv: - # if it does, pop it - + self.use_sos1_arc_inv.pop((network_key, *arc_key)) - + # if it does not, ignore - + # ************************************************************************* # ************************************************************************* - - def do_not_use_sos1_for_flow_sense(self, - network_key, - arc_key): + + def do_not_use_sos1_for_flow_sense(self, network_key, arc_key): """ Sets up the problem to not use SOS1 to determine flow senses in an arc. 
- + Parameters ---------- network_key : hashable-type The key to the network object. arc_key : hashable-type The key to the arc in the specified network. - + Returns ------- None. - + """ - + # check if the tuple exists - + if (network_key, *arc_key) in self.use_sos1_flow_sense: - # if it does, pop it - + self.use_sos1_flow_sense.pop((network_key, *arc_key)) - + # if it does not, ignore # ************************************************************************* # ************************************************************************* - + def use_sos1_for_flow_senses( - self, - network_key, - arc_key, - use_real_variables_if_possible: bool = True, - use_interface_variables: bool = True, - sos1_weight_method: int = SOS1_SENSE_WEIGHT_NOMINAL_HIGHER): + self, + network_key, + arc_key, + use_real_variables_if_possible: bool = True, + use_interface_variables: bool = True, + sos1_weight_method: int = SOS1_SENSE_WEIGHT_NOMINAL_HIGHER, + ): """ Configure the model to use special ordered sets of type 1 (SOS1) for selecting the flow sense in a given undirected arc and network. - - Several methods can be used to determine the weights. - + + Several methods can be used to determine the weights. + The model can also be configured to avoid using strictly binary variab- les, if possible, and to use interface variables to separate arc selec- tion from flow sense determination. The former means using non-negative real variables for the sense variables and the latter introduces inter- mediate variables and equations to separate the two decisions. - + Parameters ---------- network_key : hashable-type @@ -1484,7 +1365,7 @@ class InfrastructurePlanningProblem(EnergySystem): The key to the arc in the specified network. use_real_variables_if_possible : bool, optional If True, instances will use real variables for selecting the flow - sense on this arc, if possible. If False, they will not be used. + sense on this arc, if possible. If False, they will not be used. The default is True. use_interface_variables : bool, optional If True, instances will use interface variables to attempt to de- @@ -1501,129 +1382,117 @@ class InfrastructurePlanningProblem(EnergySystem): ValueError This error is raised if the method is not recognised or if the arc is not undirected, since that means the flow sense is known. - + Returns ------- None. """ - + if sos1_weight_method not in self.SOS1_SENSE_WEIGHTS: - raise ValueError( - 'The method to determine the SOS weights was not recognised.') - + "The method to determine the SOS weights was not recognised." 
+ ) + if network_key in self.networks and len(arc_key) == 3: - # check if the arc exists - + if self.networks[network_key].has_edge(*arc_key): - # if it exists, it cannot be directed - - if not self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_UND]: - - raise ValueError( - 'The arc selected is not undirected.') - + + if not self.networks[network_key].edges[arc_key][Network.KEY_ARC_UND]: + raise ValueError("The arc selected is not undirected.") + # if so, create a new dict entry or update the old one - + # key: (g,u,v,j) # value: sos weights - - self.use_sos1_flow_sense[(network_key,*arc_key)] = ( - sos1_weight_method - ) - + + self.use_sos1_flow_sense[(network_key, *arc_key)] = sos1_weight_method + # if so, update the preference for the type of variables used - + if use_real_variables_if_possible: - # use real variables if possible - - if ((network_key,*arc_key) not in - self.use_real_sense_variables_if_possible): - + + if ( + network_key, + *arc_key, + ) not in self.use_real_sense_variables_if_possible: # the arc had not been flagged: flag it - + self.use_real_sense_variables_if_possible.append( - (network_key,*arc_key)) - + (network_key, *arc_key) + ) + # the arc was already flagged: do nothing - - else: # do not use real variables - + + else: # do not use real variables # check if already exists - - if ((network_key,*arc_key) in - self.use_real_sense_variables_if_possible): - + + if ( + network_key, + *arc_key, + ) in self.use_real_sense_variables_if_possible: # the arc was already flagged: unflag it - + self.use_real_sense_variables_if_possible.remove( - (network_key,*arc_key)) - + (network_key, *arc_key) + ) + # the arc was not flagged before: do nothing - + # interface variables - + if use_interface_variables: - # use interface variables - - if ((network_key,*arc_key) not in - self.use_arc_interface_variables): - + + if (network_key, *arc_key) not in self.use_arc_interface_variables: # the arc had not been flagged: flag it - - self.use_arc_interface_variables.append( - (network_key,*arc_key)) - + + self.use_arc_interface_variables.append((network_key, *arc_key)) + # the arc was already flagged: do nothing - - else: # do not use interface variables - + + else: # do not use interface variables # check if already exists - - if ((network_key,*arc_key) in - self.use_arc_interface_variables): - + + if (network_key, *arc_key) in self.use_arc_interface_variables: # the arc was already flagged: unflag it - - self.use_arc_interface_variables.remove( - (network_key,*arc_key)) - - else: # the arc does not exist - + + self.use_arc_interface_variables.remove((network_key, *arc_key)) + + else: # the arc does not exist raise ValueError( - 'The arc key used does not match any arc in the network.') - - else: # something is up with the network key or arc key length - + "The arc key used does not match any arc in the network." + ) + + else: # something is up with the network key or arc key length raise ValueError( - 'Either the network key provided is incorrect or the arc key '+ - 'lacks the proper size.') + "Either the network key provided is incorrect or the arc key " + + "lacks the proper size." 
+ ) # ************************************************************************* # ************************************************************************* - + def use_sos1_for_arc_selection( - self, - network_key, - arc_key, - use_real_variables_if_possible: bool = True, - sos1_weight_method: int = SOS1_ARC_WEIGHTS_NONE): + self, + network_key, + arc_key, + use_real_variables_if_possible: bool = True, + sos1_weight_method: int = SOS1_ARC_WEIGHTS_NONE, + ): """ Configure the model to use special ordered sets of type 1 (SOS1) for selecting which option to invest in a given arc and network. - - Several methods can be used to determine the weights. - + + Several methods can be used to determine the weights. + The model can also be configured to avoid using strictly binary variab- les, if possible. This means using non-negative real variables, and is only possible if the arc options are selectable yet mandatory. - + Parameters ---------- network_key : hashable-type @@ -1643,739 +1512,731 @@ class InfrastructurePlanningProblem(EnergySystem): ValueError This error is raised if the method is not recognised or if the arc has been preselected, since that means no optimisation is needed. - + Returns ------- None. """ - + if sos1_weight_method not in self.SOS1_ARC_WEIGHTS: - raise ValueError( - 'The method to determine the SOS1 weights was not recognised.') - + "The method to determine the SOS1 weights was not recognised." + ) + if network_key in self.networks and len(arc_key) == 3: - # check if the arc exists - + if self.networks[network_key].has_edge(*arc_key): - # if it exists, it cannot be preselected - - if self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - - raise ValueError( - 'The arc selected has been preselected.') - + + if ( + self.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): + raise ValueError("The arc selected has been preselected.") + # if it exists, there must be more than one option - - if self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].number_options() <= 1: - - return # makes no sense, skip - + + if ( + self.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .number_options() + <= 1 + ): + return # makes no sense, skip + # if so, create a new dict entry or update the old one - + # key: (g,u,v,j) # value: sos1 weights - + self.use_sos1_arc_inv[ - (network_key,*arc_key)] = self.compute_arc_sos1_weights( - self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH], - method=sos1_weight_method - ) - + (network_key, *arc_key) + ] = self.compute_arc_sos1_weights( + self.networks[network_key].edges[arc_key][Network.KEY_ARC_TECH], + method=sos1_weight_method, + ) + # if so, update the preference for the type of variables used - + if use_real_variables_if_possible: - # use real variables if possible - - if ((network_key,*arc_key) not in - self.use_real_arc_inv_variables_if_possible): - + + if ( + network_key, + *arc_key, + ) not in self.use_real_arc_inv_variables_if_possible: # the arc had not been flagged: flag it - + self.use_real_arc_inv_variables_if_possible.append( - (network_key,*arc_key)) - + (network_key, *arc_key) + ) + # the arc was already flagged: do nothing - - else: # do not use real variables - + + else: # do not use real variables # check if already exists - - if ((network_key,*arc_key) in - self.use_real_arc_inv_variables_if_possible): - + + if ( + network_key, + *arc_key, + ) in self.use_real_arc_inv_variables_if_possible: # the arc was already 
flagged: unflag it - + self.use_real_arc_inv_variables_if_possible.remove( - (network_key,*arc_key)) - + (network_key, *arc_key) + ) + # the arc was not flagged before: do nothing - - else: # the arc does not exist - + + else: # the arc does not exist raise ValueError( - 'The arc key used does not match any arc in the network.') - - else: # something is up with the network key or arc key length - + "The arc key used does not match any arc in the network." + ) + + else: # something is up with the network key or arc key length raise ValueError( - 'Either the network key provided is incorrect or the arc key '+ - 'lacks the proper size.') - + "Either the network key provided is incorrect or the arc key " + + "lacks the proper size." + ) + # ************************************************************************* # ************************************************************************* - - def compute_arc_sos1_weights(self, - arcs: Arcs, - method: int = SOS1_ARC_WEIGHTS_CAP, - verify_weights: bool = False) -> tuple: - + + def compute_arc_sos1_weights( + self, + arcs: Arcs, + method: int = SOS1_ARC_WEIGHTS_CAP, + verify_weights: bool = False, + ) -> tuple: if method not in self.SOS1_ARC_WEIGHTS: - raise ValueError( - 'The method to determine the SOS1 weights was not recognised.') - + "The method to determine the SOS1 weights was not recognised." + ) + if method == self.SOS1_ARC_WEIGHTS_CAP: - sos1_weights = tuple(capacity for capacity in arcs.capacity) - + elif method == self.SOS1_ARC_WEIGHTS_COST: - sos1_weights = tuple(cost for cost in arcs.minimum_cost) - + elif method == self.SOS1_ARC_WEIGHTS_SPEC_CAP: - sos1_weights = tuple( - cap/cost - for cap, cost in zip(arcs.capacity, arcs.minimum_cost) - ) - + cap / cost for cap, cost in zip(arcs.capacity, arcs.minimum_cost) + ) + elif method == self.SOS1_ARC_WEIGHTS_SPEC_COST: - sos1_weights = tuple( - cost/cap - for cap, cost in zip(arcs.capacity, arcs.minimum_cost) - ) - - else: # SOS1_ARC_WEIGHTS_NONE - + cost / cap for cap, cost in zip(arcs.capacity, arcs.minimum_cost) + ) + + else: # SOS1_ARC_WEIGHTS_NONE return None - + # make sure they are unique - + if verify_weights: - for weight in sos1_weights: - - if sos1_weights.count(weight) >= 2: # TODO: reach this point - - raise ValueError('These arcs cannot be considered using '+ - 'special ordered sets of type 1 (SOS1),'+ - ' since some weights are not unique.') - + if sos1_weights.count(weight) >= 2: # TODO: reach this point + raise ValueError( + "These arcs cannot be considered using " + + "special ordered sets of type 1 (SOS1)," + + " since some weights are not unique." 
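# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# How the four weight methods in compute_arc_sos1_weights relate, assuming a
# hypothetical arc set with two options, capacities (10, 20) and minimum
# costs (4, 5):
capacities, costs = (10.0, 20.0), (4.0, 5.0)
w_cap = capacities                                             # SOS1_ARC_WEIGHTS_CAP
w_cost = costs                                                 # SOS1_ARC_WEIGHTS_COST
w_spec_cap = tuple(c / k for c, k in zip(capacities, costs))   # SOS1_ARC_WEIGHTS_SPEC_CAP
w_spec_cost = tuple(k / c for c, k in zip(capacities, costs))  # SOS1_ARC_WEIGHTS_SPEC_COST
assert len(set(w_spec_cap)) == len(w_spec_cap)  # weights must be unique for SOS1
# ------------------------------------------------------------------------------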
+ ) + return sos1_weights - + # ************************************************************************* # ************************************************************************* - + def prepare(self): """Sets up the problem model with which instances can be built.""" - + # create pyomo model (AbstractModel) - + self.model = create_model(self.name) # ************************************************************************* # ************************************************************************* - - def instantiate(self, - pyomo_dict: dict = None, - initialise_ancillary_sets: bool = False): + + def instantiate( + self, pyomo_dict: dict = None, initialise_ancillary_sets: bool = False + ): """Instantiates the model using the data available or provided.""" - + # check if the (abstract) model already exists - + try: - assert type(self.model) == pyo.AbstractModel - + except AttributeError: - - raise ValueError('The problem model does not exist.') - + raise ValueError("The problem model does not exist.") + except AssertionError: - - raise TypeError('The problem model is not a pyomo AbstractModel.') - + raise TypeError("The problem model is not a pyomo AbstractModel.") + if type(pyomo_dict) == dict: - # use external dict to create pyomo model instance (ConcreteModel) - - self.instance = self.model.create_instance(pyomo_dict) - + + self.instance = self.model.create_instance(pyomo_dict) + else: - # make pre-instantiation preparations - - - + # generate pyomo-ready dictionary - + pyomo_dict = self.create_pyomo_dictionary( include_ancillary_sets=initialise_ancillary_sets - ) - + ) + # create pyomo model instance (ConcreteModel) - - self.instance = self.model.create_instance(pyomo_dict) + + self.instance = self.model.create_instance(pyomo_dict) # ************************************************************************* # ************************************************************************* - - def optimise(self, - solver_name: str, - solver_options: dict, - output_options: dict, - print_solver_output: bool = True): + + def optimise( + self, + solver_name: str, + solver_options: dict, + output_options: dict, + print_solver_output: bool = True, + ): """Optimise the problem using pyomo and the specifications provided.""" - + # check if the instance already exists - + try: - assert type(self.instance) == pyo.ConcreteModel - + except AttributeError: - - raise ValueError('The problem instance does not exist.') - + raise ValueError("The problem instance does not exist.") + except AssertionError: - - raise TypeError( - 'The problem instance is not a pyomo ConcreteModel.') - + raise TypeError("The problem instance is not a pyomo ConcreteModel.") + # ensure compatibility between problem and solver - + self.assert_solver_problem_compatibility(solver_name) - + # configure common solver interface - + self.solver_interface = SolverInterface( - solver_name=solver_name, - **solver_options) - + solver_name=solver_name, **solver_options + ) + # get the solver handler - - self.solver_handler = self.solver_interface.get_solver_handler( - **solver_options) - + + self.solver_handler = self.solver_interface.get_solver_handler(**solver_options) + # solve - - self.results = self.solver_handler.solve( - self.instance, - tee=print_solver_output) - + + self.results = self.solver_handler.solve(self.instance, tee=print_solver_output) + # post-optimisation - + if self.solver_interface.was_optimisation_sucessful( - self.results, - self.optimisation_problem_type): - + self.results, self.optimisation_problem_type + ): # if 
successful, import results - + self.import_results() - + return True - - return False - + + return False + # TODO: reach this statement, perhaps using a small solver budget # ************************************************************************* # ************************************************************************* - - def create_pyomo_dictionary(self, - include_ancillary_sets: bool = False) -> dict: + + def create_pyomo_dictionary(self, include_ancillary_sets: bool = False) -> dict: """Returns a dictionary with which the pyomo model can be instantiated.""" - - print('creating the dictionary...') - + + print("creating the dictionary...") + # ********************************************************************* - # ********************************************************************* # ********************************************************************* # ********************************************************************* - + # ********************************************************************* + # sets - + + # ********************************************************************* # ********************************************************************* - # ********************************************************************* # ********************************************************************* # ********************************************************************* - + # time - + # set of representative periods - - set_Q = self.assessment_keys # tuple(self.assessment_keys) - + + set_Q = self.assessment_keys # tuple(self.assessment_keys) + # set of representative periods - - set_P_q = { - q: tuple(p for p in self.reporting_periods[q]) - for q in set_Q - } - + + set_P_q = {q: tuple(p for p in self.reporting_periods[q]) for q in set_Q} + # set of time intervals - + set_K_q = { - q: tuple(k for k in range(self.number_time_intervals[q])) - for q in set_Q - } - + q: tuple(k for k in range(self.number_time_intervals[q])) for q in set_Q + } + # set of (q,p) tuples - - set_QP = [(q,p) for q in set_Q for p in set_P_q[q]] - + + set_QP = [(q, p) for q in set_Q for p in set_P_q[q]] + # set of (q,k) tuples - - set_QK = [(q,k) for q in set_Q for k in set_K_q[q]] - + + set_QK = [(q, k) for q in set_Q for k in set_K_q[q]] + # set of (q,p,k) tuples - - set_QPK = [(q,p,k) for (q,p) in set_QP for k in set_K_q[q]] - + + set_QPK = [(q, p, k) for (q, p) in set_QP for k in set_K_q[q]] + # ********************************************************************* - + # set of networks - + set_G = [g for g in self.networks.keys()] - + # set of nodes per grid - + set_L = { - g: tuple(l for l in self.networks[g].nodes) - for g in self.networks.keys() - } - + g: tuple(l for l in self.networks[g].nodes) for g in self.networks.keys() + } + # set of importing nodes per grid - + set_L_imp = { - g: tuple(l - for l in self.networks[g].nodes - if l in self.networks[g].import_nodes) + g: tuple( + l for l in self.networks[g].nodes if l in self.networks[g].import_nodes + ) for g in self.networks.keys() - } - + } + # set of exporting nodes per grid - + set_L_exp = { - g: tuple(l - for l in self.networks[g].nodes - if l in self.networks[g].export_nodes) + g: tuple( + l for l in self.networks[g].nodes if l in self.networks[g].export_nodes + ) for g in self.networks.keys() - } - + } + set_L_max_in_g = { g: tuple( - l + l for l in self.networks[g].nodes if l not in self.networks[g].nodes_wo_in_dir_arc_limitations - ) + ) for g in self.networks.keys() - } - + } + # set_L_max_out_g = { # g: tuple( - # l + # l # for l in 
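# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# Shape of the temporal index sets built above, assuming one assessment "q0"
# with two reporting periods and two time intervals:
set_Q = ("q0",)
set_P_q = {"q0": (0, 1)}
set_K_q = {"q0": (0, 1)}
set_QP = [(q, p) for q in set_Q for p in set_P_q[q]]
set_QPK = [(q, p, k) for (q, p) in set_QP for k in set_K_q[q]]
assert set_QPK == [("q0", 0, 0), ("q0", 0, 1), ("q0", 1, 0), ("q0", 1, 1)]
# ------------------------------------------------------------------------------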
self.networks[g].nodes # if l not in self.networks[g].nodes_wo_out_dir_arc_limitations # ) # for g in self.networks.keys() # } - - set_GL = tuple((g,l) for g in set_G for l in set_L[g]) - - set_GL_imp = tuple((g,l) for (g,l) in set_GL if l in set_L_imp[g]) - - set_GL_exp = tuple((g,l) for (g,l) in set_GL if l in set_L_exp[g]) - + + set_GL = tuple((g, l) for g in set_G for l in set_L[g]) + + set_GL_imp = tuple((g, l) for (g, l) in set_GL if l in set_L_imp[g]) + + set_GL_exp = tuple((g, l) for (g, l) in set_GL if l in set_L_exp[g]) + set_GL_exp_imp = tuple( - (g,l) - for (g,l) in set_GL - if (l in set_L_imp[g] or l in set_L_exp[g]) - ) - - set_GL_not_exp_imp = tuple( - gl - for gl in set_GL - if gl not in set_GL_exp_imp - ) - + (g, l) for (g, l) in set_GL if (l in set_L_imp[g] or l in set_L_exp[g]) + ) + + set_GL_not_exp_imp = tuple(gl for gl in set_GL if gl not in set_GL_exp_imp) + set_GLL = tuple( - (g,l1,l2) + (g, l1, l2) for g in set_G for l1 in set_L[g] if l1 not in set_L_exp[g] for l2 in set_L[g] if l2 not in set_L_imp[g] if l1 != l2 - ) - + ) + # ********************************************************************* - + # set of price segments set_S = { (g, l, q, p, k): tuple( - s - for s in range(self.networks[g].nodes[l][ - Network.KEY_NODE_PRICES][(q,p,k)].number_segments()) - ) if not self.networks[g].nodes[l][ - Network.KEY_NODE_PRICES_TIME_INVARIANT] - else tuple( - s - for s in range(self.networks[g].nodes[l][ - Network.KEY_NODE_PRICES][ - (q,p,k)].number_segments()) - ) + s + for s in range( + self.networks[g] + .nodes[l][Network.KEY_NODE_PRICES][(q, p, k)] + .number_segments() + ) + ) + if not self.networks[g].nodes[l][Network.KEY_NODE_PRICES_TIME_INVARIANT] + else tuple( + s + for s in range( + self.networks[g] + .nodes[l][Network.KEY_NODE_PRICES][(q, p, k)] + .number_segments() + ) + ) # for g in self.networks.keys() # for l in self.networks[g].nodes # if (l in self.networks[g].import_nodes or # l in self.networks[g].export_nodes) - for (g,l) in set_GL_exp_imp - for (q,p,k) in set_QPK - } - + for (g, l) in set_GL_exp_imp + for (q, p, k) in set_QPK + } + # set of GLKS tuples set_GLQPKS = tuple( - (*glqpk,s) - for glqpk, s_tuple in set_S.items() - for s in s_tuple - ) - - set_GLQPKS_exp = tuple(glqpks for glqpks in set_GLQPKS - if glqpks[1] in set_L_exp[glqpks[0]]) - - set_GLQPKS_imp = tuple(glqpks for glqpks in set_GLQPKS - if glqpks[1] in set_L_imp[glqpks[0]]) - + (*glqpk, s) for glqpk, s_tuple in set_S.items() for s in s_tuple + ) + + set_GLQPKS_exp = tuple( + glqpks for glqpks in set_GLQPKS if glqpks[1] in set_L_exp[glqpks[0]] + ) + + set_GLQPKS_imp = tuple( + glqpks for glqpks in set_GLQPKS if glqpks[1] in set_L_imp[glqpks[0]] + ) + # ********************************************************************* - + # dynamic systems - + # set of converters - + set_I = [key for key in self.converters.keys()] - + # set of optional converters - - set_I_new = self.optional_converters # or [key for key in self.optional_converters] - + + set_I_new = ( + self.optional_converters + ) # or [key for key in self.optional_converters] + # ********************************************************************* - + # inputs - + # set of inputs - + set_M = { converter_key: [m for m in range(converter.number_inputs)] - for converter_key, converter in self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # set of inputs that need to be modelled as binary variables - + set_M_bin = { converter_key: converter.binary_inputs - for converter_key, converter in 
self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # set of input signals whose amplitude is to be dimensioned - + set_M_dim = { converter_key: converter.dimensionable_inputs for converter_key, converter in self.converters.items() - if converter_key in self.optional_converters} - + if converter_key in self.optional_converters + } + # set of inputs whose amplitude has a cost - + set_M_cost = { converter_key: converter.amplitude_penalised_inputs for converter_key, converter in self.converters.items() - for converter_key in self.optional_converters} - + for converter_key in self.optional_converters + } + # set of inputs that can cause externalities - + set_M_ext = { converter_key: converter.externality_inducing_inputs - for converter_key, converter in self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # ********************************************************************* - + # set of outputs - + set_R = { converter_key: [r for r in range(converter.number_outputs)] - for converter_key, converter in self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # set of outputs whose amplitude is to be dimensioned - + set_R_dim = { converter_key: converter.dimensionable_outputs for converter_key, converter in self.converters.items() - if converter_key in self.optional_converters} - + if converter_key in self.optional_converters + } + # set of outputs whose amplitude has a cost - + set_R_cost = { converter_key: converter.amplitude_penalised_outputs for converter_key, converter in self.converters.items() - for converter_key in self.optional_converters} - + for converter_key in self.optional_converters + } + # set of outputs inducing externalities - + set_R_ext = { converter_key: converter.externality_inducing_outputs - for converter_key, converter in self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # ********************************************************************* - + # states - + # set of states - + set_N = { converter_key: [n for n in range(converter.number_states)] - for converter_key, converter in self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # set of state nodes whose upper limit is dimensionable, per system - + set_N_dim_up = {} - + # set of state nodes whose lower limit is dimensionable, per system - + set_N_dim_lo = {} - + # set of state nodes whose upper limit can induce costs, per system - + set_N_cost_up = {} - + # set of state nodes whose lower limit can induce costs, per system - + set_N_cost_lo = {} - + # states inducing externalities - + set_N_ext = { converter_key: converter.externality_inducing_states - for converter_key, converter in self.converters.items()} - + for converter_key, converter in self.converters.items() + } + # ********************************************************************* - + # arcs - + # set of arcs - + set_J = { - (g, u, v): - [j # the arc key - for j, _ in self.networks[g]._adj[u][v].items()] + (g, u, v): [ + j for j, _ in self.networks[g]._adj[u][v].items() # the arc key + ] for g in self.networks.keys() for u, v in self.networks[g].edges() - } - + } + # set of undirected arcs - + set_J_und = { - (g, u, v): - [j # all arcs that are undirected - for j in set_J[(g,u,v)] - if self.networks[g].arc_is_undirected(arc_key=(u,v,j))] + (g, u, v): [ + j # all arcs that are undirected + for j in set_J[(g, u, v)] + if self.networks[g].arc_is_undirected(arc_key=(u, v, 
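# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# The converter sets above map each converter key to index ranges taken from
# attributes such as number_inputs; a minimal stand-in:
class _Converter:  # hypothetical substitute for the real converter objects
    number_inputs = 2
    number_outputs = 1

converters = {"cvt0": _Converter()}
set_M = {key: list(range(c.number_inputs)) for key, c in converters.items()}
set_R = {key: list(range(c.number_outputs)) for key, c in converters.items()}
assert set_M == {"cvt0": [0, 1]} and set_R == {"cvt0": [0]}
# ------------------------------------------------------------------------------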
j)) + ] for (g, u, v) in set_J - } - + } + # TODO: replace this with validation rule in the pyomo model, asap - - for (g, u, v) in set_J_und: - - for j in set_J_und[(g,u,v)]: - + + for g, u, v in set_J_und: + for j in set_J_und[(g, u, v)]: if u in set_L_imp[g] or v in set_L_imp[g]: - - raise ValueError( - 'Undirected arcs cannot involve import nodes.' - ) - + raise ValueError("Undirected arcs cannot involve import nodes.") + if u in set_L_exp[g] or v in set_L_exp[g]: - - raise ValueError( - 'Undirected arcs cannot involve export nodes.' - ) - + raise ValueError("Undirected arcs cannot involve export nodes.") + # set of predefined arcs - + set_J_pre = { - (g, u, v): - [j for j in set_J[(g, u, v)] - # only if it has not been preselected - if True in self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].options_selected] + (g, u, v): [ + j + for j in set_J[(g, u, v)] + # only if it has not been preselected + if True + in self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected + ] for (g, u, v) in set_J - } - + } + # set of predefined infinite capacity arcs - + set_J_pre_inf = { - (g, u, v): - [j - for j in set_J_pre[(g, u, v)] - if j not in set_J_und[(g, u, v)] # can only be directed - # the capacity must be set to math.inf - if self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].capacity[ - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].options_selected.index( - True) - ] == math.inf] + (g, u, v): [ + j + for j in set_J_pre[(g, u, v)] + if j not in set_J_und[(g, u, v)] # can only be directed + # the capacity must be set to math.inf + if self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .capacity[ + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ] + == math.inf + ] for (g, u, v) in set_J_pre - } - + } + # sets of collectively-selected groups - + set_GLLJ_col = set( - arc_tuple - for group_of_arcs in self.arc_groups.values() - for arc_tuple in group_of_arcs) - + arc_tuple + for group_of_arcs in self.arc_groups.values() + for arc_tuple in group_of_arcs + ) + set_J_col = { - (g, u, v): - [j - for j in set_J[(g, u, v)] - if j not in set_J_pre[(g,u,v)] # new - if (g,u,v,j) in set_GLLJ_col] # can only appear once + (g, u, v): [ + j + for j in set_J[(g, u, v)] + if j not in set_J_pre[(g, u, v)] # new + if (g, u, v, j) in set_GLLJ_col + ] # can only appear once for (g, u, v) in set_J - } - + } + # set of selectable yet mandatory arcs - + set_J_mdt = { - (g, u, v): - [j for j in set_J[(g, u, v)] - if j not in set_J_pre[(g,u,v)] # new - if j not in set_J_col[(g,u,v)] # individual - if (g, u, v, j) in self.mandatory_arcs] + (g, u, v): [ + j + for j in set_J[(g, u, v)] + if j not in set_J_pre[(g, u, v)] # new + if j not in set_J_col[(g, u, v)] # individual + if (g, u, v, j) in self.mandatory_arcs + ] for (g, u, v) in set_J - #if (g,u,v) not in set_J_pre - } - + # if (g,u,v) not in set_J_pre + } + # arcs using interfaces (new and optional arcs only) - + set_J_int = { - (g, u, v): - [j # all arcs that are to use interface variables - for j in set_J[(g,u,v)] - if j not in set_J_pre[(g,u,v)] # new - if j not in set_J_mdt[(g,u,v)] # optional - if j not in set_J_col[(g,u,v)] # individual - if (g,u,v,j) in self.use_arc_interface_variables - ] + (g, u, v): [ + j # all arcs that are to use interface variables + for j in set_J[(g, u, v)] + if j not in set_J_pre[(g, u, v)] # new + if j not in set_J_mdt[(g, u, v)] # optional + if j not in set_J_col[(g, u, v)] # individual + if (g, u, v, j) in 
self.use_arc_interface_variables + ] for (g, u, v) in set_J - } - + } + # set of selectable arcs whose investment decisions are modelled as sos - + set_J_arc_sos1 = { - (g, u, v): - [j - for j in set_J[(g,u,v)] - if j not in set_J_pre[(g,u,v)] # new - if j not in set_J_col[(g,u,v)] # individual - if (g,u,v,j) in self.use_sos1_arc_inv - # if isinstance(self.networks[g].edges[(u,v,j)][ - # Network.KEY_ARC_TECH], ArcsSOS) - ] - for (g,u,v) in set_J - #if (g,u,v) not in set_J_pre - } - + (g, u, v): [ + j + for j in set_J[(g, u, v)] + if j not in set_J_pre[(g, u, v)] # new + if j not in set_J_col[(g, u, v)] # individual + if (g, u, v, j) in self.use_sos1_arc_inv + # if isinstance(self.networks[g].edges[(u,v,j)][ + # Network.KEY_ARC_TECH], ArcsSOS) + ] + for (g, u, v) in set_J + # if (g,u,v) not in set_J_pre + } + # set of selectable arcs whose investment decisions are modelled as nnr - + set_J_arc_nnr = { - (g, u, v): - [j - for j in set_J_arc_sos1[(g,u,v)] - if (g,u,v) in set_J_mdt - if j in set_J_mdt[(g,u,v)] - if ((g, u, v, j) in - self.use_real_arc_inv_variables_if_possible) - ] - for (g,u,v) in set_J_arc_sos1 - #if (g,u,v) in set_J_mdt - } - + (g, u, v): [ + j + for j in set_J_arc_sos1[(g, u, v)] + if (g, u, v) in set_J_mdt + if j in set_J_mdt[(g, u, v)] + if ((g, u, v, j) in self.use_real_arc_inv_variables_if_possible) + ] + for (g, u, v) in set_J_arc_sos1 + # if (g,u,v) in set_J_mdt + } + # set of undirected arcs whose flow sense is modelled using sos1 - + set_J_sns_sos1 = { - (g, u, v): - [j - for j in set_J_und[(g,u,v)] - if (g,u,v,j) in self.use_sos1_flow_sense] - for (g,u,v) in set_J_und - } - + (g, u, v): [ + j + for j in set_J_und[(g, u, v)] + if (g, u, v, j) in self.use_sos1_flow_sense + ] + for (g, u, v) in set_J_und + } + # set of undirected arcs whose flow sense is modelled using nnr - + set_J_sns_nnr = { - (g, u, v): - [j - for j in set_J_sns_sos1[(g,u,v)] - if (g, u, v, j) in self.use_real_sense_variables_if_possible - ] - for (g,u,v) in set_J_sns_sos1 - } + (g, u, v): [ + j + for j in set_J_sns_sos1[(g, u, v)] + if (g, u, v, j) in self.use_real_sense_variables_if_possible + ] + for (g, u, v) in set_J_sns_sos1 + } # set of arcs with fixed losses # note: there cannot be arcs with fixed losses between import and ex- # port nodes, as the nodes cannot be used to place the losses in set_J_stt = { - (g, u, v): - [j - for j in set_J[(g, u, v)] - if self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].has_static_losses() - ] + (g, u, v): [ + j + for j in set_J[(g, u, v)] + if self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .has_static_losses() + ] for (g, u, v) in set_J - } - + } + # TODO: replace this with validation rule in the pyomo model, asap - + for u in self.networks[g].import_nodes: - for v in self.networks[g].export_nodes: - - if (g, u, v) in set_J_stt and len(set_J_stt[(g,u,v)]) != 0: - + if (g, u, v) in set_J_stt and len(set_J_stt[(g, u, v)]) != 0: raise ValueError( - 'There cannot be arcs with fixed losses between import' - +' and export nodes.' - ) - + "There cannot be arcs with fixed losses between import" + + " and export nodes." + ) + # for (g,u,v) in set_J_stt: - + # for j in set_J_stt[(g,u,v)]: - + # if (u in self.networks[g].import_nodes and # v in self.networks[g].export_nodes): - + # raise ValueError( # 'There cannot be arcs with fixed losses between import' # +' and export nodes.' 
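# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# The arc subsets above repeatedly filter parallel-arc indices j per (g, u, v)
# through successive exclusions; the same pattern on toy data:
set_J = {("g", "a", "b"): [0, 1, 2]}
set_J_pre = {("g", "a", "b"): [0]}  # preselected
set_J_col = {("g", "a", "b"): [1]}  # collectively selected
remaining = {
    guv: [j for j in js if j not in set_J_pre[guv] if j not in set_J_col[guv]]
    for guv, js in set_J.items()
}
assert remaining == {("g", "a", "b"): [2]}
# ------------------------------------------------------------------------------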
# ) - + # directed arcs: # 1) in the start node (also upstream) # 2) in the end node (also downstream) @@ -2387,971 +2248,981 @@ class InfrastructurePlanningProblem(EnergySystem): # sets: # static_a: directed and undirected # static_b: directed and undirected - # static_us: undirected + # static_us: undirected # static_ds: undirected - + # set of (directed) arcs with static losses in the departure node - + set_J_stt_dep = { - (g, u, v): - [j - for j in set_J_stt[(g,u,v)] - if (g,u,v) in self.static_losses_departure_node - if j in self.static_losses_departure_node[(g,u,v)] - ] + (g, u, v): [ + j + for j in set_J_stt[(g, u, v)] + if (g, u, v) in self.static_losses_departure_node + if j in self.static_losses_departure_node[(g, u, v)] + ] for (g, u, v) in set_J_stt # if u in self.networks[g].import_nodes # if v not in self.networks[g].export_nodes - } + } # TODO: replace this with validation rule in the pyomo model, asap - for (g,u,v) in set_J_stt_dep: - - for j in set_J_stt_dep[(g,u,v)]: - + for g, u, v in set_J_stt_dep: + for j in set_J_stt_dep[(g, u, v)]: if u in self.networks[g].import_nodes: - raise ValueError( - 'Static losses cannot be placed upstream if the start ' - +'node is an import node.' - ) + "Static losses cannot be placed upstream if the start " + + "node is an import node." + ) # set of (directed) arcs with static losses in the arrival node # note: export arcs cannot have fixed losses placed downstream; if ex- # port arcs are found in place_fixed_arc_losses_downstream_if_possible # they will not be modelled as having their losses upstream (default) - + set_J_stt_arr = { - (g, u, v): - [j - for j in set_J_stt[(g,u,v)] - if (g,u,v) in self.static_losses_arrival_node - if j in self.static_losses_arrival_node[(g,u,v)] - ] + (g, u, v): [ + j + for j in set_J_stt[(g, u, v)] + if (g, u, v) in self.static_losses_arrival_node + if j in self.static_losses_arrival_node[(g, u, v)] + ] for (g, u, v) in set_J_stt # if u in self.networks[g].import_nodes # if v not in self.networks[g].export_nodes - } + } # TODO: replace this with validation rule in the pyomo model, asap - for (g,u,v) in set_J_stt_arr: - - for j in set_J_stt_arr[(g,u,v)]: - + for g, u, v in set_J_stt_arr: + for j in set_J_stt_arr[(g, u, v)]: if v in self.networks[g].export_nodes: - raise ValueError( - 'Static losses cannot be placed downstream if the end ' - +'node is an export node.' - ) - - # set of (directed) arcs with static losses upstream - + "Static losses cannot be placed downstream if the end " + + "node is an export node." + ) + + # set of (directed) arcs with static losses upstream + set_J_stt_us = { - (g, u, v): - [j - for j in set_J_stt[(g,u,v)] - if (g,u,v) in self.static_losses_upstream - if j in self.static_losses_upstream[(g,u,v)] - ] + (g, u, v): [ + j + for j in set_J_stt[(g, u, v)] + if (g, u, v) in self.static_losses_upstream + if j in self.static_losses_upstream[(g, u, v)] + ] for (g, u, v) in set_J_stt # if u in self.networks[g].import_nodes # if v not in self.networks[g].export_nodes - } + } # TODO: replace this with validation rule in the pyomo model, asap - for (g,u,v) in set_J_stt_us: - - for j in set_J_stt_us[(g,u,v)]: - - if (u in self.networks[g].import_nodes or - v in self.networks[g].export_nodes): - + for g, u, v in set_J_stt_us: + for j in set_J_stt_us[(g, u, v)]: + if ( + u in self.networks[g].import_nodes + or v in self.networks[g].export_nodes + ): raise ValueError( - 'Undirected arcs cannot connect import or export nodes' - +'.' 
- ) - + "Undirected arcs cannot connect import or export nodes" + "." + ) + # set of (directed) arcs with static losses downstream - + set_J_stt_ds = { - (g, u, v): - [j - for j in set_J_stt[(g,u,v)] - if (g,u,v) in self.static_losses_downstream - if j in self.static_losses_downstream[(g,u,v)] - ] + (g, u, v): [ + j + for j in set_J_stt[(g, u, v)] + if (g, u, v) in self.static_losses_downstream + if j in self.static_losses_downstream[(g, u, v)] + ] for (g, u, v) in set_J_stt # if u in self.networks[g].import_nodes # if v not in self.networks[g].export_nodes - } + } # TODO: replace this with validation rule in the pyomo model, asap - for (g,u,v) in set_J_stt_ds: - - for j in set_J_stt_ds[(g,u,v)]: - - if (u in self.networks[g].import_nodes or - v in self.networks[g].export_nodes): - + for g, u, v in set_J_stt_ds: + for j in set_J_stt_ds[(g, u, v)]: + if ( + u in self.networks[g].import_nodes + or v in self.networks[g].export_nodes + ): raise ValueError( - 'Undirected arcs cannot connect import or export nodes' - +'.' - ) - + "Undirected arcs cannot connect import or export nodes" + "." + ) + # TODO: make sure the stt sets are all mutually-exclusive - + for guv, j in set_J_stt.items(): - # a and b, a and us, a and ds, b and us, b and ds, us and ds - - if ((j in set_J_stt_dep[guv] and j in set_J_stt_arr[guv]) or - (j in set_J_stt_dep[guv] and j in set_J_stt_us[guv]) or - (j in set_J_stt_dep[guv] and j in set_J_stt_ds[guv]) or - (j in set_J_stt_arr[guv] and j in set_J_stt_us[guv]) or - (j in set_J_stt_arr[guv] and j in set_J_stt_ds[guv]) or - (j in set_J_stt_us[guv] and j in set_J_stt_ds[guv])): - + + if ( + (j in set_J_stt_dep[guv] and j in set_J_stt_arr[guv]) + or (j in set_J_stt_dep[guv] and j in set_J_stt_us[guv]) + or (j in set_J_stt_dep[guv] and j in set_J_stt_ds[guv]) + or (j in set_J_stt_arr[guv] and j in set_J_stt_us[guv]) + or (j in set_J_stt_arr[guv] and j in set_J_stt_ds[guv]) + or (j in set_J_stt_us[guv] and j in set_J_stt_ds[guv]) + ): raise ValueError( - 'Each arc with static losses can only be modelled one way.' - ) - + "Each arc with static losses can only be modelled one way." 
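# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# The check above requires the four static-loss placement sets (departure,
# arrival, upstream, downstream) to be mutually exclusive; a standalone
# pairwise test over hypothetical per-arc index sets:
from itertools import combinations

placements = [{0}, {1}, set(), {2}]  # dep, arr, us, ds for one (g, u, v)
for a, b in combinations(placements, 2):
    assert not (a & b), "each arc with static losses gets exactly one placement"
# ------------------------------------------------------------------------------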
+ ) + # set of options for a given arc - + set_H_gllj = { - (g, u, v, j): - tuple(h - for h in range(len(self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].capacity))) + (g, u, v, j): tuple( + h + for h in range( + len( + self.networks[g].edges[(u, v, j)][Network.KEY_ARC_TECH].capacity + ) + ) + ) for g in self.networks.keys() for u, v in self.networks[g].edges() - for j in set_J[(g,u,v)] - if j not in set_J_pre[(g,u,v)] # new - if j not in set_J_col[(g, u, v)] # individual - } - + for j in set_J[(g, u, v)] + if j not in set_J_pre[(g, u, v)] # new + if j not in set_J_col[(g, u, v)] # individual + } + # ********************************************************************* # ********************************************************************* # ********************************************************************* - + # ancillary sets - - set_GLLJ = tuple( - (g,u,v,j) - for (g,u,v) in set_J - for j in set_J[(g,u,v)] - ) - + + set_GLLJ = tuple((g, u, v, j) for (g, u, v) in set_J for j in set_J[(g, u, v)]) + set_GLLJ_und = tuple( - (g,u,v,j) - for (g,u,v,j) in set_GLLJ - for (g,u,v) in set_J_und - if j in set_J_und[(g,u,v)] - ) - - set_GLLJ_und_ext = set((g,v,u,j) for (g,u,v,j) in set_GLLJ_und) - + (g, u, v, j) + for (g, u, v, j) in set_GLLJ + for (g, u, v) in set_J_und + if j in set_J_und[(g, u, v)] + ) + + set_GLLJ_und_ext = set((g, v, u, j) for (g, u, v, j) in set_GLLJ_und) + set_GLLJ_new = tuple( - (g,l1,l2,j) - for (g,l1,l2,j) in set_GLLJ - if j not in set_J_pre[(g,l1,l2)] - ) - + (g, l1, l2, j) + for (g, l1, l2, j) in set_GLLJ + if j not in set_J_pre[(g, l1, l2)] + ) + set_GLLJ_int = tuple( - (g,u,v,j) for (g,u,v) in set_J_int for j in set_J_int[(g,u,v)] - ) - + (g, u, v, j) for (g, u, v) in set_J_int for j in set_J_int[(g, u, v)] + ) + set_GLLJ_static = tuple( - (g,l1,l2,j) - for (g,l1,l2) in set_J_stt - for j in set_J_stt[(g,l1,l2)] - ) - + (g, l1, l2, j) for (g, l1, l2) in set_J_stt for j in set_J_stt[(g, l1, l2)] + ) + set_GLLJ_static_pre = tuple( - (g,l1,l2,j) - for (g,l1,l2,j) in set_GLLJ_static - if j in set_J_pre[(g,l1,l2)] - ) - + (g, l1, l2, j) + for (g, l1, l2, j) in set_GLLJ_static + if j in set_J_pre[(g, l1, l2)] + ) + set_GLLJ_static_new = tuple( - (g,l1,l2,j) - for (g,l1,l2,j) in set_GLLJ_static - if j not in set_J_pre[(g,l1,l2)] - ) - + (g, l1, l2, j) + for (g, l1, l2, j) in set_GLLJ_static + if j not in set_J_pre[(g, l1, l2)] + ) + # red sets - + set_GLLJ_red = set(set_GLLJ) set_GLLJ_red.update(set_GLLJ_und_ext) - + set_GLLJ_und_red = set(set_GLLJ_und) set_GLLJ_und_red.update(set_GLLJ_und_ext) - + set_GLLJ_static_und_red = set( - gllj for gllj in set_GLLJ_und - if gllj in set_GLLJ_static - ) - + gllj for gllj in set_GLLJ_und if gllj in set_GLLJ_static + ) + set_GLLJ_static_und_red.update( - tuple((g,v,u,j) for (g,u,v,j) in set_GLLJ_static_und_red) - ) - + tuple((g, v, u, j) for (g, u, v, j) in set_GLLJ_static_und_red) + ) + # ********************************************************************* # ********************************************************************* - + # set of arc groups - + set_T = tuple(t for t in self.arc_groups.keys()) - + # set of mandatory arc groups - + set_T_mdt = tuple(t for t in set_T if self.groups_mdt[t]) - + # set of arc groups requiring interface variables - + set_T_int = tuple(t for t in set_T if self.groups_int[t]) - + # set of arc groups relying on SOS1 - + set_T_sos1 = tuple(t for t in set_T if self.groups_arc_sos1[t]) - + # set of arc groups relying on non-negative real variables - - set_T_nnr = tuple(t for t in set_T if
self.groups_arc_nnr[t]) - + + set_T_nnr = tuple(t for t in set_T if self.groups_arc_nnr[t]) + # set of arc groups relying on binary variables - + set_T_bin = tuple(t for t in set_T if t not in set_T_nnr) - + # set of arcs in the various arc groups - + # set_GLLJ_col_t = { # t: self.arc_groups[t] # for t in set_T # } - + set_GLLJ_col_t = self.arc_groups - + # set of arc options for arc groups - + set_H_t = { - t: tuple(h for h in range(self.groups_number_options[t])) - for t in set_T - } - + t: tuple(h for h in range(self.groups_number_options[t])) for t in set_T + } + + # ********************************************************************* # ********************************************************************* - # ********************************************************************* # ********************************************************************* # ********************************************************************* - + # parameters - + # ********************************************************************* - # ********************************************************************* # ********************************************************************* # ********************************************************************* - + # ********************************************************************* + # objective function - - # ********************************************************************* + + # ********************************************************************* # ********************************************************************* - + # assessment weight - + if type(self.assessment_weights) != dict: - param_c_wgt_q = {} - + else: - param_c_wgt_q = self.assessment_weights - + # weight of each time interval k within the period p - + if type(self.time_weights) != dict: - - param_c_time_qpk = {} # rely on default values - + param_c_time_qpk = {} # rely on default values + else: - # param_c_time_qpk = { # (q, p, k): self.time_weights[(q,p,k)] # for (q,p) in set_QP # for k in set_K_q[q] # } - + param_c_time_qpk = self.time_weights - + # discount factors for each period - + param_c_df_qp = { - (q,p): discount_factor(self.discount_rates[q][0:p+1]) + (q, p): discount_factor(self.discount_rates[q][0 : p + 1]) # (q,p): self.investments[q].discount_factors[p+1] - for (q,p) in set_QP - } - + for (q, p) in set_QP + } + # prices - + param_p_glqpks = { - (g,l,q,p,k,s): - self.networks[g].nodes[l][ - Network.KEY_NODE_PRICES][(q,p,k)].prices[s] - for (g,l,q,p,k) in set_S - for s in set_S[(g,l,q,p,k)] - } - + (g, l, q, p, k, s): self.networks[g] + .nodes[l][Network.KEY_NODE_PRICES][(q, p, k)] + .prices[s] + for (g, l, q, p, k) in set_S + for s in set_S[(g, l, q, p, k)] + } + # maximum resource volume per segment (infinity is the default) - + param_v_max_glqpks = { - (g,l,q,p,k,s): - self.networks[g].nodes[l][ - Network.KEY_NODE_PRICES][(q,p,k)].volumes[s] - for (g,l,q,p,k) in set_S - for s in set_S[(g,l,q,p,k)] + (g, l, q, p, k, s): self.networks[g] + .nodes[l][Network.KEY_NODE_PRICES][(q, p, k)] + .volumes[s] + for (g, l, q, p, k) in set_S + for s in set_S[(g, l, q, p, k)] # skip if not numeric, since that means infinity (=default) if isinstance( - self.networks[g].nodes[l][ - Network.KEY_NODE_PRICES][(q,p,k)].volumes[s], - Real - ) - } - - # ********************************************************************* + self.networks[g] + .nodes[l][Network.KEY_NODE_PRICES][(q, p, k)] + .volumes[s], + Real, + ) + } + # ********************************************************************* - + # 
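# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# param_c_df_qp above slices the per-period discount rates up to period p;
# assuming discount_factor compounds them as prod(1 / (1 + r)), a hypothetical
# stand-in for the imported helper behaves like this:
def _discount_factor(rates):
    out = 1.0
    for r in rates:
        out /= 1.0 + r
    return out

rates = (0.05, 0.05)
assert abs(_discount_factor(rates) - 1 / 1.05**2) < 1e-12
# ------------------------------------------------------------------------------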
********************************************************************* + # converters - - # ********************************************************************* + + # ********************************************************************* # ********************************************************************* - + # objective function - - # ********************************************************************* - + + # ********************************************************************* + # minimum cost - + param_c_cvt_i = { converter_key: converter.minimum_cost for converter_key, converter in self.converters.items() - } - + } + # input specific amplitude costs - + param_c_cvt_u_im = { (converter_key, m): converter.specific_input_amplitude_cost[m] for converter_key, converter in self.converters.items() if converter_key in self.optional_converters for m in converter.amplitude_penalised_inputs - } - + } + # state specific amplitude costs - + param_c_cvt_x_in = { (converter_key, n): converter.specific_state_amplitude_cost[n] for converter_key, converter in self.converters.items() if converter_key in self.optional_converters for n in converter.amplitude_penalised_states - } - + } + # output specific amplitude costs - + param_c_cvt_y_ir = { (converter_key, r): converter.specific_output_amplitude_cost[r] for converter_key, converter in self.converters.items() if converter_key in self.optional_converters for r in converter.amplitude_penalised_outputs - } - + } + # input specific externality costs - + param_c_ext_u_imqk = { - (converter_key, m, q, k): - converter.specific_input_externality_cost[(m,q,k)] + (converter_key, m, q, k): converter.specific_input_externality_cost[ + (m, q, k) + ] for converter_key, converter in self.converters.items() for m in converter.externality_inducing_inputs - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # state specific externality costs - + param_c_ext_x_inqk = { - (converter_key, n, q, k): - converter.specific_state_externality_cost[(n,q,k)] + (converter_key, n, q, k): converter.specific_state_externality_cost[ + (n, q, k) + ] for converter_key, converter in self.converters.items() for n in converter.externality_inducing_states - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # output specific externality costs - + param_c_ext_y_irqk = { - (converter_key, r, q, k): - converter.specific_output_externality_cost[(r,q,k)] + (converter_key, r, q, k): converter.specific_output_externality_cost[ + (r, q, k) + ] for converter_key, converter in self.converters.items() for r in converter.externality_inducing_outputs - for (q,k) in set_QK - } - - # ********************************************************************* - + for (q, k) in set_QK + } + + # ********************************************************************* + # inputs - - # ********************************************************************* - + + # ********************************************************************* + # upper bound for input signals - + param_u_ub_imqk = { (converter_key, m, q, k): converter.u_upper_bounds[k] for converter_key, converter in self.converters.items() for m in converter.bounded_inputs # if m not in converter.u_is_dimensionable # cannot be dimensionable # if m not in converter.u_is_binary # cannot be binary - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # maximum amplitude for inputs - + param_u_lim_max_im = { (converter_key, m): converter.u_max_amplitude[m] for converter_key, converter in self.converters.items() if converter_key in 
self.optional_converters for m in converter.dimensionable_inputs - } - + } + # u rated adjustment coefficient (1 if the time intervals are regular) - + param_f_amp_u_imqk = {} - + # if self.intraperiod_time_intervals_are_regular: - + # # an empty dictionary is enough because the default value is 1 - + # param_f_amp_u_imqk = {} - + # else: - + # # TODO: test irregular time intervals - + # param_f_amp_u_imqk = {} - - # ********************************************************************* - + + # ********************************************************************* + # outputs - + # ********************************************************************* - + # upper bounds for outputs (default: none) - + param_y_ub_irqk = { # (converter_key, r, q, k): converter.u_upper_bounds[k] # for converter_key, converter in self.converters.items() # for r in converter.bounded_outputs # for k in set_K - } - + } + # maximum positive amplitude for outputs - + param_y_pos_lim_max_ir = { # (converter_key, m): converter.u_max_amplitude[m] # for converter_key, converter in self.converters.items() # if converter_key in self.optional_converters # for m in converter.dimensionable_inputs - } - + } + # minimum positive amplitude for outputs - + param_y_pos_lim_min_ir = { # (converter_key, m): converter.u_min_amplitude[m] # for converter_key, converter in self.converters.items() # if converter_key in self.optional_converters # for m in converter.dimensionable_inputs # # minimum amplitude limits are not mandatory, the default is zero - # if (converter.u_min_amplitude[m] != 0 or + # if (converter.u_min_amplitude[m] != 0 or # converter.u_min_amplitude[m] != None) - } - + } + # maximum negative amplitude for outputs - + param_y_neg_lim_max_ir = { # (converter_key, m): converter.u_max_amplitude[m] # for converter_key, converter in self.converters.items() # if converter_key in self.optional_converters # for m in converter.dimensionable_inputs - } - + } + # minimum negative amplitude for outputs - + param_y_neg_lim_min_ir = { # (converter_key, m): converter.u_min_amplitude[m] # for converter_key, converter in self.converters.items() # if converter_key in self.optional_converters # for m in converter.dimensionable_inputs # # minimum amplitude limits are not mandatory, the default is zero - # if (converter.u_min_amplitude[m] != 0 or + # if (converter.u_min_amplitude[m] != 0 or # converter.u_min_amplitude[m] != None) - } - + } + # y rated adjustment coefficient (1 if the time intervals are regular) - + param_f_amp_y_irqk = {} - + # if self.intraperiod_time_intervals_are_regular: - + # # an empty dictionary is enough because the default value is 1 - + # param_f_amp_y_irqk = {} - + # else: - + # # TODO: test irregular time intervals - + # param_f_amp_y_irqk = {} - + # output equations: C matrix coefficients - + param_c_eq_y_irnqk = { - (converter_key, r, n, q, k): - 0 #converter.dssm[k].C[r,n] + (converter_key, r, n, q, k): 0 # converter.dssm[k].C[r,n] for converter_key, converter in self.converters.items() for r in range(converter.number_outputs) for n in range(converter.number_states) - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # output equations: D matrix coefficients - + param_d_eq_y_irmqk = { - (converter_key, r, m, q, k): - 0 #converter.dssm[k].C[r,n] + (converter_key, r, m, q,
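# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# Several parameter dictionaries above stay empty on purpose: in pyomo, a Param
# declared with a default only needs explicit entries that differ from it.
import pyomo.environ as pyo

_m = pyo.ConcreteModel()
_m.I = pyo.Set(initialize=[0, 1])
_m.f_amp = pyo.Param(_m.I, initialize={}, default=1)  # every entry reads as 1
assert pyo.value(_m.f_amp[0]) == 1
# ------------------------------------------------------------------------------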
k): 0 # converter.dssm[k].C[r,n] for converter_key, converter in self.converters.items() for r in range(converter.number_outputs) for m in range(converter.number_inputs) - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # output equations: constant term - + param_e_eq_y_irqk = { - (converter_key, r, q, k): - 0 #converter.dssm[k].C[r,n] + (converter_key, r, q, k): 0 # converter.dssm[k].C[r,n] for converter_key, converter in self.converters.items() for r in range(converter.number_outputs) - for (q,k) in set_QK - } - - # ********************************************************************* - + for (q, k) in set_QK + } + + # ********************************************************************* + # states - + # state equations: A matrix coefficients - + param_a_eq_x_innqk = { - # (converter_key, r, n, k): + # (converter_key, r, n, k): # 0 #converter.dssm[k].C[r,n] # for converter_key, converter in self.converters.items() # for r in range(converter.number_outputs) # for n in range(converter.number_states) # for k in set_K - } - + } + # state equations: B matrix coefficients - + param_b_eq_x_inmqk = { - # (converter_key, r, m, k): + # (converter_key, r, m, k): # 0 #converter.dssm[k].C[r,n] # for converter_key, converter in self.converters.items() # for r in range(converter.number_outputs) # for m in range(converter.number_inputs) # for k in set_K - } - + } + # state equations: constant term - + param_e_eq_x_inqk = { - # (converter_key, r, k): + # (converter_key, r, k): # 0 #converter.dssm[k].C[r,n] # for converter_key, converter in self.converters.items() # for r in range(converter.number_outputs) # for k in set_K - } - + } + # initial states - - param_x_inq0 = { - } - + + param_x_inq0 = {} + # upper bounds for states (default: none) - - param_x_ub_inqk = { - } - + + param_x_ub_inqk = {} + # lower bounds for states (default: none) - - param_x_lb_inqk = { - } - + + param_x_lb_inqk = {} + # maximum positive amplitude for states - - param_x_pos_lim_max_in = { - } - + + param_x_pos_lim_max_in = {} + # minimum positive amplitude for states - - param_x_pos_lim_min_in = { - } - + + param_x_pos_lim_min_in = {} + # maximum negative amplitude for states - - param_x_neg_lim_max_in = { - } - + + param_x_neg_lim_max_in = {} + # minimum negative amplitude for states - - param_x_neg_lim_min_in = { - } - + + param_x_neg_lim_min_in = {} + # x rated adjustment coefficient (1 if the time intervals are regular) - + param_f_amp_x_inqk = {} - + # if self.intraperiod_time_intervals_are_regular: - + # # an empty dictionary is enough because the default value is 1 - + # param_f_amp_x_inqk = {} - + # else: - + # # TODO: test irregular time intervals - + # param_f_amp_x_inqk = {} - - # ********************************************************************* - # ********************************************************************* - + + # ********************************************************************* + # ********************************************************************* + # nodes - - # ********************************************************************* + + # ********************************************************************* # ********************************************************************* - + # static balance for nodes (default: 0) - + param_v_base_glqk = { - (g,l,q,k): self.networks[g].nodes[l][ - Network.KEY_NODE_BASE_FLOW][(q,k)] - if l not in self.networks[g].waypoint_nodes else 0 - for (g,l) in set_GL_not_exp_imp + (g, l, q, k): self.networks[g].nodes[l][Network.KEY_NODE_BASE_FLOW][(q, k)] + if
l not in self.networks[g].waypoint_nodes + else 0 + for (g, l) in set_GL_not_exp_imp # for g in self.networks.keys() # for l in self.networks[g].nodes # if l not in self.networks[g].import_nodes # if l not in self.networks[g].export_nodes # if l not in self.networks[g].waypoint_nodes # relies on default (0) - for (q,k) in set_QK} - + for (q, k) in set_QK + } + # maximum number of parallel arcs (default in pyomo: 1) - + param_max_number_parallel_arcs = { (g, u, v): self.max_number_parallel_arcs[(g, u, v)] for (g, u, v) in self.max_number_parallel_arcs if ((g, u, v) in set_J or (g, v, u) in set_J) - } - + } + # effect of converter inputs on networks - + param_a_nw_glimqk = { # (grid, node_key, converter_key, m, k): a_glmk # for converter_key, converter in self.converters.items() # for (grid, node_key, m, k), a_glmk in dynsys.a_glmk.items() - } - + } + # effect of converter outputs on networks - + param_a_nw_glirqk = { # (grid, node_key, dynsys_key, m, k): a_glmk # for dynsys_key, dynsys in self.dynamic_systems.items() # for (grid, node_key, m, k), a_glmk in dynsys.a_glmk.items() - } - - # ********************************************************************* + } + # ********************************************************************* - + # ********************************************************************* + # arcs - - # ********************************************************************* + + # ********************************************************************* # ********************************************************************* - + # nominal flow amplitude limit adjustment coefficient (1 if the time intervals are regular) - + # instantaneous, regular: f = time interval duration # instantaneous, irregular: f = time interval duration # not instantaneous, regular: f = time interval duration / reference time interval duration # not instantaneous, irregular: f = time interval duration / reference time interval duration - + # note: if capacities are instantaneous, that means they still need to # be adjusted to consider the actual time interval durations, i.e., the # quantities increase with time step duration - + # note: non-instantaneous capacities should be determined using the re- # ference time interval duration since that causes the standard case to # be simplified to 1 (all time steps are equal to the reference one) - + param_f_amp_v_glljqk = {} - - param_f_amp_v_glljqk.update({ - (g, u, v, j, q, k): - (self.time_intervals[q][k] # instantaneous - if self.networks[g].edges[ - (u,v,j)][ - Network.KEY_ARC_TECH].capacity_is_instantaneous - else # non-instantaneous: modulate - self.normalised_time_interval_duration[(q,k)]) - for (g, u, v, j) in set_GLLJ_new - for (q,k) in set_QK # for each time interval - }) - + + param_f_amp_v_glljqk.update( + { + (g, u, v, j, q, k): ( + self.time_intervals[q][k] # instantaneous + if self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .capacity_is_instantaneous + else self.normalised_time_interval_duration[ # non-instantaneous: modulate + (q, k) + ] + ) + for (g, u, v, j) in set_GLLJ_new + for (q, k) in set_QK # for each time interval + } + ) + # transmission efficiency - + reverse_none_means_isotropic = True - + param_eta_glljqk = { - (g, u, v, j, q, k): - self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].efficiency[(q,k)] - if self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].has_proportional_losses() else 1 + (g, u, v, j, q, k): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .efficiency[(q, k)] + if 
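# --- illustrative sketch (editor's addition, not part of the upstream patch) --
# The coefficient built above follows the note: instantaneous capacities are
# scaled by the interval duration itself, non-instantaneous ones by the
# duration normalised to the reference interval (so regular steps give 1):
duration_s, reference_s = 3600.0, 3600.0  # hypothetical interval durations
capacity_is_instantaneous = False
f_amp = duration_s if capacity_is_instantaneous else duration_s / reference_s
assert f_amp == 1.0
# ------------------------------------------------------------------------------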
self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .has_proportional_losses() + else 1 for (g, u, v) in set_J for j in set_J[(g, u, v)] - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # reverse direction - - param_eta_glljqk.update({ - (g, v, u, j, q, k): ( - self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].efficiency_reverse[(q,k)] - if not self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].is_isotropic( - reverse_none_means_isotropic= - reverse_none_means_isotropic - ) - else self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].efficiency[(q,k)] - ) if self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].has_proportional_losses() else 1 - for (g, u, v, j, q, k) in param_eta_glljqk - if j in set_J_und[(g, u, v)] - }) + + param_eta_glljqk.update( + { + (g, v, u, j, q, k): ( + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .efficiency_reverse[(q, k)] + if not self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .is_isotropic( + reverse_none_means_isotropic=reverse_none_means_isotropic + ) + else self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .efficiency[(q, k)] + ) + if self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .has_proportional_losses() + else 1 + for (g, u, v, j, q, k) in param_eta_glljqk + if j in set_J_und[(g, u, v)] + } + ) # set_GLLJ_red = tuple(set( # (g, u, v, j) for (g, u, v, j, k) in param_eta_glljk.keys() # )) - + # cost of installing arcs - + param_c_arc_min_glljh = { - (g, u, v, j, h): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].minimum_cost[h] + (g, u, v, j, h): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .minimum_cost[h] for (g, u, v) in set_J for j in set_J[(g, u, v)] - if j not in set_J_pre[(g, u, v)] # new - if j not in set_J_col[(g, u, v)] # individual + if j not in set_J_pre[(g, u, v)] # new + if j not in set_J_col[(g, u, v)] # individual for h in set_H_gllj[(g, u, v, j)] - } - + } + set_GLLJH_sgl = tuple(param_c_arc_min_glljh.keys()) - + # arc capacities (pre-defined) - + param_v_ub_glljqk = {} - + # nominal direction - - param_v_ub_glljqk.update({ - (g, u, v, j, q, k): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].capacity[ - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].options_selected.index(True) - ] - - - (self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].static_loss[ - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].options_selected.index(True), - q,k - ] if (((g,u,v) in set_J_stt_dep and - j in set_J_stt_dep[(g,u,v)]) or - ((g,u,v) in set_J_stt_us and - j in set_J_stt_us[(g,u,v)])) else 0) - for (g, u, v) in set_J_pre - for j in set_J_pre[(g, u, v)] - if j not in set_J_pre_inf[(g, u, v)] - for (q,k) in set_QK - }) - + + param_v_ub_glljqk.update( + { + (g, u, v, j, q, k): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .capacity[ + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ] + - ( + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .static_loss[ + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected.index(True), + q, + k, + ] + if ( + ((g, u, v) in set_J_stt_dep and j in set_J_stt_dep[(g, u, v)]) + or ((g, u, v) in set_J_stt_us and j in set_J_stt_us[(g, u, v)]) + ) + else 0 + ) + for (g, u, v) in set_J_pre + for j in set_J_pre[(g, u, v)] + if j not in set_J_pre_inf[(g, u, v)] + for (q, k) in set_QK + } + ) + # reverse direction - - param_v_ub_glljqk.update({ - (g, v, u, j, q, k): - 
self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].capacity[ - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].options_selected.index(True) - ] - - - (self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].static_loss[ - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].options_selected.index(True), - q,k - ] if (((g,u,v) in set_J_stt_arr and - j in set_J_stt_arr[(g,u,v)]) or - ((g,u,v) in set_J_stt_us and - j in set_J_stt_us[(g,u,v)])) else 0) - for (g, u, v) in set_J_pre - for j in set_J_pre[(g, u, v)] - if j not in set_J_pre_inf[(g, u, v)] - if j in set_J_und[(g, u, v)] - for (q,k) in set_QK - }) - + + param_v_ub_glljqk.update( + { + (g, v, u, j, q, k): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .capacity[ + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ] + - ( + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .static_loss[ + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected.index(True), + q, + k, + ] + if ( + ((g, u, v) in set_J_stt_arr and j in set_J_stt_arr[(g, u, v)]) + or ((g, u, v) in set_J_stt_us and j in set_J_stt_us[(g, u, v)]) + ) + else 0 + ) + for (g, u, v) in set_J_pre + for j in set_J_pre[(g, u, v)] + if j not in set_J_pre_inf[(g, u, v)] + if j in set_J_und[(g, u, v)] + for (q, k) in set_QK + } + ) + # set_GLLJ_pre_fin_red = tuple(param_v_ub_glljk.keys()) - set_GLLJ_pre_fin_red = tuple(set( - (g, u, v, j) for (g, u, v, j, q, k) in param_v_ub_glljqk.keys() - )) - + set_GLLJ_pre_fin_red = tuple( + set((g, u, v, j) for (g, u, v, j, q, k) in param_v_ub_glljqk.keys()) + ) + # maximum nominal arc flow amplitude (i.e. capacity) - + param_v_amp_max_glljh = { - (g, u, v, j, h): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].capacity[h] + (g, u, v, j, h): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .capacity[h] for (g, u, v) in set_J for j in set_J[(g, u, v)] if j not in set_J_pre[(g, u, v)] if j not in set_J_col[(g, u, v)] for h in set_H_gllj[(g, u, v, j)] - } - + } + # specific capacity cost - + param_c_arc_var_gllj = { - (g, u, v, j): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].specific_capacity_cost + (g, u, v, j): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .specific_capacity_cost for (g, u, v) in set_J for j in set_J[(g, u, v)] if j not in set_J_pre[(g, u, v)] - if j not in set_J_col[(g, u, v)] # individual - } - + if j not in set_J_col[(g, u, v)] # individual + } + set_GLLJ_sgl = tuple(param_c_arc_var_gllj.keys()) - + # ********************************************************************* - + # sos1 weights for arc selection - + param_arc_inv_sos1_weights_glljh = { - (g, u, v, j, h): - self.use_sos1_arc_inv[(g,u,v,j)][h] - if self.use_sos1_arc_inv[(g,u,v,j)] is not None else h + (g, u, v, j, h): self.use_sos1_arc_inv[(g, u, v, j)][h] + if self.use_sos1_arc_inv[(g, u, v, j)] is not None + else h for (g, u, v) in set_J_arc_sos1 for j in set_J_arc_sos1[(g, u, v)] # otherwise, go through each option for h in set_H_gllj[(g, u, v, j)] - } - - set_GLLJ_arc_inv_sos1 = tuple((g,u,v,j) for (g, u, v) in set_J_arc_sos1 - for j in set_J_arc_sos1[(g, u, v)] - if (g,u,v,j) in self.use_sos1_arc_inv) - + } + + set_GLLJ_arc_inv_sos1 = tuple( + (g, u, v, j) + for (g, u, v) in set_J_arc_sos1 + for j in set_J_arc_sos1[(g, u, v)] + if (g, u, v, j) in self.use_sos1_arc_inv + ) + set_GLLJH_arc_inv_sos1 = tuple(param_arc_inv_sos1_weights_glljh.keys()) - + set_GLLJH_arc_inv_sos1_gllj = { - (g, u, v, 
j): tuple( - (g, u, v, j, h) for h in set_H_gllj[(g, u, v, j)] - ) + (g, u, v, j): tuple((g, u, v, j, h) for h in set_H_gllj[(g, u, v, j)]) for (g, u, v) in set_J_arc_sos1 for j in set_J_arc_sos1[(g, u, v)] - } - + } + # sos1 weights for flow sense determination - + param_arc_sns_sos1_weights_glljqk = { - (g, u, v, j, q, k): - 1+self.SOS1_SENSE_OFFSET - if (self.use_sos1_flow_sense[(g,u,v,j)] == - self.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER) - else self.SOS1_SENSE_OFFSET + (g, u, v, j, q, k): 1 + self.SOS1_SENSE_OFFSET + if ( + self.use_sos1_flow_sense[(g, u, v, j)] + == self.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER + ) + else self.SOS1_SENSE_OFFSET for (g, u, v) in set_J_sns_sos1 for j in set_J_sns_sos1[(g, u, v)] # otherwise, go through each option - for (q,k) in set_QK + for (q, k) in set_QK + } + + param_arc_sns_sos1_weights_glljqk.update( + { + (g, v, u, j, q, k): self.SOS1_SENSE_OFFSET + if ( + self.use_sos1_flow_sense[(g, u, v, j)] + == self.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER + ) + else 1 + self.SOS1_SENSE_OFFSET + for (g, u, v, j, q, k) in param_arc_sns_sos1_weights_glljqk } - - param_arc_sns_sos1_weights_glljqk.update({ - (g, v, u, j, q, k): - self.SOS1_SENSE_OFFSET - if (self.use_sos1_flow_sense[(g,u,v,j)] == - self.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER) - else 1+self.SOS1_SENSE_OFFSET - for (g, u, v, j, q, k) in param_arc_sns_sos1_weights_glljqk - }) - - set_GLLJQK_und_sns_sos1_red = tuple( - param_arc_sns_sos1_weights_glljqk.keys() - ) - + ) + + set_GLLJQK_und_sns_sos1_red = tuple(param_arc_sns_sos1_weights_glljqk.keys()) + # ********************************************************************* # ********************************************************************* - + # parameters for groups of arcs - + param_c_arc_min_th = { - (t,h): sum( - self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].minimum_cost[h] - for (g,u,v,j) in set_GLLJ_col_t[t] - ) + (t, h): sum( + self.networks[g].edges[(u, v, j)][Network.KEY_ARC_TECH].minimum_cost[h] + for (g, u, v, j) in set_GLLJ_col_t[t] + ) for t in set_T for h in set_H_t[t] - } - + } + param_c_arc_var_t = { t: sum( - self.networks[g].edges[(u,v,j)][ - Network.KEY_ARC_TECH].specific_capacity_cost - for (g,u,v,j) in set_GLLJ_col_t[t] - ) + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .specific_capacity_cost + for (g, u, v, j) in set_GLLJ_col_t[t] + ) for t in set_T - } - - param_v_amp_max_th = { # TODO: check if this is the case - (t,h): mean( - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].capacity[h] - for (g,u,v,j) in set_GLLJ_col_t[t] - ) + } + + param_v_amp_max_th = { # TODO: check if this is the case + (t, h): mean( + self.networks[g].edges[(u, v, j)][Network.KEY_ARC_TECH].capacity[h] + for (g, u, v, j) in set_GLLJ_col_t[t] + ) for t in set_T for h in set_H_t[t] - } - + } + param_arc_inv_sos1_weights_th = { (t, h): h # (t, h): mean( @@ -3361,14 +3232,14 @@ class InfrastructurePlanningProblem(EnergySystem): # ) for t in set_T_sos1 for h in set_H_t[t] - } - + } + # ********************************************************************* - + # fixed arc losses for preselected arcs - + # param_w_pre_glljk = { - # (g, u, v, j, k): + # (g, u, v, j, k): # self.networks[g].edges[(u, v, j)][ # Network.KEY_ARC_TECH].static_loss[( # self.networks[g].edges[(u, v, j)][ @@ -3379,25 +3250,30 @@ class InfrastructurePlanningProblem(EnergySystem): # if j in set_J_pre[(g,u,v)] # for k in set_K # } - + param_w_pre_glljqk = { - (g, u, v, j, q, k): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].static_loss[( - 
self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].options_selected.index(True), - q,k)] + (g, u, v, j, q, k): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .static_loss[ + ( + self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .options_selected.index(True), + q, + k, + ) + ] for (g, u, v, j) in set_GLLJ_static_pre # for (g, u, v) in set_J_stt # for j in set_J_stt[(g,u,v)] # if j in set_J_pre[(g,u,v)] - for (q,k) in set_QK - } - + for (q, k) in set_QK + } + # fixed arc losses for selectable arcs - + # param_w_new_glljhk = { - # (g, u, v, j, h, k): + # (g, u, v, j, h, k): # self.networks[g].edges[(u, v, j)][ # Network.KEY_ARC_TECH].static_loss[(h, 0, k)] # for (g, u, v) in set_J_stt @@ -3406,613 +3282,415 @@ class InfrastructurePlanningProblem(EnergySystem): # for h in set_H_gllj[(g, u, v, j)] # for k in set_K # } - + param_w_new_glljhqk = { - (g, u, v, j, h, q, k): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].static_loss[(h, q, k)] + (g, u, v, j, h, q, k): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .static_loss[(h, q, k)] for (g, u, v, j) in set_GLLJ_static_new - if j not in set_J_col[(g,u,v)] + if j not in set_J_col[(g, u, v)] # for j in set_J_stt[(g,u,v)] # if j not in set_J_pre[(g,u,v)] for h in set_H_gllj[(g, u, v, j)] - for (q,k) in set_QK + for (q, k) in set_QK + } + + param_w_new_glljhqk.update( + { + (g, u, v, j, h, q, k): self.networks[g] + .edges[(u, v, j)][Network.KEY_ARC_TECH] + .static_loss[(h, q, k)] + for t in set_T + for (g, u, v, j) in set_GLLJ_col_t[t] + if j in set_J_stt[(g, u, v)] + # if (g, u, v, j) in set_GLLJ_static_new + for h in set_H_t[t] + for (q, k) in set_QK } - - param_w_new_glljhqk.update({ - (g, u, v, j, h, q, k): - self.networks[g].edges[(u, v, j)][ - Network.KEY_ARC_TECH].static_loss[(h, q, k)] - for t in set_T - for (g, u, v, j) in set_GLLJ_col_t[t] - if j in set_J_stt[(g,u,v)] - # if (g, u, v, j) in set_GLLJ_static_new - for h in set_H_t[t] - for (q,k) in set_QK - }) - + ) + set_GLLJH_static_new = tuple( - set((g,u,v,j,h) for (g,u,v,j,h,q,k) in param_w_new_glljhqk.keys()) - ) - + set((g, u, v, j, h) for (g, u, v, j, h, q, k) in param_w_new_glljhqk.keys()) + ) + # ********************************************************************* # ********************************************************************* # ********************************************************************* # ********************************************************************* - + # produce a dictionary with the data for the problem - - data_dict = {None: { - - # ***************************************************************** - # ***************************************************************** - - # sets - - # ***************************************************************** - # ***************************************************************** - - # common sets - - 'set_Q': {None: set_Q}, - - 'set_P_q': set_P_q, - - 'set_K_q': set_K_q, - - 'set_QP': set_QP, - - 'set_QK': set_QK, - - 'set_QPK': set_QPK, - - # network sets - - 'set_G': {None: set_G}, - - 'set_L': set_L, - - 'set_L_imp': set_L_imp, - - 'set_L_exp': set_L_exp, - - 'set_L_max_in_g': set_L_max_in_g, - - #'set_L_max_out_g': set_L_max_out_g, - - 'set_GL': set_GL, - - 'set_GL_exp': set_GL_exp, - - 'set_GL_imp': set_GL_imp, - - 'set_GL_exp_imp': set_GL_exp_imp, - - 'set_GL_not_exp_imp': set_GL_not_exp_imp, - - 'set_GLL': set_GLL, - - # ***************************************************************** - - # tariff - - 'set_S': set_S, - - 'set_GLQPKS': set_GLQPKS, 
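# *****************************************************************************
# A minimal sketch of the Pyomo convention this data dictionary follows:
# AbstractModel.create_instance() expects a dict with a single top-level None
# key, scalar sets given as {None: iterable} and indexed components keyed by
# their index tuples. The toy model and values below are hypothetical
# stand-ins, not the actual ESIPP model components.

import pyomo.environ as pyo

toy_model = pyo.AbstractModel()
toy_model.set_Q = pyo.Set()  # e.g. the assessments
toy_model.param_c_wgt_q = pyo.Param(toy_model.set_Q)  # e.g. assessment weights

toy_data = {None: {"set_Q": {None: [0, 1]}, "param_c_wgt_q": {0: 0.5, 1: 0.5}}}
toy_instance = toy_model.create_instance(toy_data)
assert pyo.value(toy_instance.param_c_wgt_q[0]) == 0.5
# *****************************************************************************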
- - 'set_GLQPKS_exp': set_GLQPKS_exp, - - 'set_GLQPKS_imp': set_GLQPKS_imp, - - # ***************************************************************** - - # converter sets - - 'set_I': set_I, - - 'set_I_new': set_I_new, - - # input sets - - 'set_M': set_M, - - 'set_M_bin': set_M_bin, - - 'set_M_dim': set_M_dim, - - 'set_M_cost': set_M_cost, - - 'set_M_ext': set_M_ext, - - # state sets - - 'set_N': set_N, - - 'set_N_dim_up': set_N_dim_up, - - 'set_N_dim_lo': set_N_dim_lo, - - 'set_N_cost_up': set_N_cost_up, - - 'set_N_cost_lo': set_N_cost_lo, - - 'set_N_ext': set_N_ext, - - # output sets - - 'set_R': set_R, - - 'set_R_dim': set_R_dim, - - 'set_R_cost': set_R_cost, - - 'set_R_ext': set_R_ext, - - # ***************************************************************** - - # arc related sets - - 'set_J': set_J, - - 'set_J_und': set_J_und, # if j not here, then it is directed - - 'set_J_pre': set_J_pre, # if j not here, then it is selectable - - 'set_J_pre_inf': set_J_pre_inf, # if j not here, then it is fin cap - - 'set_J_col': set_J_col, - - 'set_J_mdt': set_J_mdt, # if j not here, then it is optional - - 'set_J_arc_sos1': set_J_arc_sos1, # option - - 'set_J_arc_nnr': set_J_arc_nnr, # if j not here, then it uses binary vars. - - 'set_J_int': set_J_int, # if j not here, no int. are needed - - 'set_J_sns_sos1': set_J_sns_sos1, # option - - 'set_J_sns_nnr': set_J_sns_nnr, # the alternative is bin. vars. - - 'set_J_stt': set_J_stt, - - 'set_J_stt_dep': set_J_stt_dep, - - 'set_J_stt_arr': set_J_stt_arr, - - 'set_J_stt_us': set_J_stt_us, - - 'set_J_stt_ds': set_J_stt_ds, - - 'set_H_gllj': set_H_gllj, - - # ***************************************************************** - - # groups of arcs - - 'set_T': set_T, - - # set of mandatory arc groups - - 'set_T_mdt': set_T_mdt, - - # set of arc groups requiring interface variables - - 'set_T_int': set_T_int, - - # set of arc groups relying on SOS1 - - 'set_T_sos1': set_T_sos1, - - # set of arg groups relying on non-negative real variables - - 'set_T_nnr': set_T_nnr, - - # set of arc groups relying on binary variables - - 'set_T_bin': set_T_bin, - - # set of arcs in the various arc groups - - 'set_GLLJ_col_t': set_GLLJ_col_t, - - # set of arc options for arc groups - - 'set_H_t': set_H_t, - - # # set of (t,h) tuples - - # 'set_TH': set_TH, - - # # set of (t,g,l1,l2,j) tuples - - # 'set_TGLLJ': set_TGLLJ, - - # # set of TH tuples for groups using SOS1 - - # 'set_TH_sos1': set_TH_sos1, - - # # set of t-indexed TH tuples for groups of arcs relying on SOS1 - - # 'set_TH_sos1_t': set_TH_sos1_t, - - # minimum cost of a group of arcs - - 'param_c_arc_min_th': param_c_arc_min_th, - - # unit flow amplitude cost - - 'param_c_arc_var_t': param_c_arc_var_t, - - # maximum nominal amplitude - - 'param_v_amp_max_th': param_v_amp_max_th, - - # sos1 weights for arc group options - - 'param_arc_inv_sos1_weights_th': param_arc_inv_sos1_weights_th, - - # ***************************************************************** - # ***************************************************************** - - # ancillary sets - - # ***************************************************************** - # ***************************************************************** - - 'set_GLLJ': set_GLLJ, - - 'set_GLLJ_und': set_GLLJ_und, - - 'set_GLLJ_new': set_GLLJ_new, - - 'set_GLLJ_sgl': set_GLLJ_sgl, - - 'set_GLLJ_und_ext': set_GLLJ_und_ext, - - 'set_GLLJ_und_red': set_GLLJ_und_red, - - 'set_GLLJ_red': set_GLLJ_red, - - 'set_GLLJH_sgl': set_GLLJH_sgl, - - 'set_GLLJ_pre_fin_red': set_GLLJ_pre_fin_red, - - 
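# *****************************************************************************
# The set_J_* entries above encode binary arc attributes by membership, as the
# inline comments note (e.g. an arc absent from set_J_und is directed). A toy
# illustration of that convention, assuming the same dict-of-tuples layout;
# the keys are hypothetical:

set_J_toy = {("g0", "u", "v"): (0, 1)}    # two parallel arcs between u and v
set_J_und_toy = {("g0", "u", "v"): (0,)}  # only arc 0 is undirected

directed_toy = {
    gll: tuple(j for j in set_J_toy[gll] if j not in set_J_und_toy.get(gll, ()))
    for gll in set_J_toy
}
assert directed_toy[("g0", "u", "v")] == (1,)  # arc 1 is directed
# *****************************************************************************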
'set_GLLJ_arc_inv_sos1': set_GLLJ_arc_inv_sos1, - - 'set_GLLJH_arc_inv_sos1': set_GLLJH_arc_inv_sos1, - - 'set_GLLJH_arc_inv_sos1_gllj': set_GLLJH_arc_inv_sos1_gllj, - - 'set_GLLJQK_und_sns_sos1_red': set_GLLJQK_und_sns_sos1_red, - - 'set_GLLJ_int': set_GLLJ_int, - - 'set_GLLJ_static': set_GLLJ_static, - - 'set_GLLJ_static_pre': set_GLLJ_static_pre, - - 'set_GLLJ_static_new': set_GLLJ_static_new, - - 'set_GLLJH_static_new': set_GLLJH_static_new, - - 'set_GLLJ_static_und_red': set_GLLJ_static_und_red, - # model.set_GLLJ_und_red, - # model.set_GLLJ_und, - # model.set_GLLJ_und_new, - # model.set_GLLJH_static_new, - # model.set_GLLJ_static_pre, - # model.set_GLLJ_static_und_red, - # model.set_GLLJ_static_new, - # model.set_GLLJ_static_dir, - # model.set_GLLJ_static_und, - # model.set_GLLJ_static_und_pre, - # model.set_GLLJ_static_und_new, - - # ***************************************************************** - # ***************************************************************** - - # parameters - - # ***************************************************************** - # ***************************************************************** - - # objective function - - 'param_c_wgt_q': param_c_wgt_q, - - 'param_c_df_qp': param_c_df_qp, - - 'param_c_time_qpk': param_c_time_qpk, - - 'param_p_glqpks': param_p_glqpks, - - 'param_v_max_glqpks': param_v_max_glqpks, - - # ***************************************************************** - - # converters - - # objective function - - 'param_c_cvt_i': param_c_cvt_i, - - 'param_c_cvt_u_im': param_c_cvt_u_im, - - 'param_c_cvt_x_in': param_c_cvt_x_in, - - 'param_c_cvt_y_ir': param_c_cvt_y_ir, - - 'param_c_ext_u_imqk': param_c_ext_u_imqk, - - 'param_c_ext_x_inqk': param_c_ext_x_inqk, - - 'param_c_ext_y_irqk': param_c_ext_y_irqk, - - # inputs - - 'param_u_ub_imqk': param_u_ub_imqk, - - 'param_u_lim_max_im': param_u_lim_max_im, - - 'param_f_amp_u_imqk': param_f_amp_u_imqk, - - # states - - 'param_a_eq_x_innqk': param_a_eq_x_innqk, - - 'param_b_eq_x_inmqk': param_b_eq_x_inmqk, - - 'param_e_eq_x_inqk': param_e_eq_x_inqk, - - 'param_x_inq0': param_x_inq0, - - 'param_x_ub_inqk': param_x_ub_inqk, - - 'param_x_lb_inqk': param_x_lb_inqk, - - 'param_x_pos_lim_max_in': param_x_pos_lim_max_in, - - 'param_x_pos_lim_min_in': param_x_pos_lim_min_in, - - 'param_x_neg_lim_max_in': param_x_neg_lim_max_in, - - 'param_x_neg_lim_min_in': param_x_neg_lim_min_in, - - 'param_f_amp_x_inqk': param_f_amp_x_inqk, - - # outputs - - 'param_c_eq_y_irnqk': param_c_eq_y_irnqk, - - 'param_d_eq_y_irmqk': param_d_eq_y_irmqk, - - 'param_e_eq_y_irqk': param_e_eq_y_irqk, - - 'param_y_ub_irqk': param_y_ub_irqk, - - 'param_y_lb_irqk': param_y_lb_irqk, - - 'param_y_pos_lim_max_ir': param_y_pos_lim_max_ir, - - 'param_y_pos_lim_min_ir': param_y_pos_lim_min_ir, - - 'param_y_neg_lim_max_ir': param_y_neg_lim_max_ir, - - 'param_y_neg_lim_min_ir': param_y_neg_lim_min_ir, - - 'param_f_amp_y_irqk': param_f_amp_y_irqk, - - # ***************************************************************** - - # network - - 'param_max_number_parallel_arcs': param_max_number_parallel_arcs, - - 'param_v_base_glqk': param_v_base_glqk, - - 'param_a_nw_glimqk': param_a_nw_glimqk, - - 'param_a_nw_glirqk': param_a_nw_glirqk, - - # ***************************************************************** - - # arc parameters - - 'param_eta_glljqk': param_eta_glljqk, - - 'param_v_ub_glljqk': param_v_ub_glljqk, - - 'param_v_amp_max_glljh': param_v_amp_max_glljh, - - 'param_c_arc_min_glljh': param_c_arc_min_glljh, - - 'param_c_arc_var_gllj': 
param_c_arc_var_gllj, - - 'param_f_amp_v_glljqk': param_f_amp_v_glljqk, - - # fixed losses - - 'param_w_pre_glljqk': param_w_pre_glljqk, - - 'param_w_new_glljhqk': param_w_new_glljhqk, - - # sos1 weights for arc selection - - 'param_arc_inv_sos1_weights_glljh': param_arc_inv_sos1_weights_glljh, - - # sos1 weights for flow sense determination - - 'param_arc_sns_sos1_weights_glljqk': param_arc_sns_sos1_weights_glljqk, - - # ***************************************************************** - # ***************************************************************** - - }} - - print('dictionary created...') - + + data_dict = { + None: { + # ***************************************************************** + # ***************************************************************** + # sets + # ***************************************************************** + # ***************************************************************** + # common sets + "set_Q": {None: set_Q}, + "set_P_q": set_P_q, + "set_K_q": set_K_q, + "set_QP": set_QP, + "set_QK": set_QK, + "set_QPK": set_QPK, + # network sets + "set_G": {None: set_G}, + "set_L": set_L, + "set_L_imp": set_L_imp, + "set_L_exp": set_L_exp, + "set_L_max_in_g": set_L_max_in_g, + #'set_L_max_out_g': set_L_max_out_g, + "set_GL": set_GL, + "set_GL_exp": set_GL_exp, + "set_GL_imp": set_GL_imp, + "set_GL_exp_imp": set_GL_exp_imp, + "set_GL_not_exp_imp": set_GL_not_exp_imp, + "set_GLL": set_GLL, + # ***************************************************************** + # tariff + "set_S": set_S, + "set_GLQPKS": set_GLQPKS, + "set_GLQPKS_exp": set_GLQPKS_exp, + "set_GLQPKS_imp": set_GLQPKS_imp, + # ***************************************************************** + # converter sets + "set_I": set_I, + "set_I_new": set_I_new, + # input sets + "set_M": set_M, + "set_M_bin": set_M_bin, + "set_M_dim": set_M_dim, + "set_M_cost": set_M_cost, + "set_M_ext": set_M_ext, + # state sets + "set_N": set_N, + "set_N_dim_up": set_N_dim_up, + "set_N_dim_lo": set_N_dim_lo, + "set_N_cost_up": set_N_cost_up, + "set_N_cost_lo": set_N_cost_lo, + "set_N_ext": set_N_ext, + # output sets + "set_R": set_R, + "set_R_dim": set_R_dim, + "set_R_cost": set_R_cost, + "set_R_ext": set_R_ext, + # ***************************************************************** + # arc related sets + "set_J": set_J, + "set_J_und": set_J_und, # if j not here, then it is directed + "set_J_pre": set_J_pre, # if j not here, then it is selectable + "set_J_pre_inf": set_J_pre_inf, # if j not here, then it is fin cap + "set_J_col": set_J_col, + "set_J_mdt": set_J_mdt, # if j not here, then it is optional + "set_J_arc_sos1": set_J_arc_sos1, # option + "set_J_arc_nnr": set_J_arc_nnr, # if j not here, then it uses binary vars. + "set_J_int": set_J_int, # if j not here, no int. are needed + "set_J_sns_sos1": set_J_sns_sos1, # option + "set_J_sns_nnr": set_J_sns_nnr, # the alternative is bin. vars. 
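# *****************************************************************************
# A toy version of the SOS1 flow-sense weights built earlier in this method
# (param_arc_sns_sos1_weights_glljqk): for an undirected arc, the nominal and
# reverse directions receive distinct positive weights per (q, k) pair, so a
# SOS1 constraint can allow at most one direction to carry flow. The offset
# and keys below are hypothetical stand-ins.

SOS1_SENSE_OFFSET_TOY = 1
NOMINAL_HIGHER_TOY = True  # stands in for SOS1_SENSE_WEIGHT_NOMINAL_HIGHER

weights_toy = {}
for (q, k) in (("q0", 0), ("q0", 1)):
    # nominal direction (u -> v) and reverse direction (v -> u)
    weights_toy[("g0", "u", "v", 0, q, k)] = (
        1 + SOS1_SENSE_OFFSET_TOY if NOMINAL_HIGHER_TOY else SOS1_SENSE_OFFSET_TOY
    )
    weights_toy[("g0", "v", "u", 0, q, k)] = (
        SOS1_SENSE_OFFSET_TOY if NOMINAL_HIGHER_TOY else 1 + SOS1_SENSE_OFFSET_TOY
    )
assert weights_toy[("g0", "u", "v", 0, "q0", 0)] != weights_toy[("g0", "v", "u", 0, "q0", 0)]
# *****************************************************************************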
+ "set_J_stt": set_J_stt, + "set_J_stt_dep": set_J_stt_dep, + "set_J_stt_arr": set_J_stt_arr, + "set_J_stt_us": set_J_stt_us, + "set_J_stt_ds": set_J_stt_ds, + "set_H_gllj": set_H_gllj, + # ***************************************************************** + # groups of arcs + "set_T": set_T, + # set of mandatory arc groups + "set_T_mdt": set_T_mdt, + # set of arc groups requiring interface variables + "set_T_int": set_T_int, + # set of arc groups relying on SOS1 + "set_T_sos1": set_T_sos1, + # set of arg groups relying on non-negative real variables + "set_T_nnr": set_T_nnr, + # set of arc groups relying on binary variables + "set_T_bin": set_T_bin, + # set of arcs in the various arc groups + "set_GLLJ_col_t": set_GLLJ_col_t, + # set of arc options for arc groups + "set_H_t": set_H_t, + # # set of (t,h) tuples + # 'set_TH': set_TH, + # # set of (t,g,l1,l2,j) tuples + # 'set_TGLLJ': set_TGLLJ, + # # set of TH tuples for groups using SOS1 + # 'set_TH_sos1': set_TH_sos1, + # # set of t-indexed TH tuples for groups of arcs relying on SOS1 + # 'set_TH_sos1_t': set_TH_sos1_t, + # minimum cost of a group of arcs + "param_c_arc_min_th": param_c_arc_min_th, + # unit flow amplitude cost + "param_c_arc_var_t": param_c_arc_var_t, + # maximum nominal amplitude + "param_v_amp_max_th": param_v_amp_max_th, + # sos1 weights for arc group options + "param_arc_inv_sos1_weights_th": param_arc_inv_sos1_weights_th, + # ***************************************************************** + # ***************************************************************** + # ancillary sets + # ***************************************************************** + # ***************************************************************** + "set_GLLJ": set_GLLJ, + "set_GLLJ_und": set_GLLJ_und, + "set_GLLJ_new": set_GLLJ_new, + "set_GLLJ_sgl": set_GLLJ_sgl, + "set_GLLJ_und_ext": set_GLLJ_und_ext, + "set_GLLJ_und_red": set_GLLJ_und_red, + "set_GLLJ_red": set_GLLJ_red, + "set_GLLJH_sgl": set_GLLJH_sgl, + "set_GLLJ_pre_fin_red": set_GLLJ_pre_fin_red, + "set_GLLJ_arc_inv_sos1": set_GLLJ_arc_inv_sos1, + "set_GLLJH_arc_inv_sos1": set_GLLJH_arc_inv_sos1, + "set_GLLJH_arc_inv_sos1_gllj": set_GLLJH_arc_inv_sos1_gllj, + "set_GLLJQK_und_sns_sos1_red": set_GLLJQK_und_sns_sos1_red, + "set_GLLJ_int": set_GLLJ_int, + "set_GLLJ_static": set_GLLJ_static, + "set_GLLJ_static_pre": set_GLLJ_static_pre, + "set_GLLJ_static_new": set_GLLJ_static_new, + "set_GLLJH_static_new": set_GLLJH_static_new, + "set_GLLJ_static_und_red": set_GLLJ_static_und_red, + # model.set_GLLJ_und_red, + # model.set_GLLJ_und, + # model.set_GLLJ_und_new, + # model.set_GLLJH_static_new, + # model.set_GLLJ_static_pre, + # model.set_GLLJ_static_und_red, + # model.set_GLLJ_static_new, + # model.set_GLLJ_static_dir, + # model.set_GLLJ_static_und, + # model.set_GLLJ_static_und_pre, + # model.set_GLLJ_static_und_new, + # ***************************************************************** + # ***************************************************************** + # parameters + # ***************************************************************** + # ***************************************************************** + # objective function + "param_c_wgt_q": param_c_wgt_q, + "param_c_df_qp": param_c_df_qp, + "param_c_time_qpk": param_c_time_qpk, + "param_p_glqpks": param_p_glqpks, + "param_v_max_glqpks": param_v_max_glqpks, + # ***************************************************************** + # converters + # objective function + "param_c_cvt_i": param_c_cvt_i, + "param_c_cvt_u_im": param_c_cvt_u_im, + 
"param_c_cvt_x_in": param_c_cvt_x_in, + "param_c_cvt_y_ir": param_c_cvt_y_ir, + "param_c_ext_u_imqk": param_c_ext_u_imqk, + "param_c_ext_x_inqk": param_c_ext_x_inqk, + "param_c_ext_y_irqk": param_c_ext_y_irqk, + # inputs + "param_u_ub_imqk": param_u_ub_imqk, + "param_u_lim_max_im": param_u_lim_max_im, + "param_f_amp_u_imqk": param_f_amp_u_imqk, + # states + "param_a_eq_x_innqk": param_a_eq_x_innqk, + "param_b_eq_x_inmqk": param_b_eq_x_inmqk, + "param_e_eq_x_inqk": param_e_eq_x_inqk, + "param_x_inq0": param_x_inq0, + "param_x_ub_inqk": param_x_ub_inqk, + "param_x_lb_inqk": param_x_lb_inqk, + "param_x_pos_lim_max_in": param_x_pos_lim_max_in, + "param_x_pos_lim_min_in": param_x_pos_lim_min_in, + "param_x_neg_lim_max_in": param_x_neg_lim_max_in, + "param_x_neg_lim_min_in": param_x_neg_lim_min_in, + "param_f_amp_x_inqk": param_f_amp_x_inqk, + # outputs + "param_c_eq_y_irnqk": param_c_eq_y_irnqk, + "param_d_eq_y_irmqk": param_d_eq_y_irmqk, + "param_e_eq_y_irqk": param_e_eq_y_irqk, + "param_y_ub_irqk": param_y_ub_irqk, + "param_y_lb_irqk": param_y_lb_irqk, + "param_y_pos_lim_max_ir": param_y_pos_lim_max_ir, + "param_y_pos_lim_min_ir": param_y_pos_lim_min_ir, + "param_y_neg_lim_max_ir": param_y_neg_lim_max_ir, + "param_y_neg_lim_min_ir": param_y_neg_lim_min_ir, + "param_f_amp_y_irqk": param_f_amp_y_irqk, + # ***************************************************************** + # network + "param_max_number_parallel_arcs": param_max_number_parallel_arcs, + "param_v_base_glqk": param_v_base_glqk, + "param_a_nw_glimqk": param_a_nw_glimqk, + "param_a_nw_glirqk": param_a_nw_glirqk, + # ***************************************************************** + # arc parameters + "param_eta_glljqk": param_eta_glljqk, + "param_v_ub_glljqk": param_v_ub_glljqk, + "param_v_amp_max_glljh": param_v_amp_max_glljh, + "param_c_arc_min_glljh": param_c_arc_min_glljh, + "param_c_arc_var_gllj": param_c_arc_var_gllj, + "param_f_amp_v_glljqk": param_f_amp_v_glljqk, + # fixed losses + "param_w_pre_glljqk": param_w_pre_glljqk, + "param_w_new_glljhqk": param_w_new_glljhqk, + # sos1 weights for arc selection + "param_arc_inv_sos1_weights_glljh": param_arc_inv_sos1_weights_glljh, + # sos1 weights for flow sense determination + "param_arc_sns_sos1_weights_glljqk": param_arc_sns_sos1_weights_glljqk, + # ***************************************************************** + # ***************************************************************** + } + } + + print("dictionary created...") + return data_dict - + # ************************************************************************* # ************************************************************************* - + def assert_solver_problem_compatibility(self, solver_name): - return SolverInterface.problem_and_solver_are_compatible( - solver_name, - self.optimisation_problem_type) + solver_name, self.optimisation_problem_type + ) # ************************************************************************* # ************************************************************************* - - def import_results(self, - solved_instance: pyo.ConcreteModel = None, - integrality_tolerance: float = 0.01): - + + def import_results( + self, + solved_instance: pyo.ConcreteModel = None, + integrality_tolerance: float = 0.01, + ): # no solved_instance has been provided, load from the class instance - + if solved_instance == None: - solved_instance = self.instance - + # ********************************************************************* - + # upload investment results, namely the NPV, NCFs and DFs - + # # net 
present value - + # self.net_present_value = pyo.value(solved_instance.obj_f) - + # # discount factors - + # self.discount_factors = [ # solved_instance.param_c_df_p[p] - # for p in solved_instance.set_P] - + # for p in solved_instance.set_P] + # # net cash flows (for each time period) - + # self.net_cash_flows = [ # pyo.value(solved_instance.var_ncf_p[p]) - # for p in solved_instance.set_P] - + # for p in solved_instance.set_P] + # # sanity check - + # temp_npv = Investment.npv(self.discount_rates, self.net_cash_flows) - + # if abs(temp_npv - self.net_present_value) > npv_tolerance: - + # Warning('The NPV results are not consistent with the input data.') - + # ********************************************************************* - + # for each arc subject to optimisation - - for (g,l1,l2,j,h) in solved_instance.set_GLLJH_sgl: - + + for g, l1, l2, j, h in solved_instance.set_GLLJH_sgl: # find the network and the arc - + # convert the decision variable into a boolean (due to integrality tol.) - + decision = bool( round( - pyo.value( - solved_instance.var_delta_arc_inv_glljh[ - (g,l1,l2,j,h) - ] - ) + pyo.value( + solved_instance.var_delta_arc_inv_glljh[(g, l1, l2, j, h)] ) ) - + ) + # ***************************************************************** - + # update the object - - self.networks[g].edges[(l1,l2,j)][ - Network.KEY_ARC_TECH].options_selected[h] = decision - + + self.networks[g].edges[(l1, l2, j)][Network.KEY_ARC_TECH].options_selected[ + h + ] = decision + # ***************************************************************** - + # ********************************************************************* - + # for each arc group - - for (t,h) in solved_instance.set_TH: - + + for t, h in solved_instance.set_TH: decision = bool( - round( - pyo.value( - solved_instance.var_delta_arc_inv_th[ - (t,h) - ] - ) - ) - ) - - for (g,l1,l2,j) in solved_instance.set_GLLJ_col_t[t]: - - self.networks[g].edges[(l1,l2,j)][ - Network.KEY_ARC_TECH].options_selected[h] = decision - + round(pyo.value(solved_instance.var_delta_arc_inv_th[(t, h)])) + ) + + for g, l1, l2, j in solved_instance.set_GLLJ_col_t[t]: + self.networks[g].edges[(l1, l2, j)][ + Network.KEY_ARC_TECH + ].options_selected[h] = decision + # ***************************************************************** - + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** + def simplify_peak_total_problem( - problem: InfrastructurePlanningProblem - ) -> InfrastructurePlanningProblem: - + problem: InfrastructurePlanningProblem, +) -> InfrastructurePlanningProblem: # ************************************************************************* # ************************************************************************* - + # check if the simplification is feasible if not is_peak_total_problem(problem): # it is not possible to simplify the problem return problem - + # ************************************************************************* # ************************************************************************* - + # identify the peak assessment ref_found = False for key, net in problem.networks.items(): for node_key in net.source_sink_nodes: q_ref, k_ref = sorted( - ((value, key) - for key, value in net.nodes[node_key][ 
- Network.KEY_NODE_BASE_FLOW - ].items() - ), - reverse=True - )[0][1] + ( + (value, key) + for key, value in net.nodes[node_key][ + Network.KEY_NODE_BASE_FLOW + ].items() + ), + reverse=True, + )[0][1] ref_found = True break if ref_found: break - + # ************************************************************************* # ************************************************************************* - + # define the peak assessment and the peak interval - q_peak = 'peak' + q_peak = "peak" k_peak = 0 # define the total assessment and the total interval - q_total = 'total' + q_total = "total" k_total = 0 - + # ************************************************************************* # ************************************************************************* - + # create one peak scenario per polarity within each network # create one total scenario per polarity within each network for key, net in problem.networks.items(): - # 1) losses in arcs: + # 1) losses in arcs: # 1.1) sum the static losses for all time intervals (total assessment) # 1.2) insert the static losses for the peak assessment # 1.3) remove all static losses but for the peak assessment @@ -4024,67 +3702,63 @@ def simplify_peak_total_problem( loss_sum = { (h, q_total, k_total): sum( net.edges[arc_key][Network.KEY_ARC_TECH].static_loss[ - (h, q_ref, k) + (h, q_ref, k) ] for k in range(problem.number_time_intervals[q_ref]) - ) + ) for h in range( - net.edges[arc_key][ - Network.KEY_ARC_TECH - ].number_options() - ) - } + net.edges[arc_key][Network.KEY_ARC_TECH].number_options() + ) + } # 1.2) insert the static losses for the peak assessment - net.edges[arc_key][Network.KEY_ARC_TECH].static_loss.update({ - (h, q_peak, k_peak): ( - net.edges[arc_key][Network.KEY_ARC_TECH].static_loss[ - (h, q_ref, k_ref) + net.edges[arc_key][Network.KEY_ARC_TECH].static_loss.update( + { + (h, q_peak, k_peak): ( + net.edges[arc_key][Network.KEY_ARC_TECH].static_loss[ + (h, q_ref, k_ref) ] ) - for h in range( - net.edges[arc_key][ - Network.KEY_ARC_TECH].number_options() - ) - }) + for h in range( + net.edges[arc_key][Network.KEY_ARC_TECH].number_options() + ) + } + ) # 1.3) remove all static losses but for the peak assessment - for hqk in tuple(net.edges[arc_key][ - Network.KEY_ARC_TECH].static_loss): + for hqk in tuple(net.edges[arc_key][Network.KEY_ARC_TECH].static_loss): if hqk[1:] != (q_peak, k_peak): - net.edges[arc_key][ - Network.KEY_ARC_TECH].static_loss.pop(hqk) + net.edges[arc_key][Network.KEY_ARC_TECH].static_loss.pop(hqk) # 1.4) insert the static losses for the total assessment - net.edges[arc_key][Network.KEY_ARC_TECH].static_loss.update( - loss_sum) - + net.edges[arc_key][Network.KEY_ARC_TECH].static_loss.update(loss_sum) + # efficiency # 1.2) insert the efficiencies for the peak and total assessments # 1.3) remove all efficiencies but for the peak and total assessm. 
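# *****************************************************************************
# A toy illustration of the peak/total collapse performed just above for the
# static losses (the efficiency entries are collapsed the same way right
# below): the 'total' assessment takes the sum over all intervals of the
# reference assessment, the 'peak' assessment keeps only the value at the
# reference interval, and every other entry is dropped. Keys and values are
# hypothetical.

q_ref_toy, k_ref_toy = "q0", 2  # reference assessment and peak interval
static_loss_toy = {(0, "q0", k): w for k, w in enumerate((1, 2, 4, 3))}

loss_total_toy = {
    (0, "total", 0): sum(static_loss_toy[(0, q_ref_toy, k)] for k in range(4))
}
loss_peak_toy = {(0, "peak", 0): static_loss_toy[(0, q_ref_toy, k_ref_toy)]}

static_loss_toy = {**loss_peak_toy, **loss_total_toy}
assert static_loss_toy == {(0, "peak", 0): 4, (0, "total", 0): 10}
# *****************************************************************************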
- - if net.edges[arc_key][ - Network.KEY_ARC_TECH - ].has_proportional_losses(): + + if net.edges[arc_key][Network.KEY_ARC_TECH].has_proportional_losses(): # peak assessment efficiency - net.edges[arc_key][Network.KEY_ARC_TECH].efficiency.update({ - (q_peak, k_peak): ( - net.edges[arc_key][Network.KEY_ARC_TECH].efficiency[ - (q_ref, k_ref) + net.edges[arc_key][Network.KEY_ARC_TECH].efficiency.update( + { + (q_peak, k_peak): ( + net.edges[arc_key][Network.KEY_ARC_TECH].efficiency[ + (q_ref, k_ref) ] ) - }) + } + ) # total assessment efficiency - net.edges[arc_key][Network.KEY_ARC_TECH].efficiency.update({ - (q_total, k_total): ( - net.edges[arc_key][Network.KEY_ARC_TECH].efficiency[ - (q_ref, k_ref) + net.edges[arc_key][Network.KEY_ARC_TECH].efficiency.update( + { + (q_total, k_total): ( + net.edges[arc_key][Network.KEY_ARC_TECH].efficiency[ + (q_ref, k_ref) ] ) - }) - for qk in tuple(net.edges[arc_key][ - Network.KEY_ARC_TECH].efficiency): + } + ) + for qk in tuple(net.edges[arc_key][Network.KEY_ARC_TECH].efficiency): if qk != (q_peak, k_peak) and qk != (q_total, k_total): - net.edges[arc_key][ - Network.KEY_ARC_TECH].efficiency.pop(qk) - + net.edges[arc_key][Network.KEY_ARC_TECH].efficiency.pop(qk) + # 2) prices in import/export nodes: # 2.1) determine the price for the total assessment # 2.2) insert the prices for the peak assessment @@ -4094,7 +3768,7 @@ def simplify_peak_total_problem( # 2.1) determine the price for the total assessment # 2.2) insert the prices for the peak assessment if node_key in net.import_nodes or node_key in net.export_nodes: - # import node: + # import node: # - get the current price # - insert the peak price = 0 # export node: @@ -4102,122 +3776,119 @@ def simplify_peak_total_problem( # - insert the peak price = 0 total_price = { (q_total, p, k_total): ( - net.nodes[node_key][Network.KEY_NODE_PRICES][ - (q_ref, p, k_ref) - ] - ) + net.nodes[node_key][Network.KEY_NODE_PRICES][(q_ref, p, k_ref)] + ) for p in problem.reporting_periods[q_ref] + } + net.nodes[node_key][Network.KEY_NODE_PRICES].update( + { + (q_peak, p, k_peak): ResourcePrice(prices=0) + for p in problem.reporting_periods[q_ref] } - net.nodes[node_key][Network.KEY_NODE_PRICES].update({ - (q_peak, p, k_peak): ResourcePrice(prices=0) - for p in problem.reporting_periods[q_ref] - }) - else: # other nodes + ) + else: # other nodes continue # 2.3) remove all prices but those for the peak assessment - for qpk in tuple( - net.nodes[node_key][Network.KEY_NODE_PRICES].keys() - ): + for qpk in tuple(net.nodes[node_key][Network.KEY_NODE_PRICES].keys()): if qpk[0] != q_peak and qpk[2] != k_peak: net.nodes[node_key][Network.KEY_NODE_PRICES].pop(qpk) # 2.4) insert the prices for the total assessment net.nodes[node_key][Network.KEY_NODE_PRICES].update(total_price) - - # 3) flows in other nodes: + + # 3) flows in other nodes: # 3.1) determine the flow volume for the total assessment # 3.2) insert the prices and base flows for the peak scenario # 3.3) remove all but the peak scenario and intervals # 3.4) insert the flow volume for the total assessment - + for node_key in net.source_sink_nodes: # 3.1) determine the flow volume for the total assessment total_flow = { (q_total, k_total): sum( net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q_ref, k)] for k in range(problem.number_time_intervals[q_ref]) - ) - } + ) + } # 3.2) insert the prices and base flows for the peak scenario net.nodes[node_key][Network.KEY_NODE_BASE_FLOW].update( - {(q_peak, k_peak): net.nodes[node_key][ - Network.KEY_NODE_BASE_FLOW - ][(q_ref, 
k_ref)] - } - ) + { + (q_peak, k_peak): net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][ + (q_ref, k_ref) + ] + } + ) # 3.3) remove all but the peak scenario and intervals for qk in tuple(net.nodes[node_key][Network.KEY_NODE_BASE_FLOW]): if qk != (q_peak, k_peak): net.nodes[node_key][Network.KEY_NODE_BASE_FLOW].pop(qk) # 3.4) insert the flow volume for the total assessment - net.nodes[node_key][Network.KEY_NODE_BASE_FLOW].update(total_flow) - + net.nodes[node_key][Network.KEY_NODE_BASE_FLOW].update(total_flow) + # ************************************************************************* # ************************************************************************* - + # update the assessments, reporting periods, intervals and segments - + # assessments problem.assessment_keys = (q_peak, q_total) problem.number_assessments = len(problem.assessment_keys) - + # reporting periods problem.reporting_periods = { - q: tuple(problem.reporting_periods[q_ref]) - for q in problem.assessment_keys - } + q: tuple(problem.reporting_periods[q_ref]) for q in problem.assessment_keys + } problem.number_reporting_periods = { - q: len(problem.reporting_periods[q]) - for q in problem.assessment_keys - } - + q: len(problem.reporting_periods[q]) for q in problem.assessment_keys + } + # time intervals problem.time_intervals = { - q_peak: [problem.time_intervals[q_ref][k_ref]], - q_total: [sum(problem.time_intervals[q_ref])] - } + q_peak: [problem.time_intervals[q_ref][k_ref]], + q_total: [sum(problem.time_intervals[q_ref])], + } problem.number_time_intervals = { - q: len(problem.time_intervals[q]) - for q in problem.assessment_keys - } - + q: len(problem.time_intervals[q]) for q in problem.assessment_keys + } + # average time interval problem.average_time_interval = { - q: mean(problem.time_intervals[q]) - for q in problem.assessment_keys - } - # normalised time interval duration + q: mean(problem.time_intervals[q]) for q in problem.assessment_keys + } + # normalised time interval duration # problem.normalised_time_interval_duration = { # (q,k): duration/problem.average_time_interval[q] # for q in problem.assessment_keys # for k, duration in enumerate(problem.time_intervals[q]) - # } + # } problem.normalised_time_interval_duration = { - (q_peak, k_peak): 1, + (q_peak, k_peak): 1, (q_total, k_total): ( - sum(problem.time_intervals[q_total])/ - problem.time_intervals[q_peak][k_total] - ) - } - + sum(problem.time_intervals[q_total]) + / problem.time_intervals[q_peak][k_total] + ), + } + # discount factors (use the reference assessment) problem.discount_rates[q_peak] = tuple(problem.discount_rates[q_ref]) problem.discount_rates[q_total] = tuple(problem.discount_rates[q_ref]) problem.discount_rates.pop(q_ref) - + # f coefficients - + # ************************************************************************* # ************************************************************************* - + # return the modified problem return problem - + + # ***************************************************************************** # ***************************************************************************** + def is_peak_total_problem(problem: InfrastructurePlanningProblem) -> bool: """Returns True if the problem only concerns peak capacity and volume.""" - + # conditions: # 1) maximum congestion occurs simultaneously across the network # - corollary: dynamic behaviours do not change the peaks @@ -4225,22 +3896,22 @@ def is_peak_total_problem(problem: InfrastructurePlanningProblem) -> bool: # - simplifying assumption: no proportional 
losses in the network # 2) the time during which maximum congestion occurs can be determined # 3) energy prices are constant in time and volume - + # check #1 - + # check #2 # check #3: energy prices are constant in time and volume for key, net in problem.networks.items(): # check import nodes for imp_node_key in net.import_nodes: # is an import node, check if it is time invariant - if not net.nodes[imp_node_key][ - Network.KEY_NODE_PRICES_TIME_INVARIANT]: - return False # is not time invariant + if not net.nodes[imp_node_key][Network.KEY_NODE_PRICES_TIME_INVARIANT]: + return False # is not time invariant # it is time invariant, but is it volume invariant? check any qpk for qpk in net.nodes[imp_node_key][Network.KEY_NODE_PRICES]: - if not net.nodes[imp_node_key][ - Network.KEY_NODE_PRICES][qpk].is_volume_invariant(): + if not net.nodes[imp_node_key][Network.KEY_NODE_PRICES][ + qpk + ].is_volume_invariant(): # it is not volume invariant return False # if the entries are time invariant, checking one will do @@ -4248,18 +3919,18 @@ def is_peak_total_problem(problem: InfrastructurePlanningProblem) -> bool: # check export nodes for exp_node_key in net.export_nodes: # is an import node, check if it is time invariant - if not net.nodes[exp_node_key][ - Network.KEY_NODE_PRICES_TIME_INVARIANT]: - return False # is not time invariant + if not net.nodes[exp_node_key][Network.KEY_NODE_PRICES_TIME_INVARIANT]: + return False # is not time invariant # it is time invariant, but is it volume invariant? check any qpk for qpk in net.nodes[exp_node_key][Network.KEY_NODE_PRICES]: - if not net.nodes[exp_node_key][ - Network.KEY_NODE_PRICES][qpk].is_volume_invariant(): + if not net.nodes[exp_node_key][Network.KEY_NODE_PRICES][ + qpk + ].is_volume_invariant(): # it is not volume invariant return False # if the entries are time invariant, checking one will do break - + # # check #4: none of the arcs can have proportional losses # for key, net in problem.networks.items(): # # check each edge @@ -4267,7 +3938,8 @@ def is_peak_total_problem(problem: InfrastructurePlanningProblem) -> bool: # if net.edges[edge_key][ # Network.KEY_ARC_TECH].has_proportional_losses(): # return False # has proportional losses, return False - return True # all conditions are true - + return True # all conditions are true + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/resource.py b/src/topupopt/problems/esipp/resource.py index 039f887..a6ce9d3 100644 --- a/src/topupopt/problems/esipp/resource.py +++ b/src/topupopt/problems/esipp/resource.py @@ -8,290 +8,250 @@ from numbers import Real # TODO: change name to ResourceTariff + class ResourcePrice: """A class for piece-wise linear resource prices in network problems.""" def __init__(self, prices: list or int, volumes: list = None): - # how do we keep the size of the object as small as possible # if the tariff is time-invariant, how can information be stored? 
# - a flag - - # accepted inputs: + + # accepted inputs: # 1) prices and values as lists with matching sizes # - all elements in the lists need to be numeric # - all prices need to be non-negative # - all volumes need to be positive # 2) price as a list, volume as None # - all elements in the list need to be numeric and non-negative - - (self._number_segments, - self.prices, - self.volumes) = self.validate_list( - prices, - volumes - ) - + + (self._number_segments, self.prices, self.volumes) = self.validate_list( + prices, volumes + ) + self._volume_invariant = self.is_volume_invariant() - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def validate_list(self, prices, volumes): """Validates the inputs provided in list format.""" - + if type(volumes) == list: - # ensure prices are also provided as a list - + if type(prices) != list: - raise TypeError( - 'The prices need to be provided as a list, if volumes are'+ - ' too.' - ) - + "The prices need to be provided as a list, if volumes are" + " too." + ) + # prices and volumes are lists - + number_segments = len(volumes) - + if number_segments != len(prices): - - raise ValueError( - 'The number of prices and volumes are inconsistent.' - ) - + raise ValueError("The number of prices and volumes are inconsistent.") + # check the elements - + for segment_index in range(number_segments): - price = prices[segment_index] - + volume = volumes[segment_index] - + # price - + try: - if price < 0: - - raise ValueError( - 'The prices provided were not all positive.' - ) - + raise ValueError("The prices provided were not all positive.") + except TypeError: - raise TypeError( - 'The prices were not all provided as numeric types.' - ) - + "The prices were not all provided as numeric types." + ) + # volume - + if type(volume) == type(None): - # if None, move to the next iteration - - if segment_index == number_segments-1: - + + if segment_index == number_segments - 1: # the last segment is None: unlimited volume - + continue - + else: - - # intermediate segment: - + # intermediate segment: + raise ValueError( - 'The intermediate segments cannot have volume '+ - 'limits.' - ) - + "The intermediate segments cannot have volume " + "limits." + ) + else: - # if not None, make sure it is positive - + try: - if volume <= 0: - raise ValueError( - 'The volumes provided were not all positive.' - ) - + "The volumes provided were not all positive." + ) + except TypeError: - raise TypeError( - 'The volumes were not all provided as numeric '+ - 'types.' - ) - + "The volumes were not all provided as numeric " + "types." + ) + # done - + return number_segments, prices, volumes - + elif type(volumes) == type(None) or isinstance(volumes, Real): - # the prices must be numeric and positive - + if not isinstance(prices, Real): - - raise TypeError( - 'The prices were not all provided as numeric types.' - ) - - + raise TypeError("The prices were not all provided as numeric types.") + if prices < 0: - - raise ValueError( - 'The prices provided were not all positive.' 
- ) - + raise ValueError("The prices provided were not all positive.") + # the number of segments must be 1 - + number_segments = 1 - + # done - + return number_segments, [prices], [volumes] - + else: - - raise TypeError('Unrecognised type for volumes.') - + raise TypeError("Unrecognised type for volumes.") + # ************************************************************************* # ************************************************************************* - + def price_monotonically_increasing_with_volume(self) -> bool: - # check if we are to focus on one specific time interval or not - + # if there is only one segment, return false - + if self._number_segments == 1: - return True - + # check one interval: - - for segment_index in range(self._number_segments-1): - + + for segment_index in range(self._number_segments - 1): # check consecutive segments - - if self.prices[segment_index] > self.prices[segment_index+1]: - + + if self.prices[segment_index] > self.prices[segment_index + 1]: # prices decreased - + return False - + # otherwise - + return True - + # ************************************************************************* # ************************************************************************* - + def price_monotonically_decreasing_with_volume(self) -> bool: - # check if we are to focus on one specific time interval or not - + # if there is only one segment, return false - + if self._number_segments == 1: - return True - + # check one interval: - - for segment_index in range(self._number_segments-1): - + + for segment_index in range(self._number_segments - 1): # check consecutive segments - - if self.prices[segment_index] < self.prices[segment_index+1]: - + + if self.prices[segment_index] < self.prices[segment_index + 1]: # prices increased - + return False - + # otherwise - + return True - + # ************************************************************************* # ************************************************************************* - + def is_volume_capped(self) -> bool: - return not (type(self.volumes[-1]) == type(None)) - + # ************************************************************************* # ************************************************************************* - + def is_volume_invariant(self) -> bool: """Returns True if the prices are the same regardless of the volume.""" # is a list if len(set(self.prices)) == 1: - return True # a list with the same price >> volume invariant + return True # a list with the same price >> volume invariant else: - return False # a list with more than 1 price >> volume sensitive - + return False # a list with more than 1 price >> volume sensitive + # ************************************************************************* # ************************************************************************* - + def number_segments(self, redo: bool = False) -> int: """Returns the number of price segments.""" if hasattr(self, "_number_segments") and not redo: return self._number_segments return len(self.prices) - + # ************************************************************************* # ************************************************************************* - + def is_equivalent(self, other) -> bool: """Returns True if a given ResourcePrice is equivalent to another.""" # resources are equivalent if: # 1) the prices are the same # 2) the volume limits are the same - + # the number of segments has to match if self.number_segments() != other.number_segments(): - return False # the number of segments do not match + return False # 
the number of segments do not match # check the prices if self.prices != other.prices: - return False # prices are different + return False # prices are different # prices match, check the volumes if self.volumes != other.volumes: - return False # volumes are different - return True # all conditions have been met + return False # volumes are different + return True # all conditions have been met # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** + def are_prices_time_invariant(resource_prices_qpk: dict) -> bool: """Returns True if all prices are identical per time interval.""" # check if there is only one or no (q,p,k) entry if len(resource_prices_qpk) <= 1: - return True # only one or no entry = is time invariant + return True # only one or no entry = is time invariant # check if the entries for the same period and assessment are time invariant entries_qp = set([qpk[0:2] for qpk in resource_prices_qpk]) qpk_qp = { - qp: [qpk for qpk in resource_prices_qpk if qp == qpk[0:2]] - for qp in entries_qp - } + qp: [qpk for qpk in resource_prices_qpk if qp == qpk[0:2]] for qp in entries_qp + } # check if the tariffs per period and assessment are equivalent for qp, qpk_list in qpk_qp.items(): - for i in range(len(qpk_list)-1): + for i in range(len(qpk_list) - 1): if not resource_prices_qpk[qpk_list[0]].is_equivalent( - resource_prices_qpk[qpk_list[i+1]] - ): + resource_prices_qpk[qpk_list[i + 1]] + ): return False # all tariffs are equivalent per period and assessment: they are invariant return True + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/signal.py b/src/topupopt/problems/esipp/signal.py index e134ea4..7df2910 100644 --- a/src/topupopt/problems/esipp/signal.py +++ b/src/topupopt/problems/esipp/signal.py @@ -10,733 +10,670 @@ Created on Wed Nov 17 13:04:53 2021 # local libraries, external # local libraries, internal - + # ***************************************************************************** # ***************************************************************************** # TODO: consider mentioning that the classes are actually for discretised systems and signals - + + class Signal: """A class for discretised dynamic system signals.""" - - def __init__(self, - number_samples: int, - samples: list, - lower_bounds: list, - upper_bounds: list): - + + def __init__( + self, number_samples: int, samples: list, lower_bounds: list, upper_bounds: list + ): # validate the inputs - - (self.is_fixed, - self.is_upper_bounded, - self.is_lower_bounded) = self.validate_inputs( - number_samples, - samples, - lower_bounds, - upper_bounds) - + + ( + self.is_fixed, + self.is_upper_bounded, + self.is_lower_bounded, + ) = self.validate_inputs(number_samples, samples, lower_bounds, upper_bounds) + # initialise the variables - + self.number_samples = number_samples - + self.samples = samples - + self.lower_bounds = lower_bounds - + self.upper_bounds = upper_bounds - + # is it bounded? 
- + self.is_bounded = self.is_upper_bounded or self.is_lower_bounded - + # ************************************************************************* # ************************************************************************* - - def validate_inputs(self, - number_samples: int, - samples: list, - lower_bounds: list, - upper_bounds: list): - + + def validate_inputs( + self, number_samples: int, samples: list, lower_bounds: list, upper_bounds: list + ): # number of samples - + if type(number_samples) != int: - - raise TypeError('The number of samples has to be an integer.') - + raise TypeError("The number of samples has to be an integer.") + if number_samples <= 0: - - raise ValueError( - 'The number of samples has to be a positive intenger.') - + raise ValueError("The number of samples has to be a positive integer.") + # samples - + if type(samples) != type(None): - # it is not None - + if type(samples) != list: - - raise TypeError('The samples have to be provided via a list.') - + raise TypeError("The samples have to be provided via a list.") + if len(samples) != number_samples: - - raise ValueError( - 'The samples are inconsistent with the number of samples'+ - ' specified.' - ) - + raise ValueError( + "The samples are inconsistent with the number of samples" + + " specified." + ) + is_fixed = True - + else: - is_fixed = False - + # upper bounds - + if type(upper_bounds) != type(None): - # it is not None - + if type(upper_bounds) != list: - - raise TypeError('The samples have to be provided via a list.') - + raise TypeError("The upper bounds have to be provided via a list.") + if len(upper_bounds) != number_samples: - - raise ValueError( - 'The number of upper bounds specified is not consistent'+ - ' with the number of samples specified.' - ) - + raise ValueError( + "The number of upper bounds specified is not consistent" + + " with the number of samples specified." + ) + has_upper_bounds = True - + else: - has_upper_bounds = False - + # lower bounds - + if type(lower_bounds) != type(None): - # it is not None - + if type(lower_bounds) != list: - - raise TypeError('The samples have to be provided via a list.') - + raise TypeError("The lower bounds have to be provided via a list.") + if len(lower_bounds) != number_samples: - - raise ValueError( - 'The number of lower bounds specified is not consistent'+ - ' with the number of samples specified.' - ) - + raise ValueError( + "The number of lower bounds specified is not consistent" + + " with the number of samples specified." + ) + has_lower_bounds = True - + else: - has_lower_bounds = False - + # the bounds need to be coherent - + if has_lower_bounds and has_upper_bounds: - for ub, lb in zip(upper_bounds, lower_bounds): - if ub < lb: - - raise ValueError('The bounds specified are incoherent.') - + raise ValueError("The bounds specified are incoherent.") + # bounds - return is_fixed, has_upper_bounds, has_lower_bounds - + return is_fixed, has_upper_bounds, has_lower_bounds + # ************************************************************************* # ************************************************************************* - + def has_upper_bounds(self) -> bool: "Returns True if the signal has predetermined upper bounds." - + if type(self.upper_bounds) == list: - if len(self.upper_bounds) == self.number_samples: - # upper bounds exist with correctly sized lists # size - + return True - + else: - raise ValueError( - 'The number of upper bounds does not match the number of '+ - 'samples.'
- ) - + "The number of upper bounds does not match the number of " + + "samples." + ) + # if it is not a list or does not have the correct number of samples: - + return False - + # ************************************************************************* # ************************************************************************* - + def has_lower_bounds(self) -> bool: "Returns True if the signal has predetermined lower bounds." - + if type(self.lower_bounds) == list: - if len(self.lower_bounds) == self.number_samples: - # lower bounds exist with correctly sized lists # size - + return True - + else: - raise ValueError( - 'The number of lower bounds does not match the number of '+ - 'samples.' - ) - + "The number of lower bounds does not match the number of " + + "samples." + ) + # if it is not a list or does not have the correct number of samples: - + return False - + # ************************************************************************* # ************************************************************************* - + def is_signal_bounded(self) -> bool: "Returns True if the signal has predetermined lower or upper bounds." - + # a signal is bounded if it has upper or lower bounds - + return self.has_lower_bounds() or self.has_upper_bounds() - + # ************************************************************************* # ************************************************************************* - + def is_signal_fixed(self) -> bool: "Returns True if the signal is predetermined or has been fixed." - + # a signal is fixed if samples have been provided as a non-empty list - + if type(self.samples) == list: - # if the samples object is a list, check its size - + if len(self.samples) == self.number_samples: - return True - + else: - raise ValueError( - 'The samples are inconsistent with the number of samples'+ - ' specified.' - ) - + "The samples are inconsistent with the number of samples" + + " specified." + ) + else: - # if it is not a list, return False - + return False - + # ************************************************************************* # ************************************************************************* - + def violates_bounds(self, tolerance: float = 0.0) -> bool: "Returns True if the signal violates its bounds or if it has none." - + if self.is_bounded and self.is_fixed: - if self.is_lower_bounded: - # check lower bounds - + for u, u_lb in zip(self.samples, self.lower_bounds): - if u < u_lb - tolerance: - # the lower bound is violated - + return True - + if self.is_upper_bounded: - # check upper bounds - + for u, u_ub in zip(self.samples, self.upper_bounds): - if u > u_ub + tolerance: - # the upper bound is violated - + return True - + # if none of the bounds was violated, it is not out of bounds - + return False - + else: - # return False, if it has no bounds - + return False - + # ************************************************************************* # ************************************************************************* - + def set_signal(self, samples: list): "Defines a signal using externally-provided samples." 
- + # check the size - + if type(samples) != list: - - raise TypeError('The signal must be provided as a list.') - + raise TypeError("The signal must be provided as a list.") + if len(samples) != self.number_samples: - - raise ValueError('An unexpected number of samples was provided.') - + raise ValueError("An unexpected number of samples was provided.") + # store the data - - self.samples = samples # list(samples) - + + self.samples = samples # list(samples) + # update the relevant attributes - + self.is_fixed = True # ************************************************************************* # ************************************************************************* - - def is_signal_integer_only(self, - integrality_tolerance: float = 0.0) -> bool: + + def is_signal_integer_only(self, integrality_tolerance: float = 0.0) -> bool: "Return True if all the samples in the signal are integers." - + if self.is_fixed: - # if it is fixed, it may or may not be integer only - + return are_elements_integer( - self.samples, - integrality_tolerance=integrality_tolerance - ) - + self.samples, integrality_tolerance=integrality_tolerance + ) + else: - # if it is not fixed, it is not integer only - + return False # ************************************************************************* # ************************************************************************* - - def is_signal_binary_only(self, - integrality_tolerance: float = 0.0) -> bool: + + def is_signal_binary_only(self, integrality_tolerance: float = 0.0) -> bool: "Return True if all the samples in the signal are binary." - + if self.is_fixed: - # if it is fixed, it may or may not be binary only - + return are_elements_binary( - self.samples, - integrality_tolerance=integrality_tolerance - ) - + self.samples, integrality_tolerance=integrality_tolerance + ) + else: - # if it is not fixed, it is not binary only - + return False # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** -def are_elements_integer(elements: list, - integrality_tolerance: float or int = 0.0) -> bool: + +def are_elements_integer( + elements: list, integrality_tolerance: float or int = 0.0 +) -> bool: "Returns True if all the elements in a list are integer numbers." - + # check the tolerance - - if (type(integrality_tolerance) != float and - type(integrality_tolerance) != int): - - raise TypeError('The integrality tolerance should be a float or zero.') - - if integrality_tolerance >= 0.5 or integrality_tolerance < 0.0: - + + if type(integrality_tolerance) != float and type(integrality_tolerance) != int: + raise TypeError("The integrality tolerance should be a float or an integer.") + + if integrality_tolerance >= 0.5 or integrality_tolerance < 0.0: raise ValueError( - 'The integrality tolerance should not be greater than or equal to'+ - ' 0.5 for the distinction (i.e., integrality) to make sense.' - ) - + "The integrality tolerance should not be greater than or equal to" + + " 0.5 for the distinction (i.e., integrality) to make sense."
+ ) + # go through each element - + for element in elements: - # if at least one isn't, it isn't - - if abs(element-round(element)) > integrality_tolerance: - + + if abs(element - round(element)) > integrality_tolerance: # it is not integer - + return False - + return True - + + # ***************************************************************************** # ***************************************************************************** -def are_elements_binary(elements: list, - integrality_tolerance: float or int = None) -> bool: + +def are_elements_binary( + elements: list, integrality_tolerance: float or int = None +) -> bool: "Returns True if all the elements in a list are binary numbers." - + # no particular integrality tolerance was defined: assume rounding is okay - + if type(integrality_tolerance) == type(None): - # go through each element - + for element in elements: - # check if it is an integer - + if round(element) != 1 and round(element) != 0: - # if not 0 or 1, it is not binary - + return False - + # if this point is reached, all the elements are binary - + return True - + # ************************************************************************* - + # a specific integrality tolerance was provided - - if (type(integrality_tolerance) != float and - type(integrality_tolerance) != int): - - raise TypeError('The integrality tolerance should be a float or zero.') - + + if type(integrality_tolerance) != float and type(integrality_tolerance) != int: + raise TypeError("The integrality tolerance should be a float or an integer.") + if integrality_tolerance >= 0.5 or integrality_tolerance < 0.0: - raise ValueError( - 'The integrality tolerance should not be greater than or equal to'+ - ' 0.5 for the distinction (i.e., integrality) to make sense.' - ) - + "The integrality tolerance should not be greater than or equal to" + + " 0.5 for the distinction (i.e., integrality) to make sense." + ) + # go through each element - + for element in elements: - rounded_element = round(element) - + # check if it is an integer - - if abs(element-rounded_element) > integrality_tolerance: - + + if abs(element - rounded_element) > integrality_tolerance: # it is not integer - + return False - + # if it is an integer, check if it is zero or one - + if rounded_element != 1 and rounded_element != 0: - # if not 0 or 1, it is not binary - + return False - + # if this point is reached, all the elements are binary - + return True + # ***************************************************************************** # ***************************************************************************** + class FixedSignal(Signal): "A class for signals that are predetermined, measured or decided on."
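# A minimal usage sketch with hypothetical values (not from the original
# source): the number of samples is inferred from the list, so only the
# samples themselves are mandatory.
# >>> sig = FixedSignal(samples=[0.0, 1.0, 2.0])
# >>> sig.number_samples
# 3
# >>> sig.is_signal_fixed()
# True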
- - def __init__(self, - samples: list, - lower_bounds: list = None, - upper_bounds: list = None): - + + def __init__( + self, samples: list, lower_bounds: list = None, upper_bounds: list = None + ): if type(samples) != list: - - raise TypeError('The samples must be provided as a list.') - - Signal.__init__(self, - number_samples=len(samples), - samples=samples, - lower_bounds=lower_bounds, - upper_bounds=upper_bounds) + raise TypeError("The samples must be provided as a list.") + + Signal.__init__( + self, + number_samples=len(samples), + samples=samples, + lower_bounds=lower_bounds, + upper_bounds=upper_bounds, + ) + # ***************************************************************************** # ***************************************************************************** + class NonNegativeRealSignal(Signal): "A class for non-negative real signals." - - def __init__(self, - number_samples: int, - samples: list = None, - lower_bounds: list = None, - upper_bounds: list = None): - + + def __init__( + self, + number_samples: int, + samples: list = None, + lower_bounds: list = None, + upper_bounds: list = None, + ): if type(number_samples) != int: - - raise TypeError('The number of samples must be an integer.') - + raise TypeError("The number of samples must be an integer.") + if type(lower_bounds) != type(None): - # specific lower bounds have been provided - - Signal.__init__(self, - number_samples=number_samples, - samples=samples, - lower_bounds=lower_bounds, - upper_bounds=upper_bounds) - + + Signal.__init__( + self, + number_samples=number_samples, + samples=samples, + lower_bounds=lower_bounds, + upper_bounds=upper_bounds, + ) + else: - # no lower bounds have been provided: zero is the lower bound - - Signal.__init__(self, - number_samples=number_samples, - samples=samples, - lower_bounds=[0 for _ in range(number_samples)], - upper_bounds=upper_bounds) - + + Signal.__init__( + self, + number_samples=number_samples, + samples=samples, + lower_bounds=[0 for _ in range(number_samples)], + upper_bounds=upper_bounds, + ) + # TODO: check bounds before calling the base class init method - + # make sure the bounds are nnr - + if not self.are_bounds_nnr(): - - raise ValueError('The bounds are not non-negative real.') - + raise ValueError("The bounds are not non-negative real.") + # ************************************************************************* # ************************************************************************* - + def is_nnr(self, tolerance: float = 0.0) -> bool: "Returns True if the samples are all non-negative real." - + if self.is_fixed: - # if it is fixed, make sure each sample is nnr - + for u in self.samples: - if u < 0 - tolerance: - return False - + # if all samples are positive or zero, return True - + return True - + else: - # if it is not fixed, return False - + return False - + # ************************************************************************* # ************************************************************************* - + def are_bounds_nnr(self, tolerance: float = 0.0) -> bool: "Returns True if the bounds are non-negative real."
- + # if no bounds have been provided, check the internal bounds - + if self.is_lower_bounded: - # if the signal is bounded, its bounds may or may not be nnr - + # check the lower bounds - + # if it does, make sure each lower bound is positive or zero - + for lb in self.lower_bounds: - if lb < 0 - tolerance: - return False - + # check the upper bounds - + if self.is_upper_bounded: - # if it has upper bounds, make sure they are positive or zero - + for ub in self.upper_bounds: - if ub < 0 - tolerance: - return False - + # note: the base class's __init__ method will check the bounds - # for sanity (i.e., ub >= lb), and so that step is skipped here - + # for sanity (i.e., ub >= lb), and so that step is skipped here + # if both the upper and lower bounds (if any) are nnr, return True - + return True - + else: - # if the signal is not bounded, its bounds are not nnr - + return False - + # ************************************************************************* # ************************************************************************* + # ***************************************************************************** # ***************************************************************************** + class FixedNonNegativeRealSignal(NonNegativeRealSignal): "A class for fixed non-negative real signals." - - def __init__(self, - samples: list, - lower_bounds: list = None, - upper_bounds: list = None): - + + def __init__( + self, samples: list, lower_bounds: list = None, upper_bounds: list = None + ): if type(samples) != list: - - raise TypeError('The samples must be provided via a list.') - + raise TypeError("The samples must be provided via a list.") + NonNegativeRealSignal.__init__( - self, + self, number_samples=len(samples), samples=samples, lower_bounds=lower_bounds, - upper_bounds=upper_bounds) - + upper_bounds=upper_bounds, + ) + if not self.is_nnr(): - - raise ValueError('At least one sample is not non-negative real.') + raise ValueError("At least one sample is not non-negative real.") + # ***************************************************************************** # ***************************************************************************** + class FreeSignal(Signal): "A class for undetermined signals." - - def __init__(self, - number_samples: int, - lower_bounds: list = None, - upper_bounds: list = None): - - Signal.__init__(self, - number_samples=number_samples, - samples=None, - lower_bounds=lower_bounds, - upper_bounds=upper_bounds) + + def __init__( + self, number_samples: int, lower_bounds: list = None, upper_bounds: list = None + ): + Signal.__init__( + self, + number_samples=number_samples, + samples=None, + lower_bounds=lower_bounds, + upper_bounds=upper_bounds, + ) + # ***************************************************************************** # ***************************************************************************** + class FreeUnboundedSignal(FreeSignal): "A class for free and unbounded signals." - + def __init__(self, number_samples: int): - FreeSignal.__init__( - self, - number_samples=number_samples, - lower_bounds=None, - upper_bounds=None) + self, number_samples=number_samples, lower_bounds=None, upper_bounds=None + ) + # ***************************************************************************** # ***************************************************************************** + class BinarySignal(NonNegativeRealSignal): "A class for binary signals."
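# A minimal usage sketch with hypothetical values (not from the original
# source): a BinarySignal starts out unfixed, with [0, 1] bounds on every
# sample; actual samples can be assigned later via set_signal.
# >>> sig = BinarySignal(number_samples=2)
# >>> sig.lower_bounds, sig.upper_bounds
# ([0, 0], [1, 1])
# >>> sig.is_signal_fixed()
# False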
- + def __init__(self, number_samples: int): - if type(number_samples) != int: - - raise TypeError('The number of samples must be an integer.') - + raise TypeError("The number of samples must be an integer.") + NonNegativeRealSignal.__init__( - self, + self, number_samples=number_samples, samples=None, lower_bounds=[0 for _ in range(number_samples)], - upper_bounds=[1 for _ in range(number_samples)]) - + upper_bounds=[1 for _ in range(number_samples)], + ) + + # ***************************************************************************** # ***************************************************************************** + class AmplitudeConstrainedSignal(Signal): "A class for amplitude-constrained signals." - - def __init__(self, - number_samples: int, - max_pos_amp_limit: float or int, - min_pos_amp_limit: float or int, - max_neg_amp_limit: float or int, - min_neg_amp_limit: float or int, - positive_amplitude: list = None, - negative_amplitude: list = None, - samples: list = None, - lower_bounds: list = None, - upper_bounds: list = None): - + + def __init__( + self, + number_samples: int, + max_pos_amp_limit: float or int, + min_pos_amp_limit: float or int, + max_neg_amp_limit: float or int, + min_neg_amp_limit: float or int, + positive_amplitude: list = None, + negative_amplitude: list = None, + samples: list = None, + lower_bounds: list = None, + upper_bounds: list = None, + ): # validate the inputs - - self.set_positive_amplitude_limits(max_pos_amp_limit, - min_pos_amp_limit) - - self.set_negative_amplitude_limits(max_neg_amp_limit, - min_neg_amp_limit) - + + self.set_positive_amplitude_limits(max_pos_amp_limit, min_pos_amp_limit) + + self.set_negative_amplitude_limits(max_neg_amp_limit, min_neg_amp_limit) + # call the base class init method - - Signal.__init__(self, - number_samples=number_samples, - samples=samples, - lower_bounds=lower_bounds, - upper_bounds=upper_bounds) - + + Signal.__init__( + self, + number_samples=number_samples, + samples=samples, + lower_bounds=lower_bounds, + upper_bounds=upper_bounds, + ) + # validate the bounds - + self.validate_positive_bounds( - self.has_max_pos_amp_limit, - self.max_pos_amp_limit, - self.is_lower_bounded, - self.lower_bounds) - + self.has_max_pos_amp_limit, + self.max_pos_amp_limit, + self.is_lower_bounded, + self.lower_bounds, + ) + self.validate_negative_bounds( - self.has_max_neg_amp_limit, - self.max_neg_amp_limit, + self.has_max_neg_amp_limit, + self.max_neg_amp_limit, self.is_upper_bounded, - self.upper_bounds) - + self.upper_bounds, + ) + # set the amplitudes - + self.set_positive_amplitude(positive_amplitude) - + self.set_negative_amplitude(negative_amplitude) - + # ************************************************************************* def violates_amplitude_limits( - self, - samples: list = None, - tolerance: float = 0.0) -> bool: + self, samples: list = None, tolerance: float = 0.0 + ) -> bool: """ Returns True if the samples violate the signal's amplitude limits. @@ -759,70 +696,55 @@ class AmplitudeConstrainedSignal(Signal): If True, the samples violate the limits. If False, they do not. 
""" - + # if there are no amplitude limits, return False - + if self.has_pos_amp_limits or self.has_neg_amp_limits: - # if there are amplitude limits, check each sample - + if type(samples) == list: - # external samples were provided, check them - + for sample in samples: - # positive amplitude limits - + if self.has_max_pos_amp_limit: - if sample > self.max_pos_amp_limit + tolerance: - return True - + if self.has_max_neg_amp_limit: - if sample < -self.max_neg_amp_limit - tolerance: - return True - + elif self.is_signal_fixed(): - # no external samples were provided, check the internal samples - + for sample in self.samples: - # positive amplitude limits - + if self.has_max_pos_amp_limit: - if sample > self.max_pos_amp_limit + tolerance: - return True - + if self.has_max_neg_amp_limit: - if sample < -self.max_neg_amp_limit - tolerance: - return True else: - # the signal has not been set - - raise ValueError('There are no samples to evaluate.') - + + raise ValueError("There are no samples to evaluate.") + # if no other return statement was reached, return False - + return False - - else: - + + else: # if there are no amplitude limits, return False - + return False - + # ************************************************************************* - + def set_positive_amplitude(self, positive_amplitude: float or int = None): """ Sets the positive amplitude. @@ -842,19 +764,19 @@ class AmplitudeConstrainedSignal(Signal): None. """ - - if (type(positive_amplitude) == int or - isinstance(positive_amplitude, float) or - type(positive_amplitude) == type(None)): - + + if ( + type(positive_amplitude) == int + or isinstance(positive_amplitude, float) + or type(positive_amplitude) == type(None) + ): self.positive_amplitude = positive_amplitude - + else: - - raise TypeError('Unknown positive amplitude type.') - + raise TypeError("Unknown positive amplitude type.") + # ************************************************************************* - + def set_negative_amplitude(self, negative_amplitude: float or int = None): """ Sets the positive amplitude. @@ -874,27 +796,27 @@ class AmplitudeConstrainedSignal(Signal): None. """ - - if (type(negative_amplitude) == int or - isinstance(negative_amplitude, float) or - type(negative_amplitude) == type(None)): - + + if ( + type(negative_amplitude) == int + or isinstance(negative_amplitude, float) + or type(negative_amplitude) == type(None) + ): self.negative_amplitude = negative_amplitude - + else: - - raise TypeError('Unknown negative amplitude type.') - + raise TypeError("Unknown negative amplitude type.") + # ************************************************************************* - - def set_positive_amplitude_limits(self, - max_pos_amp_limit: float or int, - min_pos_amp_limit: float or int): + + def set_positive_amplitude_limits( + self, max_pos_amp_limit: float or int, min_pos_amp_limit: float or int + ): """ Sets the maximum and minimum positive amplitude limits for the signal. - + The positive amplitude limits refer to the maximum value the signal can - assume when positive. As such, it will affect feasiblity if the signal + assume when positive. As such, it will affect feasiblity if the signal has lower bounds above the maximum limit. Parameters @@ -909,47 +831,44 @@ class AmplitudeConstrainedSignal(Signal): None. 
""" - + # validate inputs - - (self.has_max_pos_amp_limit, - self.has_min_pos_amp_limit) = self.validate_limits(max_pos_amp_limit, - min_pos_amp_limit) - + + (self.has_max_pos_amp_limit, self.has_min_pos_amp_limit) = self.validate_limits( + max_pos_amp_limit, min_pos_amp_limit + ) + if self.has_max_pos_amp_limit: - self.max_pos_amp_limit = max_pos_amp_limit - + else: - self.max_pos_amp_limit = None - + # minimum amplitude limit - + if self.has_min_pos_amp_limit: - self.min_pos_amp_limit = min_pos_amp_limit - + else: - self.min_pos_amp_limit = None - + # update the status - + self.has_pos_amp_limits = ( - self.has_max_pos_amp_limit or self.has_min_pos_amp_limit) + self.has_max_pos_amp_limit or self.has_min_pos_amp_limit + ) # ************************************************************************* - - def set_negative_amplitude_limits(self, - max_neg_amp_limit: float or int, - min_neg_amp_limit: float or int): + + def set_negative_amplitude_limits( + self, max_neg_amp_limit: float or int, min_neg_amp_limit: float or int + ): """ Sets the maximum and minimum negative amplitude limits for the signal. - + The negative amplitude limits refer to the minimum value the signal can assume when negative, though it is a positive number. As such, it will - affect feasiblity if the upper bounds are below what the maximum limit + affect feasiblity if the upper bounds are below what the maximum limit allows for (i.e., if the upper bounds are negative and more so). Parameters @@ -964,41 +883,39 @@ class AmplitudeConstrainedSignal(Signal): None. """ - + # validate inputs - - (self.has_max_neg_amp_limit, - self.has_min_neg_amp_limit) = self.validate_limits(max_neg_amp_limit, - min_neg_amp_limit) - + + (self.has_max_neg_amp_limit, self.has_min_neg_amp_limit) = self.validate_limits( + max_neg_amp_limit, min_neg_amp_limit + ) + if self.has_max_neg_amp_limit: - self.max_neg_amp_limit = max_neg_amp_limit - + else: - self.max_neg_amp_limit = None - + # minimum amplitude limit - + if self.has_min_neg_amp_limit: - self.min_neg_amp_limit = min_neg_amp_limit - + else: - self.min_neg_amp_limit = None - + # update the status - + self.has_neg_amp_limits = ( - self.has_max_neg_amp_limit or self.has_min_neg_amp_limit) + self.has_max_neg_amp_limit or self.has_min_neg_amp_limit + ) # ************************************************************************* - + @staticmethod - def validate_limits(max_amp_limit: float or int, - min_amp_limit: float or int) -> tuple: + def validate_limits( + max_amp_limit: float or int, min_amp_limit: float or int + ) -> tuple: """ Validates maximum and minimum amplitude limits. 
@@ -1027,65 +944,54 @@ class AmplitudeConstrainedSignal(Signal): """ # both amplitudes need to be positive or None - + # max amplitude - - if (type(max_amp_limit) == int or - isinstance(max_amp_limit, float)): - + + if type(max_amp_limit) == int or isinstance(max_amp_limit, float): if max_amp_limit <= 0: - - raise ValueError('The amplitude limits need to be positive.') - + raise ValueError("The amplitude limits need to be positive.") + has_max_amp = True - + elif type(max_amp_limit) == type(None): - has_max_amp = False - - else: # none of the above - - raise TypeError('Unrecognised amplitude format.') - + + else: # none of the above + raise TypeError("Unrecognised amplitude format.") + # min amplitude - - if (type(min_amp_limit) == int or - isinstance(min_amp_limit, float)): - + + if type(min_amp_limit) == int or isinstance(min_amp_limit, float): if min_amp_limit <= 0: - - raise ValueError('The amplitude limits need to be positive.') - + raise ValueError("The amplitude limits need to be positive.") + has_min_amp = True - + elif type(min_amp_limit) == type(None): - has_min_amp = False - - else: # none of the above - - raise TypeError('Unrecognised amplitude format.') - + + else: # none of the above + raise TypeError("Unrecognised amplitude format.") + # the minimum amplitude must be lower than the maximum, if both exist - + if has_max_amp and has_min_amp: - if min_amp_limit >= max_amp_limit: - raise ValueError( - 'The maximum amplitude limit is not greater than the '+ - 'minimum.' - ) - + "The maximum amplitude limit is not greater than the " + "minimum." + ) + return has_max_amp, has_min_amp # ************************************************************************* - + @staticmethod - def validate_positive_bounds(has_max_pos_amp_limit: bool, - max_pos_amp_limit: float or int, - is_lower_bounded: bool, - lower_bounds: list): + def validate_positive_bounds( + has_max_pos_amp_limit: bool, + max_pos_amp_limit: float or int, + is_lower_bounded: bool, + lower_bounds: list, + ): """ Validates positive amplitude limits against any existing bounds. @@ -1110,28 +1016,29 @@ class AmplitudeConstrainedSignal(Signal): None. """ - + # the lower bounds cannot be below the maximum amplitude limit - + if has_max_pos_amp_limit and is_lower_bounded: - # the signal is lower bounded and has a maximum pos. amp. limit - + min_lower_bound = min(lower_bounds) - + if min_lower_bound > max_pos_amp_limit: - raise ValueError( - 'The positive amplitude limits are not compatible with '+ - ' the bounds provided.') + "The positive amplitude limits are not compatible with " + + " the bounds provided." + ) # ************************************************************************* - + @staticmethod - def validate_negative_bounds(has_max_neg_amp_limit: bool, - max_neg_amp_limit: float or int, - is_upper_bounded: bool, - upper_bounds: list): + def validate_negative_bounds( + has_max_neg_amp_limit: bool, + max_neg_amp_limit: float or int, + is_upper_bounded: bool, + upper_bounds: list, + ): """ Validates negative amplitude limits against any existing bounds. @@ -1156,250 +1063,220 @@ class AmplitudeConstrainedSignal(Signal): None. """ - + # the upper bounds cannot be below the level implied by the maximum ne- # gative amplitude limit, since that leads to infeasibility # max_upper_bound < -max_neg_amp_limit = infeasibility - + if has_max_neg_amp_limit and is_upper_bounded: - # the signal is upper bounded and has a maximum neg. amp. 
limit - + max_upper_bound = max(upper_bounds) - + if max_upper_bound < -max_neg_amp_limit: - raise ValueError( - 'The negative amplitude limits are not compatible with '+ - ' the bounds provided.') + "The negative amplitude limits are not compatible with " + + " the bounds provided." + ) # ************************************************************************* - - def validate_positive_amplitude(self, - positive_amplitude: float or int = None, - tolerance: float or int = 0.0): - + + def validate_positive_amplitude( + self, positive_amplitude: float or int = None, tolerance: float or int = 0.0 + ): # without amplitude limits, there is nothing to validate against - + if not self.has_pos_amp_limits: - - raise ValueError('There are no limits against which to validate.') - + raise ValueError("There are no limits against which to validate.") + else: - # if there are limits, the amplitudes have to fall within their # respective ranges: min amplitude <= amplitude <= max amplitude - + # the method is the same for both amplitudes - + # positive amplitude - + if type(positive_amplitude) == type(None): - # compare internal positive amplitude - + if self.has_max_pos_amp_limit: - # there is a maximum amplitude limit - - if (self.positive_amplitude > - self.max_pos_amp_limit + tolerance): - + + if self.positive_amplitude > self.max_pos_amp_limit + tolerance: # the maximum was exceeded - + raise ValueError( - 'The positive amplitude exceeds its tolerated '+ - 'maximum.') - + "The positive amplitude exceeds its tolerated " + "maximum." + ) + if self.has_min_pos_amp_limit: - # there is a minimum amplitude limit - - if (self.positive_amplitude < - self.min_pos_amp_limit - tolerance): - + + if self.positive_amplitude < self.min_pos_amp_limit - tolerance: # the minimum was not observed - + raise ValueError( - 'The positive amplitude is below its tolerated '+ - 'minimum.') - + "The positive amplitude is below its tolerated " + + "minimum." + ) + else: - # compare external positive amplitude - + if self.has_max_pos_amp_limit: - # there is a maximum amplitude limit - - if (positive_amplitude > - self.max_pos_amp_limit + tolerance): - + + if positive_amplitude > self.max_pos_amp_limit + tolerance: # the maximum was exceeded - + raise ValueError( - 'The positive amplitude exceeds its tolerated '+ - 'maximum.') - + "The positive amplitude exceeds its tolerated " + "maximum." + ) + if self.has_min_pos_amp_limit: - # there is a minimum amplitude limit - - if (positive_amplitude < - self.min_pos_amp_limit - tolerance): - + + if positive_amplitude < self.min_pos_amp_limit - tolerance: # the minimum was not observed - + raise ValueError( - 'The positive amplitude is below its tolerated '+ - 'minimum.') + "The positive amplitude is below its tolerated " + + "minimum." 
+ ) # ************************************************************************* - - def validate_negative_amplitude(self, - negative_amplitude: float or int = None, - tolerance: float or int = 0.0): - + + def validate_negative_amplitude( + self, negative_amplitude: float or int = None, tolerance: float or int = 0.0 + ): # without amplitude limits, there is nothing to validate against - + if not self.has_neg_amp_limits: - - raise ValueError('There are no limits against which to validate.') - + raise ValueError("There are no limits against which to validate.") + else: - # if there are limits, the amplitudes have to fall within their # respective ranges: min amplitude <= amplitude <= max amplitude - + # the method is the same for both amplitudes - + # negative amplitude - + if type(negative_amplitude) == type(None): - # compare internal negative amplitude - + if self.has_max_neg_amp_limit: - # there is a maximum amplitude limit - - if (self.negative_amplitude > - self.max_neg_amp_limit + tolerance): - + + if self.negative_amplitude > self.max_neg_amp_limit + tolerance: # the maximum was exceeded - + raise ValueError( - 'The negative amplitude exceeds its tolerated '+ - 'maximum.') - + "The negative amplitude exceeds its tolerated " + "maximum." + ) + if self.has_min_neg_amp_limit: - # there is a minimum amplitude limit - - if (self.negative_amplitude < - self.min_neg_amp_limit - tolerance): - + + if self.negative_amplitude < self.min_neg_amp_limit - tolerance: # the minimum was not observed - + raise ValueError( - 'The negative amplitude is below its tolerated '+ - 'minimum.') - + "The negative amplitude is below its tolerated " + + "minimum." + ) + else: - # compare external negative amplitude - + if self.has_max_neg_amp_limit: - # there is a maximum amplitude limit - - if (negative_amplitude > - self.max_neg_amp_limit + tolerance): - + + if negative_amplitude > self.max_neg_amp_limit + tolerance: # the maximum was exceeded - + raise ValueError( - 'The negative amplitude exceeds its tolerated '+ - 'maximum.') - + "The negative amplitude exceeds its tolerated " + "maximum." + ) + if self.has_min_neg_amp_limit: - # there is a minimum amplitude limit - - if (negative_amplitude < - self.min_neg_amp_limit - tolerance): - + + if negative_amplitude < self.min_neg_amp_limit - tolerance: # the minimum was not observed - + raise ValueError( - 'The negative amplitude is below its tolerated '+ - 'minimum.') - + "The negative amplitude is below its tolerated " + + "minimum." + ) + + # ***************************************************************************** # ***************************************************************************** -class AmplitudeConstrainedNNRSignal(AmplitudeConstrainedSignal, - NonNegativeRealSignal): + +class AmplitudeConstrainedNNRSignal(AmplitudeConstrainedSignal, NonNegativeRealSignal): "A class for non-negative real (NNR) amplitude-constrained signals." 
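# A minimal usage sketch with hypothetical values (not from the original
# source): only the positive amplitude limits apply, since a non-negative
# real signal cannot assume negative values; the negative limits are None.
# >>> sig = AmplitudeConstrainedNNRSignal(number_samples=2, max_pos_amp_limit=10.0)
# >>> sig.has_max_pos_amp_limit, sig.has_max_neg_amp_limit
# (True, False)
# >>> sig.lower_bounds
# [0, 0]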
- - def __init__(self, - number_samples: int, - max_pos_amp_limit: float or int, - min_pos_amp_limit: float or int = None, - positive_amplitude: float or int = None, - samples: list = None, - lower_bounds: list = None, - upper_bounds: list = None): - + + def __init__( + self, + number_samples: int, + max_pos_amp_limit: float or int, + min_pos_amp_limit: float or int = None, + positive_amplitude: float or int = None, + samples: list = None, + lower_bounds: list = None, + upper_bounds: list = None, + ): if type(lower_bounds) != type(None): - # call the AmplitudeConstrainedSignal class's init method - + AmplitudeConstrainedSignal.__init__( self, - number_samples=number_samples, - max_pos_amp_limit=max_pos_amp_limit, - min_pos_amp_limit=min_pos_amp_limit, - max_neg_amp_limit=None, + number_samples=number_samples, + max_pos_amp_limit=max_pos_amp_limit, + min_pos_amp_limit=min_pos_amp_limit, + max_neg_amp_limit=None, min_neg_amp_limit=None, positive_amplitude=positive_amplitude, negative_amplitude=None, samples=samples, lower_bounds=lower_bounds, - upper_bounds=upper_bounds) - - else: # no peculiar lower bounds were specified - + upper_bounds=upper_bounds, + ) + + else: # no peculiar lower bounds were specified # the lower bounds must be set to zero - + # the number of samples should be checked before calling the method - + if type(number_samples) != int: - # the number of samples was not correctly specified - - raise TypeError('The number of samples must be an integer.') - + + raise TypeError("The number of samples must be an integer.") + # call the AmplitudeConstrainedSignal class's init method - + AmplitudeConstrainedSignal.__init__( self, - number_samples=number_samples, - max_pos_amp_limit=max_pos_amp_limit, - min_pos_amp_limit=min_pos_amp_limit, - max_neg_amp_limit=None, + number_samples=number_samples, + max_pos_amp_limit=max_pos_amp_limit, + min_pos_amp_limit=min_pos_amp_limit, + max_neg_amp_limit=None, min_neg_amp_limit=None, positive_amplitude=positive_amplitude, negative_amplitude=None, samples=samples, lower_bounds=[0 for _ in range(number_samples)], - upper_bounds=upper_bounds) - + upper_bounds=upper_bounds, + ) + # make sure the bounds are nnr - + if not self.are_bounds_nnr(): - - raise ValueError('The bounds are not non-negative real.') - + raise ValueError("The bounds are not non-negative real.") + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/system.py b/src/topupopt/problems/esipp/system.py index fdcb636..e1c07f1 100644 --- a/src/topupopt/problems/esipp/system.py +++ b/src/topupopt/problems/esipp/system.py @@ -20,258 +20,236 @@ from .network import Network # ***************************************************************************** # ***************************************************************************** + class EnergySystem: """A class to model energy systems for simulation or optimisation.""" - + # instances of this class should contain enough information to simulate or # optimise the energy system. In the former case, no decisions can be left # out. 
- + # systems: # optional or not # decision to install, if not already defined (if not optional) # inputs, for every mode and time interval # outputs, for every output and time interval # states, for every node and time interval - - + # networks: # flows # technologies selected - - + # ************************************************************************* # ************************************************************************* - - def __init__(self, - networks: dict = None, - converters: dict = None, - optional_converters: list = None, - selected_converters: list = None): - + + def __init__( + self, + networks: dict = None, + converters: dict = None, + optional_converters: list = None, + selected_converters: list = None, + ): # ********************************************************************* # ********************************************************************* - + # networks should be a dict of nx.MultiDiGraph objects - + if networks != None and type(networks) == dict: - self.networks = dict(networks) - + else: - self.networks = {} - + # systems should be a dict of DynamicSystem objects - + if converters != None and type(converters) == dict: - self.converters = dict(converters) - + else: - self.converters = {} - + # ********************************************************************* # ********************************************************************* - + # optional converters - + # list to indicate which converters are optional # elements: converter keys # default outcome (if empty): all converters are mandatory - - if (optional_converters != None and type(optional_converters) == list): - + + if optional_converters != None and type(optional_converters) == list: self.optional_converters = list(optional_converters) - + else: - self.optional_converters = [] - + # ********************************************************************* # ********************************************************************* - + # selected converters - + # list to indicate which converters have been selected # elements: converter keys # default outcome (if empty): no converters have been selected - - if (selected_converters != None and type(selected_converters) == list): - + + if selected_converters != None and type(selected_converters) == list: self.selected_converters = list(selected_converters) - + else: - self.selected_converters = [] - + # ********************************************************************* # ********************************************************************* - + # TODO: explain why some of attributes should stay here - + # dict to indicate the maximum number of arcs between two nodes # key: (g,u,v) tuple # value: int # default outcome (leaving it empty): +inf - - self.max_number_parallel_arcs = {} - + + self.max_number_parallel_arcs = {} + # list to indicate which specific arcs are selectable yet mandatory # elements: (g,u,v,j) tuples # default outcome (by leaving it empty): optional/False # Note: it is placed here because it is an implication of the lack of # alternatives to meet demand/supply in a given node - + self.mandatory_arcs = [] - + # dict indicating which arcs # key: (network, node, converter, output, time interval) # value: coefficient relative to a gi # default outcome (if tuple not present): - + self.network_effects = {} - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # 
************************************************************************* - + def add_network(self, network_key, network: Network): """Add a new network to the energy system.""" - + self.networks[network_key] = network - + # ************************************************************************* # ************************************************************************* - - def add_converter(self, - converter_key, - converter: Converter, - is_optional: bool, - is_selected: bool = False) -> bool: + + def add_converter( + self, + converter_key, + converter: Converter, + is_optional: bool, + is_selected: bool = False, + ) -> bool: """Add a converter to the energy system object or update it.""" - + # sanity check: mandatory systems cannot have dimensiona. input signals - + if converter.has_dimensionable_input_signals() and not is_optional: - # the ds is not supposed to be optional but has dim. input signals - + return False - + # update the dict - + self.converters[converter_key] = converter - + # set whether or not it is optional - + self.set_converter_as_optional_or_not(converter_key, is_optional) - + # set whether or not it has been selected - + self.set_converter_selection_status(converter_key, is_selected) - + return True - + # ************************************************************************* # ************************************************************************* - - def set_converter_as_optional_or_not(self, - converter_key, - is_optional: bool): - + + def set_converter_as_optional_or_not(self, converter_key, is_optional: bool): # if is_optional is True: add key to list, if not already there # if is_optional is False: remove key from list, if there already - + if is_optional: - if converter_key not in self.optional_converters: - self.optional_converters.append(converter_key) - + else: - while converter_key in self.optional_converters: - self.optional_converters.remove(converter_key) - + # ************************************************************************* # ************************************************************************* - - def set_converter_selection_status(self, - converter_key, - selection_status: bool): - + + def set_converter_selection_status(self, converter_key, selection_status: bool): # if selection_status is True: add key to list, if not already there # if selection_status is False: remove key from list, if there already - + if selection_status: - if converter_key not in self.selected_converters: - self.selected_converters.append(converter_key) - + else: - while converter_key in self.selected_converters: - self.selected_converters.remove(converter_key) - + # ************************************************************************* # ************************************************************************* - - def set_converter_network_effects(self, - converter_key, - network_effects: dict) -> bool: - - # # validate converter key - + + def set_converter_network_effects( + self, converter_key, network_effects: dict + ) -> bool: + # # validate converter key + # if converter_key not in self.converters.keys(): - + # return False - + # # validate dict entries - + # for dict_tuple in network_effects.keys(): - + # if len(dict_tuple) != 4: # (network, node, output, time interval) - + # return False - + # if dict_tuple[0] not in self.networks.keys(): - + # return False - + # if dict_tuple[1] not in self.networks[dict_tuple[0]].nodes(): - + # return False - + # if dict_tuple[2] not in range( # self.converters[converter_key].number_outputs): - + # return False 
- + # if dict_tuple[3] not in range(self.number_time_intervals): - + # return False - + # # update dict - + # self.network_effects[ # (dict_tuple[0], # dict_tuple[1], - # converter_key, - # dict_tuple[2], + # converter_key, + # dict_tuple[2], # dict_tuple[3])] = networks_effects[] - + raise NotImplementedError - + # ************************************************************************* # ************************************************************************* - + def make_arc_mandatory(self, network_key, arc_key: tuple): """ Marks an arc as being mandatory in an energy system. @@ -294,38 +272,35 @@ class EnergySystem: None. """ - + if network_key in self.networks and len(arc_key) == 3: - if self.networks[network_key].has_edge(*arc_key): - # can only be used for selectable arcs: not preselected - - if self.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - - raise ValueError( - 'The arc selected has been preselected.') - - if ((network_key,*arc_key) not in - self.mandatory_arcs): - - self.mandatory_arcs.append((network_key,*arc_key)) - - else: # the arc does not exist - + + if ( + self.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): + raise ValueError("The arc selected has been preselected.") + + if (network_key, *arc_key) not in self.mandatory_arcs: + self.mandatory_arcs.append((network_key, *arc_key)) + + else: # the arc does not exist raise ValueError( - 'The arc key used does not match any arc in the network.') - - else: # something is up with the network key or arc key length - + "The arc key used does not match any arc in the network." + ) + + else: # something is up with the network key or arc key length raise ValueError( - 'Either the network key provided is incorrect or the arc key '+ - 'lacks the proper size.') - + "Either the network key provided is incorrect or the arc key " + + "lacks the proper size." + ) + # ************************************************************************* # ************************************************************************* - + def unmake_arc_mandatory(self, network_key, arc_key: tuple): """ Unmarks an arc from being mandatory in an energy system. @@ -348,46 +323,34 @@ class EnergySystem: None. """ - + if network_key in self.networks and len(arc_key) == 3: - - if ((network_key, *arc_key) in - self.mandatory_arcs): - + if (network_key, *arc_key) in self.mandatory_arcs: # if it does, remove it - - self.mandatory_arcs.pop( - (network_key, *arc_key) - ) - - else: # something is up with the network key or arc key length - + + self.mandatory_arcs.remove((network_key, *arc_key)) + + else: # something is up with the network key or arc key length raise ValueError( - 'Either the network key provided is incorrect or the arc key '+ - 'lacks the proper size.') + "Either the network key provided is incorrect or the arc key " + + "lacks the proper size." + ) + # ************************************************************************* # ************************************************************************* - - def set_maximum_number_parallel_arcs(self, - network_key, - node_a, - node_b, - limit: int): - + + def set_maximum_number_parallel_arcs(self, network_key, node_a, node_b, limit: int): # TODO: docstring - + if network_key in self.networks: - if self.networks[network_key].has_edge(node_a, node_b): - - self.max_number_parallel_arcs[ - (network_key,node_a, node_b)] = limit - + self.max_number_parallel_arcs[(network_key, node_a, node_b)] = limit + # ************************************************************************* # ************************************************************************* - + # TODO: create a method to identify mandatory nodes - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/problems/esipp/utils.py b/src/topupopt/problems/esipp/utils.py index f191307..2908cd1 100644 --- a/src/topupopt/problems/esipp/utils.py +++ b/src/topupopt/problems/esipp/utils.py @@ -22,681 +22,713 @@ from .network import Network # ***************************************************************************** # ***************************************************************************** + def review_final_network(network: Network): - # check that the network topology is a tree - if network.has_tree_topology(): - print('The network has a tree topology.') + if network.has_tree_topology(): + print("The network has a tree topology.") else: - print('The network does not have a tree topology.') - + print("The network does not have a tree topology.") + # check the existence of forward and reverse arcs between the same nodes has_forward_reverse_arcs(network, print_result=True) + # ***************************************************************************** # ***************************************************************************** -def has_forward_reverse_arcs( - network: Network, - print_result: bool = True - ) -> bool: + +def has_forward_reverse_arcs(network: Network, print_result: bool = True) -> bool: """Returns True if there are simultaneous forward and reverse arcs.""" # check the existence of forward and reverse arcs in the same segment - forward_reverse_arcs = [ # get the arcs selected + forward_reverse_arcs = [ # get the arcs selected arc_key[0:2] for arc_key in network.edges(keys=True) - if True in network.edges[arc_key][ - Network.KEY_ARC_TECH].options_selected - ] - forward_reverse_arcs = [ # get the selected arcs that exist both ways + if True in network.edges[arc_key][Network.KEY_ARC_TECH].options_selected + ] + forward_reverse_arcs = [ # get the selected arcs that exist both ways arc_key for arc_key in forward_reverse_arcs - if (arc_key[1],arc_key[0]) in forward_reverse_arcs - ] + if (arc_key[1], arc_key[0]) in forward_reverse_arcs + ] if print_result: if len(forward_reverse_arcs) == 0: print( - 'The network has no forward and reverse arcs in'+ - ' the same segment.' - ) + "The network has no forward and reverse arcs in" + " the same segment." ) else: - print( - 'The network has forward and reverse arcs in'+ - ' the same segment.'
- ) + print("The network has forward and reverse arcs in" + " the same segment.") + # ***************************************************************************** # ***************************************************************************** -def run_mvesipp_analysis(problem: InfrastructurePlanningProblem = None, - model_instance: pyo.ConcreteModel = None, - analyse_results: bool = False, - analyse_problem: bool = False): - + +def run_mvesipp_analysis( + problem: InfrastructurePlanningProblem = None, + model_instance: pyo.ConcreteModel = None, + analyse_results: bool = False, + analyse_problem: bool = False, +): # ************************************************************************* - + if model_instance != None and analyse_problem: - describe_mves(model_instance) # ************************************************************************* if model_instance != None and analyse_results: - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(model_instance) - - present_summary_results( - flow_in, + ( + flow_in, + flow_in_k, flow_out, flow_in_cost, - flow_out_revenue) + flow_out_revenue, + ) = compute_cost_volume_metrics(model_instance) + + present_summary_results(flow_in, flow_out, flow_in_cost, flow_out_revenue) # ************************************************************************* - + if problem != None and analyse_results: - # paths - + describe_solution(problem) # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** # prepare a dictionary with the key results - -def compute_cost_volume_metrics(instance: pyo.ConcreteModel, - read_directly_if_possible: bool = True): - + + +def compute_cost_volume_metrics( + instance: pyo.ConcreteModel, read_directly_if_possible: bool = True +): # select calculation method if read_directly_if_possible: - # total flow imported - + flow_in = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_if_glqpks[(g,l,q,p,k,s)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_if_glqpks[(g, l, q, p, k, s)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_imp[g] for k in instance.set_K_q[q] - for s in instance.set_S[(g,l,q,p,k)] - ) + for s in instance.set_S[(g, l, q, p, k)] ) - for g in instance.set_G + ) + for g in instance.set_G for q, p in instance.set_QP - } - + } + # total flow imported per network, scenario, period, time interval - + flow_in_k = { - (g,q,p,k): pyo.value( + (g, q, p, k): pyo.value( sum( - instance.var_if_glqpks[(g,l,q,p,k,s)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_if_glqpks[(g, l, q, p, k, s)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_imp[g] - #for k in instance.set_K_q[q] - for s in instance.set_S[(g,l,q,p,k)] - ) + # for k in instance.set_K_q[q] + for s in instance.set_S[(g, l, q, p, k)] ) - for g in instance.set_G + ) + for g in instance.set_G for q, p, k in instance.set_QPK - } - + } + # total flow exported - + flow_out = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_ef_glqpks[(g,l,q,p,k,s)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_ef_glqpks[(g, l, q, p, k, s)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_exp[g] for k in instance.set_K_q[q] - for s in instance.set_S[(g,l,q,p,k)] - ) + for s in instance.set_S[(g, l, q, p, k)] ) - for g in instance.set_G + ) + for g in 
instance.set_G for q, p in instance.set_QP - } - + } + # import costs - + flow_in_cost = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_ifc_glqpk[(g,l,q,p,k)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_ifc_glqpk[(g, l, q, p, k)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_imp[g] for k in instance.set_K_q[q] - ) ) + ) for g in instance.set_G for q, p in instance.set_QP - } - + } + # export revenue - + flow_out_revenue = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_efr_glqpk[(g,l,q,p,k)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_efr_glqpk[(g, l, q, p, k)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_exp[g] for k in instance.set_K_q[q] - ) ) + ) for g in instance.set_G for q, p in instance.set_QP - } - + } + else: - # total flow imported - + flow_in = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_v_glljqk[(g,l1,l2,j,q,k)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_v_glljqk[(g, l1, l2, j, q, k)] + * instance.param_c_time_qpk[(q, p, k)] for l1 in instance.set_L_imp[g] - for l2 in instance.set_L[g]-instance.set_L_imp[g] - for j in instance.set_J[(g,l1,l2)] + for l2 in instance.set_L[g] - instance.set_L_imp[g] + for j in instance.set_J[(g, l1, l2)] for k in instance.set_K - ) ) - for g in instance.set_G + ) + for g in instance.set_G for q, p in instance.set_QP - } - + } + # total flow imported per network, scenario, period, time interval - + flow_in_k = { - (g,q,p,k): pyo.value( + (g, q, p, k): pyo.value( sum( - instance.var_if_glqpks[(g,l,q,p,k,s)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_if_glqpks[(g, l, q, p, k, s)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_imp[g] - #for k in instance.set_K_q[q] - for s in instance.set_S[(g,l,q,p,k)] - ) + # for k in instance.set_K_q[q] + for s in instance.set_S[(g, l, q, p, k)] ) - for g in instance.set_G + ) + for g in instance.set_G for q, p, k in instance.set_QPK - } - + } + # total flow exported - + flow_out = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_v_glljqk[(g,l1,l2,j,q,k)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_v_glljqk[(g, l1, l2, j, q, k)] + * instance.param_c_time_qpk[(q, p, k)] for l2 in instance.set_L_exp[g] - for l1 in instance.set_L[g]-instance.set_L_exp[g] - for j in instance.set_J[(g,l1,l2)] + for l1 in instance.set_L[g] - instance.set_L_exp[g] + for j in instance.set_J[(g, l1, l2)] for k in instance.set_K - ) ) - for g in instance.set_G + ) + for g in instance.set_G for q, p in instance.set_QP - } - + } + # import costs - + flow_in_cost = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_if_glqpks[(g,l,q,p,k,s)]* - instance.param_p_glqks[(g,l,q,p,k,s)]* - instance.param_c_time_qpk[(q,p,k)] + instance.var_if_glqpks[(g, l, q, p, k, s)] + * instance.param_p_glqks[(g, l, q, p, k, s)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_imp[g] for k in instance.set_K_q[q] - for s in instance.set_S[(g,l,q,p,k)] - #for (_g,l,q,p,k,s) in instance.var_if_glqpks - #if g == _g - ) + for s in instance.set_S[(g, l, q, p, k)] + # for (_g,l,q,p,k,s) in instance.var_if_glqpks + # if g == _g ) - for g in instance.set_G + ) + for g in instance.set_G for q, p in instance.set_QP - } - + } + # export revenue - + flow_out_revenue = { - (g,q,p): pyo.value( + (g, q, p): pyo.value( sum( - instance.var_ef_glqpks[(g,l,q,p,k,s)]* - instance.param_p_glqpks[(g,l,q,p,k,s)]* - instance.param_c_time_qpk[(q,p,k)] + 
instance.var_ef_glqpks[(g, l, q, p, k, s)] + * instance.param_p_glqpks[(g, l, q, p, k, s)] + * instance.param_c_time_qpk[(q, p, k)] for l in instance.set_L_exp[g] for k in instance.set_K_q[q] - for s in instance.set_S[(g,l,q,p,k)] + for s in instance.set_S[(g, l, q, p, k)] # for (_g,l,q,p,k,s) in instance.var_ef_glqpks # if g == _g - ) ) - for g in instance.set_G + ) + for g in instance.set_G for q, p in instance.set_QP - } - + } + # ************************************************************************* # ************************************************************************* - + return flow_in, flow_in_k, flow_out, flow_in_cost, flow_out_revenue # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** + def compute_network_performance(solved_problem: InfrastructurePlanningProblem): - # gross results - + network_flows_dict = compute_gross_network_flows(solved_problem) - + # actual results - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(solved_problem.instance) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(solved_problem.instance) + # losses - + losses_dict = { - (g,q,p): abs( + (g, q, p): abs( # imports - flow_in[(g,q,p)] + flow_in[(g, q, p)] + # local supply - network_flows_dict['gross_supply_gq'][(g,q)] + network_flows_dict["gross_supply_gq"][(g, q)] - # exports - flow_out[(g,q,p)] + flow_out[(g, q, p)] - # local demand - network_flows_dict['gross_demand_gq'][(g,q)] - ) + network_flows_dict["gross_demand_gq"][(g, q)] + ) for q in solved_problem.assessment_keys for p in solved_problem.reporting_periods[q] for g in solved_problem.networks - } - + } + return ( network_flows_dict, - losses_dict, - flow_in, - flow_in_k, + losses_dict, + flow_in, + flow_in_k, flow_out, flow_in_cost, - flow_out_revenue - ) - + flow_out_revenue, + ) + + # ***************************************************************************** # ***************************************************************************** # provide a summary of the results -def present_summary_results(flow_in: dict, - flow_out: dict, - flow_in_cost: dict, - flow_out_revenue: dict, - flow_unit: str = 'MWh', - currency: str = 'EUR'): - + +def present_summary_results( + flow_in: dict, + flow_out: dict, + flow_in_cost: dict, + flow_out_revenue: dict, + flow_unit: str = "MWh", + currency: str = "EUR", +): # ************************************************************************* # ************************************************************************* - + if len(flow_in) != 0: + print(">> Imports") - print('>> Imports') - - for (g,q,p) in flow_in: - - print('Assessment: '+str(q)) - - print('Network: '+str(g)) - - print('Volume: '+str(flow_in[(g,q,p)]) + ' ' + str(flow_unit)) - - print('Cost: '+str(flow_in_cost[(g,q,p)]) + ' ' + str(currency)) - - if flow_in[(g,q,p)] != 0: - + for g, q, p in flow_in: + print("Assessment: " + str(q)) + + print("Network: " + str(g)) + + print("Volume: " + str(flow_in[(g, q, p)]) + " " + str(flow_unit)) + + print("Cost: " + str(flow_in_cost[(g, q, p)]) + " " + str(currency)) + + if flow_in[(g, q, p)] != 0: print( - 'Average price: '+ - str(flow_in_cost[(g,q,p)]/flow_in[(g,q,p)]) + - ' '+ - str(currency)+'/'+str(flow_unit) - ) - + 
"Average price: " + + str(flow_in_cost[(g, q, p)] / flow_in[(g, q, p)]) + + " " + + str(currency) + + "/" + + str(flow_unit) + ) + else: # no flow - - print( - 'Average price: N/A (no flow imports are set to take place).') + print("Average price: N/A (no flow imports are set to take place).") # ************************************************************************* # ************************************************************************* - + if len(flow_out) != 0: - - print('>> Exports') - - for (g,q,p) in flow_out: - - print('Assessment: '+str(q)) - - print('Network: '+str(g)) - - print('Volume: '+str(flow_out[(g,q,p)]) + ' ' + str(flow_unit)) - - print('Cost: '+str(flow_out_revenue[(g,q,p)]) + ' ' + str(currency)) - - if flow_out[(g,q,p)] != 0: - + print(">> Exports") + + for g, q, p in flow_out: + print("Assessment: " + str(q)) + + print("Network: " + str(g)) + + print("Volume: " + str(flow_out[(g, q, p)]) + " " + str(flow_unit)) + + print("Cost: " + str(flow_out_revenue[(g, q, p)]) + " " + str(currency)) + + if flow_out[(g, q, p)] != 0: print( - 'Average price: '+ - str(flow_out_revenue[(g,q,p)]/flow_out[(g,q,p)]) + - ' '+ - str(currency)+'/'+str(flow_unit) - ) - + "Average price: " + + str(flow_out_revenue[(g, q, p)] / flow_out[(g, q, p)]) + + " " + + str(currency) + + "/" + + str(flow_unit) + ) + else: # no flow - - print( - 'Average price: N/A (no flow exports are set to take place).') - + print("Average price: N/A (no flow exports are set to take place).") + # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** - + + def unused_node_key(network: nx.MultiDiGraph): """Returns an unused node key.""" # if the network has N nodes, checking every natural number up to N+1 will # certainly return an unused node_key (worst case: N+1th iteration) - for i in range(network.number_of_nodes()+1): + for i in range(network.number_of_nodes() + 1): # check if it exists if not network.has_node(i): # it doesn't, return it return i - + + # ***************************************************************************** # ***************************************************************************** # TODO: document + def compute_gross_network_flows(problem: InfrastructurePlanningProblem) -> dict: - gross_supply_g = {} - + gross_demand_g = {} - + gross_supply_gq = {} - + gross_demand_gq = {} - + gross_supply_gqk = {} - + gross_demand_gqk = {} - + for g, net in problem.networks.items(): - end_use_node_keys = tuple( - node_key - for node_key in net.nodes() - if Network.KEY_NODE_BASE_FLOW in net.nodes[node_key] + node_key + for node_key in net.nodes() + if Network.KEY_NODE_BASE_FLOW in net.nodes[node_key] if len(net.nodes[node_key][Network.KEY_NODE_BASE_FLOW]) != 0 - ) - + ) + # flow: q, k gross_demand_qk = { - (g,q,k): sum( - net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q,k)] + (g, q, k): sum( + net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)] for node_key in end_use_node_keys - if net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q,k)] >= 0 - ) + if net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)] >= 0 + ) for q in problem.assessment_keys for k in range(problem.number_time_intervals[q]) - } + } gross_supply_qk = { - (g,q,k): -sum( - net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q,k)] + (g, q, k): -sum( + 
net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)] for node_key in end_use_node_keys - if net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q,k)] < 0 - ) + if net.nodes[node_key][Network.KEY_NODE_BASE_FLOW][(q, k)] < 0 + ) for q in problem.assessment_keys for k in range(problem.number_time_intervals[q]) - } - + } + # (g,q,k) - + gross_supply_gqk.update(gross_supply_qk) gross_demand_gqk.update(gross_demand_qk) - + # (g,q) - + gross_supply_gq.update( - {(g,q): sum(gross_supply_qk[(g,q,k)] - for k in range(problem.number_time_intervals[q])) + { + (g, q): sum( + gross_supply_qk[(g, q, k)] + for k in range(problem.number_time_intervals[q]) + ) for q in problem.assessment_keys - } - ) + } + ) gross_demand_gq.update( - {(g,q): sum(gross_demand_qk[(g,q,k)] - for k in range(problem.number_time_intervals[q])) + { + (g, q): sum( + gross_demand_qk[(g, q, k)] + for k in range(problem.number_time_intervals[q]) + ) for q in problem.assessment_keys - } - ) - + } + ) + # g - - gross_supply_g.update( - {g: sum(supply for supply in gross_supply_qk.values())} - ) - gross_demand_g.update( - {g: sum(demand for demand in gross_demand_qk.values())} - ) - + + gross_supply_g.update({g: sum(supply for supply in gross_supply_qk.values())}) + gross_demand_g.update({g: sum(demand for demand in gross_demand_qk.values())}) + return { - 'gross_supply_gqk': gross_supply_gqk, - 'gross_demand_gqk': gross_demand_gqk, - 'gross_supply_gq': gross_supply_gq, - 'gross_demand_gq': gross_demand_gq, - 'gross_supply_g': gross_supply_g, - 'gross_demand_g': gross_demand_g, - } - + "gross_supply_gqk": gross_supply_gqk, + "gross_demand_gqk": gross_demand_gqk, + "gross_supply_gq": gross_supply_gq, + "gross_demand_gq": gross_demand_gq, + "gross_supply_g": gross_supply_g, + "gross_demand_g": gross_demand_g, + } + + # ***************************************************************************** # ***************************************************************************** + def describe_mves(obj: pyo.ConcreteModel): - # describe the multi-vector energy system - - print('******************************************************************') - print('******************************************************************') - + + print("******************************************************************") + print("******************************************************************") + # mves - + # the number of networks - - print('This MVES consists of '+str(len(obj.set_G))+' network(s).') - + + print("This MVES consists of " + str(len(obj.set_G)) + " network(s).") + # nodes - + for g in obj.set_G: - # ********************************************************************* - + # the number of nodes for each network - - print('Network '+str(g)+' has '+str(len(obj.set_L[g]))+' node(s).') - print('Network '+str(g)+'\'s nodes: '+str([l for l in obj.set_L[g]])) - + + print("Network " + str(g) + " has " + str(len(obj.set_L[g])) + " node(s).") + print("Network " + str(g) + "'s nodes: " + str([l for l in obj.set_L[g]])) + # the number of import nodes for each network - - print('Network '+str(g)+' has '+ - str(len(obj.set_L_imp[g]))+' import node(s).') + + print( + "Network " + + str(g) + + " has " + + str(len(obj.set_L_imp[g])) + + " import node(s)." 
+ ) if len(obj.set_L_imp[g]) != 0: - print('Network '+str(g)+'\'s import nodes: '+ - str([l for l in obj.set_L_imp[g]])) - + print( + "Network " + + str(g) + + "'s import nodes: " + + str([l for l in obj.set_L_imp[g]]) + ) + # the number of export nodes for each network - - print('Network '+str(g)+' has '+ - str(len(obj.set_L_exp[g]))+' export node(s).') + + print( + "Network " + + str(g) + + " has " + + str(len(obj.set_L_exp[g])) + + " export node(s)." + ) if len(obj.set_L_exp[g]) != 0: - print('Network '+str(g)+'\'s export nodes: ' - +str([l for l in obj.set_L_exp[g]])) - + print( + "Network " + + str(g) + + "'s export nodes: " + + str([l for l in obj.set_L_exp[g]]) + ) + # the number of other nodes for each network - - other_nodes = [node - for node in obj.set_L[g] - if node not in obj.set_L_exp[g] - if node not in obj.set_L_imp[g]] - - # number_other_nodes = ( + + other_nodes = [ + node + for node in obj.set_L[g] + if node not in obj.set_L_exp[g] + if node not in obj.set_L_imp[g] + ] + + # number_other_nodes = ( # len(obj.set_L[g])-len(obj.set_L_exp[g])-len(obj.set_L_imp[g]) # ) - + number_other_nodes = len(other_nodes) - - print('Network '+str(g)+' has '+str(number_other_nodes)+' other nodes.') + + print("Network " + str(g) + " has " + str(number_other_nodes) + " other nodes.") if number_other_nodes != 0: - print('Network '+str(g)+'\'s other nodes: '+str(other_nodes)) - + print("Network " + str(g) + "'s other nodes: " + str(other_nodes)) + # the static flow needs for other nodes - + static_flow_limits = [ - (min([pyo.value(obj.param_v_base_glqk[(g,node,q,k)]) - for q in obj.set_Q - for k in obj.set_K_q[q]]), - max([pyo.value(obj.param_v_base_glqk[(g,node,q,k)]) - for q in obj.set_Q - for k in obj.set_K_q[q]])) + ( + min( + [ + pyo.value(obj.param_v_base_glqk[(g, node, q, k)]) + for q in obj.set_Q + for k in obj.set_K_q[q] + ] + ), + max( + [ + pyo.value(obj.param_v_base_glqk[(g, node, q, k)]) + for q in obj.set_Q + for k in obj.set_K_q[q] + ] + ), + ) for node in other_nodes - ] - - print('Min. and max. flow needs for other nodes: '+ - str(static_flow_limits)) - + ] + + print("Min. and max. flow needs for other nodes: " + str(static_flow_limits)) + # ********************************************************************* - + # arcs - + # the number of arcs under consideration - - potential_directed_arcs = [(l1,l2) - for (gx,l1,l2) in obj.set_GLL - if gx == g - if (gx,l1,l2) in obj.set_J - if len(obj.set_J[(gx,l1,l2)]) != 0] - - print('Network '+str(g)+' considers '+ - str(len(potential_directed_arcs))+' potential directed arcs.') + + potential_directed_arcs = [ + (l1, l2) + for (gx, l1, l2) in obj.set_GLL + if gx == g + if (gx, l1, l2) in obj.set_J + if len(obj.set_J[(gx, l1, l2)]) != 0 + ] + + print( + "Network " + + str(g) + + " considers " + + str(len(potential_directed_arcs)) + + " potential directed arcs." 
+ ) if number_other_nodes != 0: - print('Network '+str(g)+'\'s potential directed arcs: ' - +str(potential_directed_arcs)) - + print( + "Network " + + str(g) + + "'s potential directed arcs: " + + str(potential_directed_arcs) + ) + # unreachable nodes - + # ********************************************************************* - + # ************************************************************************* - + # systems - + # the number of systems - + number_systems = len(obj.set_I) - + number_optional_systems = len(obj.set_I_new) - + if number_systems == 0: - - print('This MVES has no systems under consideration.') - + print("This MVES has no systems under consideration.") + else: - if number_systems == 1: - - print('This MVES considers a single system.') - - print('System: '+str(obj.set_I)) - + print("This MVES considers a single system.") + + print("System: " + str(obj.set_I)) + else: - - print('This MVES considers '+str(number_systems)+ ' systems.') - - print('Systems: '+str(obj.set_I)) - + print("This MVES considers " + str(number_systems) + " systems.") + + print("Systems: " + str(obj.set_I)) + if number_optional_systems == 1: - if number_systems == 1: - - print('The only system under consideration is also optional.') - + print("The only system under consideration is also optional.") + else: - - print('One of the aforementioned systems is optional.') - - print('Optional system: '+str(obj.set_I_new)) - + print("One of the aforementioned systems is optional.") + + print("Optional system: " + str(obj.set_I_new)) + else: - - print('Among these systems, '+str(number_optional_systems)+ - ' are optional.') - - print('Optional systems: '+str(obj.set_I_new)) - + print( + "Among these systems, " + + str(number_optional_systems) + + " are optional." + ) + + print("Optional systems: " + str(obj.set_I_new)) + # how many have dimensionable state amplitudes - + # how many have dimensionable upper (positive) state limits - + # how many have dimensionable lower (negative) state limits - + # how many have dimensionable input signals - - - - print('******************************************************************') - print('******************************************************************') - + + print("******************************************************************") + print("******************************************************************") + + # ***************************************************************************** # ***************************************************************************** # data structure - + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** - -def plot_networks(ipp: InfrastructurePlanningProblem, - ax = None, - show = False, - filepath: str = None, - filename_radical: str = None): - + + +def plot_networks( + ipp: InfrastructurePlanningProblem, + ax=None, + show=False, + filepath: str = None, + filename_radical: str = None, +): # TODO: include hints about operational performance in the plot - - # TODO: find a way to plot parallel arcs without overlap - + + # TODO: find a way to plot parallel arcs without overlap + # NOTE: the code below works (could be used to submit a PR) - + # G=nx.MultiGraph ([(1,2),(1,2),(1,2),(3,1),(3,2)]) # pos = nx.random_layout(G) # nx.draw_networkx_nodes(G, pos, node_color = 'r', node_size = 100, alpha = 1)
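The NOTE above points at the workaround applied further down in plot_networks: each parallel arc of a MultiDiGraph is drawn with its own curvature through ax.annotate, so arcs sharing the same endpoints no longer overlap. A minimal, self-contained sketch of that idea, with a made-up toy graph (the layout seed mirrors the seed_number used below):

    import matplotlib.pyplot as plt
    import networkx as nx

    # toy multigraph: three parallel arcs between nodes 1 and 2
    G = nx.MultiDiGraph([(1, 2, 0), (1, 2, 1), (1, 2, 2), (3, 1, 0), (3, 2, 0)])
    pos = nx.random_layout(G, seed=381)
    nx.draw_networkx_nodes(G, pos, node_color="r", node_size=100, alpha=1)
    ax = plt.gca()
    for u, v, k in G.edges(keys=True):
        ax.annotate(
            "",
            xy=pos[v],
            xycoords="data",
            xytext=pos[u],
            textcoords="data",
            arrowprops=dict(
                arrowstyle="->",
                color="0.5",
                shrinkA=5,
                shrinkB=5,
                # curvature grows with the edge key, separating parallel arcs
                connectionstyle="arc3,rad=" + str(0.3 * k),
            ),
        )
    plt.axis("off")
    plt.show()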
@@ -714,7 +746,7 @@ def plot_networks(ipp: InfrastructurePlanningProblem, # ) # plt.axis('off') # plt.show() - + # G=nx.MultiDiGraph([(1,2,0),(1,2,1),(1,2,2),(3,1,0),(3,2,0)]) # pos = nx.random_layout(G) # nx.draw_networkx_nodes(G, pos, node_color = 'r', node_size = 100, alpha = 1) @@ -732,181 +764,190 @@ def plot_networks(ipp: InfrastructurePlanningProblem, # ) # plt.axis('off') # plt.show() - + # ************************************************************************* # ************************************************************************* - + plt.rcParams["figure.figsize"] = [7.50, 3.50] plt.rcParams["figure.autolayout"] = True - + # ************************************************************************* # ************************************************************************* - - node_shapes = ['o','s','d'] - + + node_shapes = ["o", "s", "d"] + seed_number = 381 - + node_size_offset = 300 - + node_size_gain = 100 - + arc_width_gain = 5 - + # for each network - + for grid_key, grid in ipp.networks.items(): - # create a figure - + fig, ax = plt.subplots() - + # create a plot showing which nodes are connected by which arcs - + # get the position of each node - + # pos = { # node_key:grid.nodes[node_key][Network.STR_DATA_NODE].position # for node_key in grid.nodes()} - + pos = nx.spring_layout(grid, seed=seed_number) - + # peak static balance - + peak_base_flow = { node_key: sum( grid.nodes[node_key][Network.KEY_NODE_BASE_FLOW][sb_key] for sb_key, sb in grid.nodes[node_key][ - Network.KEY_NODE_BASE_FLOW].items() - ) if node_key in grid.source_sink_nodes else 0 + Network.KEY_NODE_BASE_FLOW + ].items() + ) + if node_key in grid.source_sink_nodes + else 0 for node_key in grid.nodes() - } - + } + # max supply/demand rating - - max_node_demand = max([peak_base_flow[node_key] - for node_key in grid.nodes()]) - + + max_node_demand = max([peak_base_flow[node_key] for node_key in grid.nodes()]) + # max_node_demand = max([ # sum([abs(sb_glk) # for sb_glk in grid.nodes[node_key][ # Network.KEY_NODE_BASE_FLOW]]) # for node_key in grid.nodes() # ]) - + # select the node sizes - + node_sizes = { - node_key: - node_size_offset+ # offset - node_size_gain* # gain - peak_base_flow[node_key]/max_node_demand + node_key: node_size_offset + + node_size_gain # offset + * peak_base_flow[node_key] # gain + / max_node_demand for node_key in grid.nodes() - } - + } + # select the relevant edges - + selected_edges = [ - arc_key # grid.edges[arc_key] + arc_key # grid.edges[arc_key] for arc_key in grid.edges(keys=True) - if True in grid.edges[arc_key][ - Network.KEY_ARC_TECH].options_selected - ] - + if True in grid.edges[arc_key][Network.KEY_ARC_TECH].options_selected + ] + # select the edge width based on the arc capacity - + edge_widths = [ - arc_width_gain* - grid.edges[arc_key][ - Network.KEY_ARC_TECH].capacity[option_index]/ - max(grid.edges[arc_key][Network.KEY_ARC_TECH].capacity) + arc_width_gain + * grid.edges[arc_key][Network.KEY_ARC_TECH].capacity[option_index] + / max(grid.edges[arc_key][Network.KEY_ARC_TECH].capacity) for arc_key in grid.edges(keys=True) for option_index, arc_tech_option in enumerate( - grid.edges[arc_key][ - Network.KEY_ARC_TECH].options_selected) + grid.edges[arc_key][Network.KEY_ARC_TECH].options_selected + ) if arc_tech_option - ] - + ] + if len(selected_edges) != len(edge_widths): - raise Exception - + pass - + M = len(selected_edges) edge_colors = range(2, M + 2) edge_alphas = [(5 + i) / (M + 4) for i in range(M)] cmap = plt.cm.plasma - + # ********************************************************************* - + #
plot import nodes - + if len(grid.import_nodes) != 0: - - nodes = nx.draw_networkx_nodes(grid, - pos, - nodelist=grid.import_nodes, - node_shape='s', - node_size=[ - node_size - for node_key, node_size in node_sizes.items() - if node_key in grid.import_nodes], - node_color="indigo", - label='Import node') - + nodes = nx.draw_networkx_nodes( + grid, + pos, + nodelist=grid.import_nodes, + node_shape="s", + node_size=[ + node_size + for node_key, node_size in node_sizes.items() + if node_key in grid.import_nodes + ], + node_color="indigo", + label="Import node", + ) + # plot export nodes - + if len(grid.export_nodes) != 0: - - nodes = nx.draw_networkx_nodes(grid, - pos, - nodelist=grid.export_nodes, - node_shape='d', - node_size=[ - node_size - for node_key, node_size in node_sizes.items() - if node_key in grid.export_nodes], - node_color="orange", - label='Export node') - + nodes = nx.draw_networkx_nodes( + grid, + pos, + nodelist=grid.export_nodes, + node_shape="d", + node_size=[ + node_size + for node_key, node_size in node_sizes.items() + if node_key in grid.export_nodes + ], + node_color="orange", + label="Export node", + ) + # plot other/normal nodes - + other_nodes = list(grid) - + for node in grid.import_nodes: - other_nodes.remove(node) - + for node in grid.export_nodes: - other_nodes.remove(node) - - nodes = nx.draw_networkx_nodes(grid, - pos, - nodelist=other_nodes, - node_shape='o', - node_size=[ - node_size - for node_key, node_size in node_sizes.items() - if node_key in other_nodes], - node_color="thistle", - label='Normal node') - + + nodes = nx.draw_networkx_nodes( + grid, + pos, + nodelist=other_nodes, + node_shape="o", + node_size=[ + node_size + for node_key, node_size in node_sizes.items() + if node_key in other_nodes + ], + node_color="thistle", + label="Normal node", + ) + # multi-edges - + ax = plt.gca() for e in selected_edges: - ax.annotate("", - xy=pos[e[1]], xycoords='data', - xytext=pos[e[0]], textcoords='data', - arrowprops=dict(arrowstyle="->", color="0.5", - shrinkA=5, shrinkB=5, - patchA=None, patchB=None, - connectionstyle="arc3,rad=rrr".replace('rrr',str(0.3*e[2]) - ), - ), - ) - + ax.annotate( + "", + xy=pos[e[1]], + xycoords="data", + xytext=pos[e[0]], + textcoords="data", + arrowprops=dict( + arrowstyle="->", + color="0.5", + shrinkA=5, + shrinkB=5, + patchA=None, + patchB=None, + connectionstyle="arc3,rad=rrr".replace("rrr", str(0.3 * e[2])), + ), + ) + # edges = nx.draw_networkx_edges( # grid, # pos, @@ -921,195 +962,228 @@ def plot_networks(ipp: InfrastructurePlanningProblem, # width=edge_widths, # #connectionstyle='arc3, rad = 0.1' # ) - + labels = nx.draw_networkx_labels(grid, pos) - + # # set alpha value for each edge # for i in range(M): # edges[i].set_alpha(edge_alphas[i]) - + # pc = mpl.collections.PatchCollection(edges, cmap=cmap) # pc.set_array(edge_colors) # plt.colorbar(pc,ax=ax) - + ax = plt.gca() ax.set_axis_off() plt.legend() - + if filepath is not None and filename_radical is not None: - # save file - - plt.savefig(filepath+filename_radical+str(grid_key)) - + + plt.savefig(filepath + filename_radical + str(grid_key)) + if show: - plt.show() - - # G = nx.balanced_tree(3,2,create_using=nx.MultiDiGraph) + + # G = nx.balanced_tree(3,2,create_using=nx.MultiDiGraph) # G.add_edge(1,3) # G.add_edge(2,4) # G.add_edge(5,1) # G.add_edge(1,4,data='savage') - # G.add_edge(1,4,data='traszq') + # G.add_edge(1,4,data='traszq') # fig, ax = plt.subplots(figsize=(25,25)) # pos = nx.spring_layout(G, seed=225) # Seed for reproducible layout # nx.draw(G, pos) # 
plt.show() - + # ************************************************************************* # ************************************************************************* - + + # ***************************************************************************** # ***************************************************************************** + def is_integer(variable: float, integrality_tolerance: float) -> bool: """Returns True if a given number qualifies as an integer.""" if integrality_tolerance >= 0.5: raise ValueError( - 'A tolerance greater than or equal to 0.5 '+ - 'renders the concept of integrality useless.' - ) - return not (abs(round(variable)-variable) > integrality_tolerance) - + "A tolerance greater than or equal to 0.5 " + + "renders the concept of integrality useless." + ) + return not (abs(round(variable) - variable) > integrality_tolerance) + + # ***************************************************************************** # ***************************************************************************** - + + def describe_solution(ipp: InfrastructurePlanningProblem): - # ************************************************************************* - - print('******************************************************************') - + + print("******************************************************************") + # for each grid - + for grid_key, net in ipp.networks.items(): - # describe the path from import nodes to demand nodes - - print('Flow path analysis: grid '+str(grid_key)) - + + print("Flow path analysis: grid " + str(grid_key)) + # for each node - + for node_key in net.nodes: - # as long as it is an import node - + if node_key not in net.import_nodes: - continue - + # for every node - + for node2_key in net.nodes: - # except node_key or other import nodes - - if (node_key is node2_key or - node2_key in net.import_nodes): - + + if node_key is node2_key or node2_key in net.import_nodes: continue - + # or if there is no path - - if nx.has_path(net, - node_key, - node2_key) == False: - + + if nx.has_path(net, node_key, node2_key) == False: continue - + # for each viable/potential path - - for path in nx.all_simple_paths(net, - node_key, - node2_key): - + + for path in nx.all_simple_paths(net, node_key, node2_key): # check if all the pairs of nodes on the path were selected - + # if multiple technologies were selected, add the capacities - + arc_flow_capacities = [ - sum(net.edges[ - (path[node_pair],path[node_pair+1],j)][ - Network.KEY_ARC_TECH].capacity[ - net.edges[ - (path[node_pair],path[node_pair+1],j)][ - Network.KEY_ARC_TECH].options_selected.index(True)] - for j in net._adj[ - path[node_pair]][path[node_pair+1]] - if True in net.edges[(path[node_pair],path[node_pair+1],j)][ - Network.KEY_ARC_TECH].options_selected) - for node_pair in range(len(path)-1) - if (path[node_pair],path[node_pair+1]) in net.edges - ] - + sum( + net.edges[(path[node_pair], path[node_pair + 1], j)][ + Network.KEY_ARC_TECH + ].capacity[ + net.edges[(path[node_pair], path[node_pair + 1], j)][ + Network.KEY_ARC_TECH + ].options_selected.index(True) + ] + for j in net._adj[path[node_pair]][path[node_pair + 1]] + if True + in net.edges[(path[node_pair], path[node_pair + 1], j)][ + Network.KEY_ARC_TECH + ].options_selected + ) + for node_pair in range(len(path) - 1) + if (path[node_pair], path[node_pair + 1]) in net.edges + ] + # skip if at least one arc has zero capacity - + if 0 in arc_flow_capacities: - continue - + arc_tech_efficiencies = [ - 
(min(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency[(0,k)] - for uv_k in net._adj[ - path[node_pair]][path[node_pair+1]] - if True in net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].options_selected - for k in range(len(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency))), - max(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency[(0,k)] - for uv_k in net._adj[ - path[node_pair]][path[node_pair+1]] - if True in net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].options_selected - for k in range(len(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency)))) - for node_pair in range(len(path)-1) - if (path[node_pair],path[node_pair+1]) in net.edges - ] - + ( + min( + net.edges[(path[node_pair], path[node_pair + 1], uv_k)][ + Network.KEY_ARC_TECH + ].efficiency[(0, k)] + for uv_k in net._adj[path[node_pair]][ + path[node_pair + 1] + ] + if True + in net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].options_selected + for k in range( + len( + net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].efficiency + ) + ) + ), + max( + net.edges[(path[node_pair], path[node_pair + 1], uv_k)][ + Network.KEY_ARC_TECH + ].efficiency[(0, k)] + for uv_k in net._adj[path[node_pair]][ + path[node_pair + 1] + ] + if True + in net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].options_selected + for k in range( + len( + net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].efficiency + ) + ) + ), + ) + for node_pair in range(len(path) - 1) + if (path[node_pair], path[node_pair + 1]) in net.edges + ] + max_static_flow = [ - max([net.nodes[node][ - Network.KEY_NODE_BASE_FLOW][(0,k)] - for k in range(len(ipp.networks[ - grid_key].nodes[node][ - Network.KEY_NODE_BASE_FLOW])) - ]) + max( + [ + net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)] + for k in range( + len( + ipp.networks[grid_key].nodes[node][ + Network.KEY_NODE_BASE_FLOW + ] + ) + ) + ] + ) if node in net.source_sink_nodes else 0 for node in path if node in net.nodes - ] - + ] + min_static_flow = [ - min([net.nodes[node][ - Network.KEY_NODE_BASE_FLOW][(0,k)] - for k in range(len(ipp.networks[ - grid_key].nodes[node][ - Network.KEY_NODE_BASE_FLOW])) - ]) + min( + [ + net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)] + for k in range( + len( + ipp.networks[grid_key].nodes[node][ + Network.KEY_NODE_BASE_FLOW + ] + ) + ) + ] + ) if node in net.source_sink_nodes else 0 for node in path if node in net.nodes - ] - + ] + # for each pair of nodes on the path - - if len(arc_flow_capacities) == len(path)-1: - - print('**********************************************') - print('Path: '+str(path)) - print('Max. static flow: '+str(max_static_flow)) - print('Min. static flow: '+str(min_static_flow)) - print('Capacities: '+str(arc_flow_capacities)) - print('Efficiencies: '+str(arc_tech_efficiencies)) - - for arc_flow_index in range(len(arc_flow_capacities)-1): - - if (arc_flow_capacities[arc_flow_index] < - arc_flow_capacities[arc_flow_index+1]): - + + if len(arc_flow_capacities) == len(path) - 1: + print("**********************************************") + print("Path: " + str(path)) + print("Max. static flow: " + str(max_static_flow)) + print("Min. 
static flow: " + str(min_static_flow)) + print("Capacities: " + str(arc_flow_capacities)) + print("Efficiencies: " + str(arc_tech_efficiencies)) + + for arc_flow_index in range(len(arc_flow_capacities) - 1): + if ( + arc_flow_capacities[arc_flow_index] + < arc_flow_capacities[arc_flow_index + 1] + ): # the flow capacities are increasing, which # usually indicates suboptimality - + # tech_options_first = [ # tech[Network.KEY_ARC_TECH_CAPACITY] # for tech in ipp.networks[ @@ -1118,7 +1192,7 @@ def describe_solution(ipp: InfrastructurePlanningProblem): # path[arc_flow_index+1])][ # net.KEY_ARC_TECH] # if True in tech.options_selected] - + # tech_options_sec = [ # tech[net.KEY_ARC_TECH_CAPACITY] # for tech in ipp.networks[ @@ -1127,157 +1201,196 @@ def describe_solution(ipp: InfrastructurePlanningProblem): # path[arc_flow_index+2])][ # net.KEY_ARC_TECH] # if True in tech.options_selected] - - #print('******************') - print('Increasing capacities along the flow path have been detected between nodes ' - +str(path[arc_flow_index]) - +' and ' - +str(path[arc_flow_index+2])+'.') - #print(tech_options_first) - #print(tech_options_sec) - #print('******************') - + + # print('******************') + print( + "Increasing capacities along the flow path have been detected between nodes " + + str(path[arc_flow_index]) + + " and " + + str(path[arc_flow_index + 2]) + + "." + ) + # print(tech_options_first) + # print(tech_options_sec) + # print('******************') + # ***************************************************************** - + # ********************************************************************* - + # for each node - + for node_key in net.nodes: - # as long as it is an export node - + if node_key not in net.export_nodes: - continue - + # for every node - + for node2_key in net.nodes: - # except node_key or other export nodes - - if (node_key is node2_key or - node2_key in net.export_nodes): - + + if node_key is node2_key or node2_key in net.export_nodes: continue - + # or if there is no path - - if nx.has_path(net, - node2_key, - node_key) == False: - + + if nx.has_path(net, node2_key, node_key) == False: continue - + # for each viable/potential path - - for path in nx.all_simple_paths(net, - node2_key, - node_key): - + + for path in nx.all_simple_paths(net, node2_key, node_key): # check if all the pairs of nodes on the path were selected - + # if multiple technologies were selected, add the capacities - + arc_flow_capacities = [ - sum(net.edges[ - (path[node_pair],path[node_pair+1],k)][ - Network.KEY_ARC_TECH].capacity[ - net.edges[ - (path[node_pair],path[node_pair+1],k)][ - Network.KEY_ARC_TECH].options_selected.index(True)] - for k in net._adj[ - path[node_pair]][path[node_pair+1]] - if True in net.edges[ - (path[node_pair],path[node_pair+1],k)][ - Network.KEY_ARC_TECH].options_selected) - for node_pair in range(len(path)-1) - if (path[node_pair],path[node_pair+1]) in net.edges - ] - + sum( + net.edges[(path[node_pair], path[node_pair + 1], k)][ + Network.KEY_ARC_TECH + ].capacity[ + net.edges[(path[node_pair], path[node_pair + 1], k)][ + Network.KEY_ARC_TECH + ].options_selected.index(True) + ] + for k in net._adj[path[node_pair]][path[node_pair + 1]] + if True + in net.edges[(path[node_pair], path[node_pair + 1], k)][ + Network.KEY_ARC_TECH + ].options_selected + ) + for node_pair in range(len(path) - 1) + if (path[node_pair], path[node_pair + 1]) in net.edges + ] + # skip if at least one arc has zero capacity - + if 0 in arc_flow_capacities: - continue - + 
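The export-side path analysis here mirrors the import-side block earlier in describe_solution: along every simple path, the per-hop flow capacities are collected, and the check further below flags any hop whose capacity exceeds that of the hop before it, since increasing capacities along a flow path usually indicate suboptimality. A minimal sketch of that core check, using a hypothetical three-arc graph with invented capacity values in place of the Network.KEY_ARC_TECH records:

    import networkx as nx

    # hypothetical digraph: the capacities are made up for illustration;
    # the real code reads them from the selected arc technology options
    G = nx.DiGraph()
    G.add_edge("imp", "a", capacity=10.0)
    G.add_edge("a", "b", capacity=6.0)
    G.add_edge("b", "exp", capacity=8.0)  # larger than the hop before it

    for path in nx.all_simple_paths(G, "imp", "exp"):
        # per-hop capacities along the path
        capacities = [
            G.edges[u, v]["capacity"] for u, v in zip(path[:-1], path[1:])
        ]
        if 0 in capacities:
            continue  # unusable path, as in the original
        for i in range(len(capacities) - 1):
            if capacities[i] < capacities[i + 1]:
                print(
                    "Increasing capacities along the flow path have been "
                    "detected between nodes "
                    + str(path[i]) + " and " + str(path[i + 2]) + "."
                )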
arc_tech_efficiencies = [ - (min(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency[(0,k)] - for uv_k in net._adj[ - path[node_pair]][path[node_pair+1]] - if True in net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].options_selected - for k in range(len(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency))), - max(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency[(0,k)] - for uv_k in net._adj[ - path[node_pair]][path[node_pair+1]] - if True in net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].options_selected - for k in range(len(net.edges[(path[node_pair],path[node_pair+1],uv_k)][Network.KEY_ARC_TECH].efficiency)))) - for node_pair in range(len(path)-1) - if (path[node_pair],path[node_pair+1]) in net.edges - ] - + ( + min( + net.edges[(path[node_pair], path[node_pair + 1], uv_k)][ + Network.KEY_ARC_TECH + ].efficiency[(0, k)] + for uv_k in net._adj[path[node_pair]][ + path[node_pair + 1] + ] + if True + in net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].options_selected + for k in range( + len( + net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].efficiency + ) + ) + ), + max( + net.edges[(path[node_pair], path[node_pair + 1], uv_k)][ + Network.KEY_ARC_TECH + ].efficiency[(0, k)] + for uv_k in net._adj[path[node_pair]][ + path[node_pair + 1] + ] + if True + in net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].options_selected + for k in range( + len( + net.edges[ + (path[node_pair], path[node_pair + 1], uv_k) + ][Network.KEY_ARC_TECH].efficiency + ) + ) + ), + ) + for node_pair in range(len(path) - 1) + if (path[node_pair], path[node_pair + 1]) in net.edges + ] + max_static_flow = [ - max([net.nodes[node][ - Network.KEY_NODE_BASE_FLOW][(0,k)] - for k in range(len(ipp.networks[ - grid_key].nodes[node][ - Network.KEY_NODE_BASE_FLOW])) - ]) + max( + [ + net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)] + for k in range( + len( + ipp.networks[grid_key].nodes[node][ + Network.KEY_NODE_BASE_FLOW + ] + ) + ) + ] + ) if node in net.source_sink_nodes else 0 for node in path if node in net.nodes - ] - + ] + min_static_flow = [ - min([net.nodes[node][ - Network.KEY_NODE_BASE_FLOW][(0,k)] - for k in range(len(ipp.networks[ - grid_key].nodes[node][ - Network.KEY_NODE_BASE_FLOW])) - ]) + min( + [ + net.nodes[node][Network.KEY_NODE_BASE_FLOW][(0, k)] + for k in range( + len( + ipp.networks[grid_key].nodes[node][ + Network.KEY_NODE_BASE_FLOW + ] + ) + ) + ] + ) if node in net.source_sink_nodes else 0 for node in path if node in net.nodes - ] - + ] + # for each pair of nodes on the path - - if len(arc_flow_capacities) == len(path)-1: - - print('**********************************************') - print('Path: '+str(path)) - print('Max. static flow: '+str(max_static_flow)) - print('Min. static flow: '+str(min_static_flow)) - print('Capacities: '+str(arc_flow_capacities)) - print('Efficiencies: '+str(arc_tech_efficiencies)) - - for arc_flow_index in range(len(arc_flow_capacities)-1): - - if (arc_flow_capacities[arc_flow_index] < - arc_flow_capacities[arc_flow_index+1]): - + + if len(arc_flow_capacities) == len(path) - 1: + print("**********************************************") + print("Path: " + str(path)) + print("Max. static flow: " + str(max_static_flow)) + print("Min. 
static flow: " + str(min_static_flow)) + print("Capacities: " + str(arc_flow_capacities)) + print("Efficiencies: " + str(arc_tech_efficiencies)) + + for arc_flow_index in range(len(arc_flow_capacities) - 1): + if ( + arc_flow_capacities[arc_flow_index] + < arc_flow_capacities[arc_flow_index + 1] + ): # the flow capacities are increasing, which # usually indicates suboptimality - - #print('******************') - print('Increasing capacities along the flow path have been detected between nodes ' - +str(path[arc_flow_index]) - +' and ' - +str(path[arc_flow_index+2])+'.') - #print(tech_options_first) - #print(tech_options_sec) - #print('******************') - + + # print('******************') + print( + "Increasing capacities along the flow path have been detected between nodes " + + str(path[arc_flow_index]) + + " and " + + str(path[arc_flow_index + 2]) + + "." + ) + # print(tech_options_first) + # print(tech_options_sec) + # print('******************') + # ***************************************************************** - + # ********************************************************************* - - print('******************************************************************') - + + print("******************************************************************") + # ************************************************************************* + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/src/topupopt/solvers/__init__.py b/src/topupopt/solvers/__init__.py index e7ce4a7..80cf2ba 100644 --- a/src/topupopt/solvers/__init__.py +++ b/src/topupopt/solvers/__init__.py @@ -1,2 +1,2 @@ # -*- coding: utf-8 -*- -#from . import mvesipp \ No newline at end of file +# from . import mvesipp diff --git a/src/topupopt/solvers/interface.py b/src/topupopt/solvers/interface.py index 279c858..e6d68a4 100644 --- a/src/topupopt/solvers/interface.py +++ b/src/topupopt/solvers/interface.py @@ -31,134 +31,134 @@ from pyomo.opt.results.solver import check_optimal_termination # TODO: create a way to identify the solver status independent of each solver + class SolverInterface(object): "A class for interfacing with solvers through Pyomo." 
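The central design of this class is a translation layer: callers use generic, solver-agnostic option names (time_limit, relative_mip_gap, and so on) and the class maps them onto each solver's own keywords before handing them to Pyomo. A pure-Python sketch of that mapping step, using abridged copies of the GLPK and Gurobi dictionaries defined below; the translated dictionary would then be written into a SolverFactory object's options, as get_pyomo_solver_object does further down:

    # abridged copies of the mappings defined below in this class;
    # the full dictionaries also cover CBC, SCIP and CPLEX
    OPTIONS_GLPK = {"time_limit": "tmlim", "relative_mip_gap": "mipgap"}
    OPTIONS_GUROBI = {"time_limit": "TimeLimit", "relative_mip_gap": "MIPGap"}
    OPTIONS_SOLVER = {"glpk": OPTIONS_GLPK, "gurobi": OPTIONS_GUROBI}

    def translate_options(solver_name: str, **kwargs) -> dict:
        # keep only the options the solver understands, renamed accordingly
        mapping = OPTIONS_SOLVER[solver_name]
        return {
            mapping[key]: value for key, value in kwargs.items() if key in mapping
        }

    print(translate_options("glpk", time_limit=60, relative_mip_gap=0.01))
    # prints: {'tmlim': 60, 'mipgap': 0.01}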
- + # ************************************************************************* # ************************************************************************* - SOLVER_GLPK = 'glpk' - - SOLVER_CBC = 'cbc' - - SOLVER_GUROBI = 'gurobi' - - SOLVER_SCIP = 'scip' - - SOLVER_CPLEX = 'cplex' - - SOLVER_CPLEX_DIRECT = 'cplex_direct' - - SOLVERS = [SOLVER_CBC, - SOLVER_GLPK, - SOLVER_SCIP, - SOLVER_GUROBI, - SOLVER_CPLEX, - SOLVER_CPLEX_DIRECT] - + SOLVER_GLPK = "glpk" + + SOLVER_CBC = "cbc" + + SOLVER_GUROBI = "gurobi" + + SOLVER_SCIP = "scip" + + SOLVER_CPLEX = "cplex" + + SOLVER_CPLEX_DIRECT = "cplex_direct" + + SOLVERS = [ + SOLVER_CBC, + SOLVER_GLPK, + SOLVER_SCIP, + SOLVER_GUROBI, + SOLVER_CPLEX, + SOLVER_CPLEX_DIRECT, + ] + # ************************************************************************* # ************************************************************************* - + # topupopt options - - STR_DET_TIME_LIMIT = 'det_time_limit' - STR_TIME_LIMIT = 'time_limit' - STR_ABS_MIP_GAP = 'absolute_mip_gap' - STR_REL_MIP_GAP = 'relative_mip_gap' - + + STR_DET_TIME_LIMIT = "det_time_limit" + STR_TIME_LIMIT = "time_limit" + STR_ABS_MIP_GAP = "absolute_mip_gap" + STR_REL_MIP_GAP = "relative_mip_gap" + # option keywords for specific solvers - - OPTIONS_GLPK = {STR_TIME_LIMIT: 'tmlim', - STR_REL_MIP_GAP: 'mipgap'} - - OPTIONS_CBC = {STR_TIME_LIMIT: 'seconds', - STR_REL_MIP_GAP: 'ratioGap'} - - OPTIONS_SCIP = {STR_TIME_LIMIT: 'limits/time', - STR_ABS_MIP_GAP: 'limits/absgap', - STR_REL_MIP_GAP: 'limits/gap'} - + + OPTIONS_GLPK = {STR_TIME_LIMIT: "tmlim", STR_REL_MIP_GAP: "mipgap"} + + OPTIONS_CBC = {STR_TIME_LIMIT: "seconds", STR_REL_MIP_GAP: "ratioGap"} + + OPTIONS_SCIP = { + STR_TIME_LIMIT: "limits/time", + STR_ABS_MIP_GAP: "limits/absgap", + STR_REL_MIP_GAP: "limits/gap", + } + # source: https://www.gurobi.com/documentation/9.5/refman/parameters.html - - OPTIONS_GUROBI = {STR_TIME_LIMIT: 'TimeLimit', - STR_ABS_MIP_GAP: 'MIPGapAbs', - STR_REL_MIP_GAP: 'MIPGap'} - + + OPTIONS_GUROBI = { + STR_TIME_LIMIT: "TimeLimit", + STR_ABS_MIP_GAP: "MIPGapAbs", + STR_REL_MIP_GAP: "MIPGap", + } + # source: https://www.ibm.com/docs/en/icos/12.8.0.0?topic=cplex-parameters - - OPTIONS_CPLEX = {STR_DET_TIME_LIMIT: 'dettimelimit', - STR_TIME_LIMIT: 'timelimit', - # STR_ABS_MIP_GAP: 'absmipgap', - STR_ABS_MIP_GAP: 'mip_tolerances_absmipgap', - STR_REL_MIP_GAP: 'mipgap'} - + + OPTIONS_CPLEX = { + STR_DET_TIME_LIMIT: "dettimelimit", + STR_TIME_LIMIT: "timelimit", + # STR_ABS_MIP_GAP: 'absmipgap', + STR_ABS_MIP_GAP: "mip_tolerances_absmipgap", + STR_REL_MIP_GAP: "mipgap", + } + # static dict with solver options for each supported solver - - OPTIONS_SOLVER = {SOLVER_GLPK: OPTIONS_GLPK, - SOLVER_CBC: OPTIONS_CBC, - SOLVER_SCIP: OPTIONS_SCIP, - SOLVER_GUROBI: OPTIONS_GUROBI, - SOLVER_CPLEX: OPTIONS_CPLEX, - SOLVER_CPLEX_DIRECT: OPTIONS_CPLEX} - - PYOMO_OPTIONS = ['executable','warmstart','solver_io','tee'] - + + OPTIONS_SOLVER = { + SOLVER_GLPK: OPTIONS_GLPK, + SOLVER_CBC: OPTIONS_CBC, + SOLVER_SCIP: OPTIONS_SCIP, + SOLVER_GUROBI: OPTIONS_GUROBI, + SOLVER_CPLEX: OPTIONS_CPLEX, + SOLVER_CPLEX_DIRECT: OPTIONS_CPLEX, + } + + PYOMO_OPTIONS = ["executable", "warmstart", "solver_io", "tee"] + # ************************************************************************* # ************************************************************************* - - PROBLEM_LP = 'lp' - PROBLEM_MILP = 'milp' - PROBLEM_QP = 'qp' - PROBLEM_MIQP = 'miqp' - PROBLEM_QCP = 'qcp' - PROBLEM_MIQCP = 'miqcp' - + + PROBLEM_LP = "lp" + PROBLEM_MILP = "milp" + 
PROBLEM_QP = "qp" + PROBLEM_MIQP = "miqp" + PROBLEM_QCP = "qcp" + PROBLEM_MIQCP = "miqcp" + PROBLEM_TYPES = [ PROBLEM_LP, PROBLEM_MILP, PROBLEM_QP, PROBLEM_MIQP, PROBLEM_QCP, - PROBLEM_MIQCP - ] - + PROBLEM_MIQCP, + ] + # https://scipopt.org/doc/html/WHATPROBLEMS.php - - PROBLEMS_COMPATIBLE_SCIP = [ - PROBLEM_LP, - PROBLEM_MILP - ] - - PROBLEMS_COMPATIBLE_GLPK = [ - PROBLEM_LP, - PROBLEM_MILP - ] - - PROBLEMS_COMPATIBLE_CBC = [ - PROBLEM_LP, - PROBLEM_MILP - ] - + + PROBLEMS_COMPATIBLE_SCIP = [PROBLEM_LP, PROBLEM_MILP] + + PROBLEMS_COMPATIBLE_GLPK = [PROBLEM_LP, PROBLEM_MILP] + + PROBLEMS_COMPATIBLE_CBC = [PROBLEM_LP, PROBLEM_MILP] + # https://www.gurobi.com/products/gurobi-optimizer/ - + PROBLEMS_COMPATIBLE_GUROBI = [ PROBLEM_LP, PROBLEM_MILP, PROBLEM_QP, PROBLEM_MIQP, PROBLEM_QCP, - PROBLEM_MIQCP - ] - + PROBLEM_MIQCP, + ] + PROBLEMS_COMPATIBLE_CPLEX = [ PROBLEM_LP, PROBLEM_MILP, PROBLEM_QP, PROBLEM_MIQP, PROBLEM_QCP, - PROBLEM_MIQCP - ] - + PROBLEM_MIQCP, + ] + PROBLEMS_COMPATIBLE_SOLVER = { SOLVER_GLPK: PROBLEMS_COMPATIBLE_GLPK, SOLVER_SCIP: PROBLEMS_COMPATIBLE_SCIP, @@ -166,23 +166,23 @@ class SolverInterface(object): SOLVER_GUROBI: PROBLEMS_COMPATIBLE_GUROBI, SOLVER_CPLEX: PROBLEMS_COMPATIBLE_CPLEX, SOLVER_CPLEX_DIRECT: PROBLEMS_COMPATIBLE_CPLEX, - } - + } + # ************************************************************************* # ************************************************************************* - + # USER -->> SOLVER_INTERFACE -->> PYOMO --> SOLVER - + # the optimisation status depends on the solver status and the term. criteria - + SOLVER_STATUSES = [ SolverStatus.aborted, SolverStatus.error, SolverStatus.ok, SolverStatus.unknown, - SolverStatus.warning - ] - + SolverStatus.warning, + ] + TERMINATION_CRITERIA = [ # UNKNOWN TerminationCondition.unknown, @@ -211,236 +211,210 @@ class SolverInterface(object): # ABORTED TerminationCondition.userInterrupt, TerminationCondition.resourceInterrupt, - TerminationCondition.licensingProblems - ] - + TerminationCondition.licensingProblems, + ] + # ************************************************************************* # ************************************************************************* - - def __init__(self, - solver_name: str, - time_limit: int = None, - relative_mip_gap: float = None, - absolute_mip_gap: float = None, - **kwargs): - + + def __init__( + self, + solver_name: str, + time_limit: int = None, + relative_mip_gap: float = None, + absolute_mip_gap: float = None, + **kwargs + ): # check if the solver is supported - + if solver_name not in self.SOLVERS: - - raise self.UnknownSolverError(solver_name) - + raise self.UnknownSolverError(solver_name) + # init - + self.solver_name = solver_name - + self.time_limit = time_limit - + self.relative_mip_gap = relative_mip_gap - + self.absolute_mip_gap = absolute_mip_gap - + # create SolverFactory - + self.obj = SolverFactory(solver_name, **kwargs) - + # ************************************************************************* # ************************************************************************* - + def get_solver_handler(self, **kwargs): - # find - + options_dict_format = { self.OPTIONS_SOLVER[self.solver_name][key]: value for key, value in kwargs.items() - if key in self.OPTIONS_SOLVER[self.solver_name]} - + if key in self.OPTIONS_SOLVER[self.solver_name] + } + # parameters - + options_param_format = { - key: value - for key, value in kwargs.items() - if key in self.PYOMO_OPTIONS} - + key: value for key, value in kwargs.items() if key in self.PYOMO_OPTIONS + } + if 
len(options_param_format) == 0: - return self.get_pyomo_solver_object( - self.solver_name, - options_dict_format=options_dict_format) - + self.solver_name, options_dict_format=options_dict_format + ) + else: - return self.get_pyomo_solver_object( - self.solver_name, + self.solver_name, options_dict_format=options_dict_format, - options_param_format=options_param_format) - + options_param_format=options_param_format, + ) + # ************************************************************************* # ************************************************************************* - + @staticmethod - def get_pyomo_solver_object(solver_name: str, - options_dict_format: dict, - options_param_format: dict = None): - + def get_pyomo_solver_object( + solver_name: str, options_dict_format: dict, options_param_format: dict = None + ): # return an SolverFactory object properly configured as per the solver - + if options_param_format is None: - opt = pyo.SolverFactory(solver_name) - + else: - opt = pyo.SolverFactory(solver_name, **options_param_format) - + for key, value in options_dict_format.items(): - opt.options[key] = value - + # return the object - + return opt - + # ************************************************************************* # ************************************************************************* - + @classmethod - def problem_and_solver_are_compatible(cls, - solver_name, - problem_type) -> bool: - + def problem_and_solver_are_compatible(cls, solver_name, problem_type) -> bool: if solver_name not in cls.SOLVERS: - raise cls.UnknownSolverError(solver_name) - + if problem_type not in cls.PROBLEM_TYPES: - raise cls.UnknownProblemTypeError(problem_type) - + if problem_type in cls.PROBLEMS_COMPATIBLE_SOLVER[solver_name]: - return True - + else: - return False # ************************************************************************* # ************************************************************************* - + def was_optimisation_sucessful(self, results, problem_type) -> bool: - solver_status = results.solver.status - + termination_condition = results.solver.termination_condition - + # checks are made within problem_and_solver_are_compatible - - if not self.problem_and_solver_are_compatible(self.solver_name, - problem_type): - + + if not self.problem_and_solver_are_compatible(self.solver_name, problem_type): # if the problem and solver are not compatible, something is up - - raise self.InconsistentProblemTypeAndSolverError(problem_type, - self.solver_name) - + + raise self.InconsistentProblemTypeAndSolverError( + problem_type, self.solver_name + ) + # check the termination condition and the solver status - + if termination_condition not in self.TERMINATION_CRITERIA: - raise self.UnknownTerminationConditionError(termination_condition) - + if solver_status not in self.SOLVER_STATUSES: - raise self.UnknownSolverStatusError(solver_status) - + # ********************************************************************* # ********************************************************************* - + expected_solver_status = TerminationCondition.to_solver_status( - termination_condition) - + termination_condition + ) + if expected_solver_status != solver_status: - raise self.InconsistentSolverStatusError( - solver_status, - expected_solver_status) - + solver_status, expected_solver_status + ) + if check_optimal_termination(results): - return True - + # while not optimal, check if the solution was feasible - - if (expected_solver_status == SolverStatus.ok): - + + if expected_solver_status == 
SolverStatus.ok: return True - + else: - return False # ************************************************************************* # ************************************************************************* - + class InconsistentProblemTypeAndSolverError(Exception): - def __init__(self, problem_type, solver_name): - super().__init__( - 'The problem type ('+problem_type+ - ') cannot be handled by solver ('+solver_name+').' - ) + "The problem type (" + + problem_type + + ") cannot be handled by solver (" + + solver_name + + ")." + ) # ************************************************************************* # ************************************************************************* class InconsistentSolverStatusError(Exception): - def __init__(self, solver_status, expected_solver_status): - super().__init__( - 'The expected SolverStatus ('+expected_solver_status+ - ') does not match the one obtained ('+solver_status+').' - ) + "The expected SolverStatus (" + + expected_solver_status + + ") does not match the one obtained (" + + solver_status + + ")." + ) # ************************************************************************* # ************************************************************************* - + class UnknownSolverError(Exception): - def __init__(self, solver_name): - - super().__init__('Unknown solver: '+str(solver_name)) + super().__init__("Unknown solver: " + str(solver_name)) # ************************************************************************* # ************************************************************************* - + class UnknownProblemTypeError(Exception): - def __init__(self, problem_type): - - super().__init__('Unknown problem type: '+str(problem_type)) + super().__init__("Unknown problem type: " + str(problem_type)) # ************************************************************************* # ************************************************************************* - + class UnknownTerminationConditionError(Exception): - def __init__(self, termination_condition): - - super().__init__( - 'Unknown termination condition: '+termination_condition) + super().__init__("Unknown termination condition: " + termination_condition) # ************************************************************************* # ************************************************************************* - + class UnknownSolverStatusError(Exception): - def __init__(self, solver_status): - - super().__init__('Unknown solver status: '+solver_status) - + super().__init__("Unknown solver status: " + solver_status) + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/examples_esipp.py b/tests/examples_esipp.py index 29b9f97..b70f4ff 100644 --- a/tests/examples_esipp.py +++ b/tests/examples_esipp.py @@ -26,44 +26,46 @@ from src.topupopt.problems.esipp.resource import ResourcePrice # TODO: replace this set of examples with more deterministic ones -#****************************************************************************** -#****************************************************************************** - -def examples(solver: str, - solver_options: dict = None, - seed_number: int = None, - init_aux_sets: bool = False): - +# ****************************************************************************** +# 
****************************************************************************** + + +def examples( + solver: str, + solver_options: dict = None, + seed_number: int = None, + init_aux_sets: bool = False, +): # test a generic mvesipp problem using the original classes - + # termination criteria - + solver_timelimit = 60 - + solver_abs_mip_gap = 0.001 - + solver_rel_mip_gap = 0.01 if type(solver_options) == dict: - - solver_options.update({ - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - }) - + solver_options.update( + { + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + ) + else: - solver_options = { - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - } - - #************************************************************************** - + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + + # ************************************************************************** + # no sos, regular time intervals - + example_generic_problem( solver=solver, solver_options=solver_options, @@ -71,14 +73,14 @@ def examples(solver: str, sos_weight_key=InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE, seed_number=seed_number, perform_analysis=True, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - init_aux_sets=init_aux_sets - ) - + init_aux_sets=init_aux_sets, + ) + # sos, cost as weight, regular time intervals - + example_generic_problem( solver=solver, solver_options=solver_options, @@ -89,11 +91,11 @@ def examples(solver: str, plot_results=False, print_solver_output=False, irregular_time_intervals=False, - init_aux_sets=init_aux_sets - ) - + init_aux_sets=init_aux_sets, + ) + # sos, capacity as weight, regular time intervals - + example_generic_problem( solver=solver, solver_options=solver_options, @@ -104,11 +106,11 @@ def examples(solver: str, plot_results=False, print_solver_output=False, irregular_time_intervals=False, - init_aux_sets=init_aux_sets - ) - + init_aux_sets=init_aux_sets, + ) + # sos, specific minimum cost as weight, irregular time intervals - + example_generic_problem( solver=solver, solver_options=solver_options, @@ -119,1108 +121,1120 @@ def examples(solver: str, plot_results=False, print_solver_output=False, irregular_time_intervals=True, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** -#****************************************************************************** -#****************************************************************************** - -def example_generic_problem(solver: str = 'glpk', - solver_options: dict = None, - use_sos_arcs: bool = False, - sos_weight_key: str = 'cost', - seed_number: int = None, - perform_analysis: bool = False, - plot_results: bool = False, - print_solver_output: bool = False, - irregular_time_intervals: bool = False, - init_aux_sets: bool = False): - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + + +# ****************************************************************************** +# 
****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** + + +def example_generic_problem( + solver: str = "glpk", + solver_options: dict = None, + use_sos_arcs: bool = False, + sos_weight_key: str = "cost", + seed_number: int = None, + perform_analysis: bool = False, + plot_results: bool = False, + print_solver_output: bool = False, + irregular_time_intervals: bool = False, + init_aux_sets: bool = False, +): number_periods = 2 - + number_intraperiod_time_intervals = 5 - + discount_rates = tuple([0.035 for p in range(number_periods)]) - - planning_horizon = [365*24*3600 for p in range(number_periods)] # intra-period, of course - + + planning_horizon = [ + 365 * 24 * 3600 for p in range(number_periods) + ] # intra-period, of course + if irregular_time_intervals: - # TODO: adjust demand/supply levels - + time_step_max_relative_variation = 0.25 - + intraperiod_time_interval_duration = [ - (planning_horizon[0]/number_intraperiod_time_intervals)* - (1+(k/(number_intraperiod_time_intervals-1)-0.5)* - time_step_max_relative_variation) - for k in range(number_intraperiod_time_intervals)] - + (planning_horizon[0] / number_intraperiod_time_intervals) + * ( + 1 + + (k / (number_intraperiod_time_intervals - 1) - 0.5) + * time_step_max_relative_variation + ) + for k in range(number_intraperiod_time_intervals) + ] + else: - intraperiod_time_interval_duration = [ - planning_horizon[0]/number_intraperiod_time_intervals - for k in range(number_intraperiod_time_intervals)] - + planning_horizon[0] / number_intraperiod_time_intervals + for k in range(number_intraperiod_time_intervals) + ] + # time weights - + # average time interval duration - - average_time_interval_duration = round( - mean( - intraperiod_time_interval_duration - ) - ) - + + average_time_interval_duration = round(mean(intraperiod_time_interval_duration)) + # relative weight of time period - + # one interval twice as long as the average is worth twice # one interval half as long as the average is worth half - + # time_weights = [ - # [time_period_duration/average_time_interval_duration - # for time_period_duration in intraperiod_time_interval_duration] + # [time_period_duration/average_time_interval_duration + # for time_period_duration in intraperiod_time_interval_duration] # for p in range(number_periods)] - + time_weights = None - + # create problem object - + ipp = InfrastructurePlanningProblem( - name='problem', - discount_rates={0: discount_rates}, + name="problem", + discount_rates={0: discount_rates}, reporting_periods={0: tuple(i for i in range(number_periods))}, - time_intervals={ - 0: tuple(dt for dt in intraperiod_time_interval_duration) - }, - time_weights=time_weights - ) - + time_intervals={0: tuple(dt for dt in intraperiod_time_interval_duration)}, + time_weights=time_weights, + ) + # add networks and systems - - ipp = create_generic_networks(ipp, - seed_number) - + + ipp = create_generic_networks(ipp, seed_number) + # set up the use of sos, if necessary - + if use_sos_arcs: - for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - - if ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - + if ( + ipp.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): continue - + ipp.use_sos1_for_arc_selection( - network_key, + network_key, 
arc_key, use_real_variables_if_possible=False, - sos1_weight_method=sos_weight_key) - + sos1_weight_method=sos_weight_key, + ) + # instantiate - + ipp.instantiate(initialise_ancillary_sets=init_aux_sets) - + # optimise - + if print_solver_output: - ipp.instance.pprint() - - out = ipp.optimise(solver_name=solver, - solver_options=solver_options, - output_options={}, - print_solver_output=print_solver_output) - + + out = ipp.optimise( + solver_name=solver, + solver_options=solver_options, + output_options={}, + print_solver_output=print_solver_output, + ) + if out: + print("The optimisation was successful. Running post-optimisation analysis.") - print('The optimisation was successful. Running post-optimisation analysis.') - # run tests - - utils.run_mvesipp_analysis(ipp, - ipp.instance, - analyse_problem=perform_analysis, - analyse_results=perform_analysis) - + + utils.run_mvesipp_analysis( + ipp, + ipp.instance, + analyse_problem=perform_analysis, + analyse_results=perform_analysis, + ) + else: - - print('The optimisation failed. Skipping results analysis.') - + print("The optimisation failed. Skipping results analysis.") + # run tests - - utils.run_mvesipp_analysis(ipp, - ipp.instance, - analyse_problem=perform_analysis, - analyse_results=False) - - #************************************************************************** - #************************************************************************** - + + utils.run_mvesipp_analysis( + ipp, ipp.instance, analyse_problem=perform_analysis, analyse_results=False + ) + + # ************************************************************************** + # ************************************************************************** + # print results - + if plot_results: - - utils.plot_mves(ipp, - filepath='/another_folder/', - filename_radical='network_') - - #************************************************************************** - #************************************************************************** - + utils.plot_mves(ipp, filepath="/another_folder/", filename_radical="network_") + + # ************************************************************************** + # ************************************************************************** + # return something - + return True - -#****************************************************************************** -#****************************************************************************** - -def generic_problem_get_arc_techs(number_time_intervals, - network_order, - network_name, - arc_tech_efficiencies, - number_arc_technologies, - peak_flow, - n1, - n2, - distance - ): - + + +# ****************************************************************************** +# ****************************************************************************** + + +def generic_problem_get_arc_techs( + number_time_intervals, + network_order, + network_name, + arc_tech_efficiencies, + number_arc_technologies, + peak_flow, + n1, + n2, + distance, +): min_efficiency = min(arc_tech_efficiencies.values()) - + # note: the network order needs to be accurate - + capacity = [ - peak_flow* - (1/(min_efficiency**network_order))* - (k+1)/number_arc_technologies - for k in range(number_arc_technologies)] - + peak_flow + * (1 / (min_efficiency**network_order)) + * (k + 1) + / number_arc_technologies + for k in range(number_arc_technologies) + ] + min_cost = [ - (k+1)*distance*1e3*(1+rand.random()) - for k in range(number_arc_technologies)] - + (k + 1) * distance * 1e3 * (1 + rand.random()) + for k in range(number_arc_technologies) 
+ ] + new_arc_tech = Arcs( - name=( - network_name+ - '_arc_tech_n'+str(n1)+ - '_n'+str(n2)), - efficiency=arc_tech_efficiencies, + name=(network_name + "_arc_tech_n" + str(n1) + "_n" + str(n2)), + efficiency=arc_tech_efficiencies, efficiency_reverse=None, static_loss=None, - capacity=capacity, + capacity=capacity, minimum_cost=min_cost, specific_capacity_cost=0, capacity_is_instantaneous=False, - validate=False) - + validate=False, + ) + # return - + return new_arc_tech -#****************************************************************************** -#****************************************************************************** - -def add_arc_this_way(network, - network_order, - ipp, - order_boost, - network_names, - g, - node_start, - node_end, - arc_number_key, - arc_tech_efficiency, - number_arc_technologies, - peak_flow, - distance_matrix): +# ****************************************************************************** +# ****************************************************************************** + + +def add_arc_this_way( + network, + network_order, + ipp, + order_boost, + network_names, + g, + node_start, + node_end, + arc_number_key, + arc_tech_efficiency, + number_arc_technologies, + peak_flow, + distance_matrix, +): arc_tech = generic_problem_get_arc_techs( ipp.time_intervals[ipp.assessment_keys[0]], - network_order[g]+order_boost, + network_order[g] + order_boost, network_names[g], arc_tech_efficiency[g], number_arc_technologies, peak_flow[g], node_start, node_end, - distance_matrix[g][node_start][node_end] - ) - + distance_matrix[g][node_start][node_end], + ) + # add it to the network - + if arc_number_key == None: - network.add_directed_arc( - node_key_a=node_start, - node_key_b=node_end, - arcs=arc_tech) - + node_key_a=node_start, node_key_b=node_end, arcs=arc_tech + ) + else: - network.modify_network_arc( node_key_a=node_start, node_key_b=node_end, arc_key_ab=arc_number_key, - data_dict={ - Network.KEY_ARC_TECH: arc_tech, - Network.KEY_ARC_UND: False} - ) - - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** - -def create_generic_networks(ipp: InfrastructurePlanningProblem, - seed_number: int = None): - - #************************************************************************** - #************************************************************************** - + data_dict={Network.KEY_ARC_TECH: arc_tech, Network.KEY_ARC_UND: False}, + ) + + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + + +def create_generic_networks( + ipp: InfrastructurePlanningProblem, seed_number: int = None +): + # ************************************************************************** + # ************************************************************************** + if seed_number == None: - - seed_number = rand.randint(1,int(1e5)) - - print('Seed number: '+str(seed_number)) - + seed_number = rand.randint(1, int(1e5)) + + print("Seed number: " + str(seed_number)) + # initialise random number generators - + rand.seed(a=seed_number) - + 
np.random.seed(seed=seed_number) - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # problem specification - + # networks - + min_number_networks = 2 - + max_number_networks = 3 - + number_networks = rand.randint(min_number_networks, max_number_networks) - + # network type: supply (nodes only), demand (nodes only), hybrid (both) - - NET_TYPE_SUPPLY = 'supply' - NET_TYPE_DEMAND = 'demand' - NET_TYPE_HYBRID = 'hybrid' - - NET_TYPES = [NET_TYPE_SUPPLY, - NET_TYPE_DEMAND, - NET_TYPE_HYBRID] - - network_type = [NET_TYPES[rand.randint(0, len(NET_TYPES)-1)] - for g in range(number_networks)] + + NET_TYPE_SUPPLY = "supply" + NET_TYPE_DEMAND = "demand" + NET_TYPE_HYBRID = "hybrid" + + NET_TYPES = [NET_TYPE_SUPPLY, NET_TYPE_DEMAND, NET_TYPE_HYBRID] + + network_type = [ + NET_TYPES[rand.randint(0, len(NET_TYPES) - 1)] for g in range(number_networks) + ] print(network_type) # TODO: delete print above # order of network - - min_network_order = 2 # has to be at least 2 for hybrid mode - + + min_network_order = 2 # has to be at least 2 for hybrid mode + max_network_order = 4 - - network_order = [rand.randint(min_network_order,max_network_order) - for g in range(number_networks)] - + + network_order = [ + rand.randint(min_network_order, max_network_order) + for g in range(number_networks) + ] + # import and export nodes - + # import nodes are needed with insuf. supply - - min_number_import_nodes = [(1 if (network_type[g] == NET_TYPE_DEMAND or - network_type[g] == NET_TYPE_HYBRID) - else 0) - for g in range(number_networks)] - - max_number_import_nodes = [min_number_import_nodes[g]+rand.randint(0,1) - for g in range(number_networks)] - + + min_number_import_nodes = [ + ( + 1 + if ( + network_type[g] == NET_TYPE_DEMAND or network_type[g] == NET_TYPE_HYBRID + ) + else 0 + ) + for g in range(number_networks) + ] + + max_number_import_nodes = [ + min_number_import_nodes[g] + rand.randint(0, 1) for g in range(number_networks) + ] + # export nodes are needed with insuf. 
demand - - min_number_export_nodes = [(1 if (network_type[g] == NET_TYPE_SUPPLY or - network_type[g] == NET_TYPE_HYBRID) - else 0) - for g in range(number_networks)] - - max_number_export_nodes = [min_number_export_nodes[g]+rand.randint(0,1) - for g in range(number_networks)] - + + min_number_export_nodes = [ + ( + 1 + if ( + network_type[g] == NET_TYPE_SUPPLY or network_type[g] == NET_TYPE_HYBRID + ) + else 0 + ) + for g in range(number_networks) + ] + + max_number_export_nodes = [ + min_number_export_nodes[g] + rand.randint(0, 1) for g in range(number_networks) + ] + min_number_other_nodes = 3 - + max_number_other_nodes = 6 - - number_import_nodes = [rand.randint(min_number_import_nodes[g], - max_number_import_nodes[g]) - for g in range(number_networks)] - - number_export_nodes = [rand.randint(min_number_export_nodes[g], - max_number_export_nodes[g]) - for g in range(number_networks)] - - number_other_nodes = [rand.randint(min_number_other_nodes, - max_number_other_nodes) - for g in range(number_networks)] - - number_nodes = [2**network_order[g]+ - number_import_nodes[g]+ - number_export_nodes[g]+ - number_other_nodes[g] - for g in range(number_networks)] - + + number_import_nodes = [ + rand.randint(min_number_import_nodes[g], max_number_import_nodes[g]) + for g in range(number_networks) + ] + + number_export_nodes = [ + rand.randint(min_number_export_nodes[g], max_number_export_nodes[g]) + for g in range(number_networks) + ] + + number_other_nodes = [ + rand.randint(min_number_other_nodes, max_number_other_nodes) + for g in range(number_networks) + ] + + number_nodes = [ + 2 ** network_order[g] + + number_import_nodes[g] + + number_export_nodes[g] + + number_other_nodes[g] + for g in range(number_networks) + ] + # arc technologies - + min_number_arc_technologies = [1 for g in range(number_networks)] - + max_number_arc_technologies = [6 for g in range(number_networks)] - + number_arc_technologies = [ - rand.randint(min_number_arc_technologies[g], - max_number_arc_technologies[g]) - for g in range(number_networks)] - - #************************************************************************** - #************************************************************************** - + rand.randint(min_number_arc_technologies[g], max_number_arc_technologies[g]) + for g in range(number_networks) + ] + + # ************************************************************************** + # ************************************************************************** + # generate data - - network_names = ['grid_'+str(g) - for g in range(number_networks)] - + + network_names = ["grid_" + str(g) for g in range(number_networks)] + # import prices (could be an empty dict) - + import_prices = { - (g,n):[rand.random() - for k in ipp.time_intervals[ipp.assessment_keys[0]]] + (g, n): [rand.random() for k in ipp.time_intervals[ipp.assessment_keys[0]]] for g in range(number_networks) - for n in range(number_import_nodes[g])} - + for n in range(number_import_nodes[g]) + } + # export prices (lower than import ones; random if no import prices exist) - + export_prices = { - (g,n):[min(import_prices[(g,n_imp)][k] - for n_imp in range(number_import_nodes[g]))* - rand.random() if number_import_nodes[g] != 0 else rand.random() - for k in range( - len(ipp.time_intervals[ipp.assessment_keys[0]]) - )] + (g, n): [ + min(import_prices[(g, n_imp)][k] for n_imp in range(number_import_nodes[g])) + * rand.random() + if number_import_nodes[g] != 0 + else rand.random() + for k in range(len(ipp.time_intervals[ipp.assessment_keys[0]])) + ]
for g in range(number_networks) - for n in range(number_export_nodes[g])} - + for n in range(number_export_nodes[g]) + } + # static supply (negative) or demand (positive) - + base_flow = { - (g,n):[rand.random() if network_type[g] == NET_TYPE_DEMAND else - -rand.random() if network_type[g] == NET_TYPE_SUPPLY else - -1+2*rand.random() - for k in ipp.time_intervals[ipp.assessment_keys[0]]] + (g, n): [ + rand.random() + if network_type[g] == NET_TYPE_DEMAND + else -rand.random() + if network_type[g] == NET_TYPE_SUPPLY + else -1 + 2 * rand.random() + for k in ipp.time_intervals[ipp.assessment_keys[0]] + ] for g in range(number_networks) - for n in range(number_other_nodes[g])} - + for n in range(number_other_nodes[g]) + } + # positions - + position_nodes = [ - [(rand.random(),rand.random()) for n in range(number_nodes[g])] - for g in range(number_networks)] - + [(rand.random(), rand.random()) for n in range(number_nodes[g])] + for g in range(number_networks) + ] + # distance - - def distance_function(x1,x2,y1,y2): - - return np.sqrt((x1-x2)**2+(y1-y2)**2) - + + def distance_function(x1, x2, y1, y2): + return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) + distance_matrix = [ - [[distance_function(position_nodes[g][n1][0], - position_nodes[g][n2][0], - position_nodes[g][n1][1], - position_nodes[g][n2][1]) - for n2 in range(number_nodes[g])] - for n1 in range(number_nodes[g])] - for g in range(number_networks)] - + [ + [ + distance_function( + position_nodes[g][n1][0], + position_nodes[g][n2][0], + position_nodes[g][n1][1], + position_nodes[g][n2][1], + ) + for n2 in range(number_nodes[g]) + ] + for n1 in range(number_nodes[g]) + ] + for g in range(number_networks) + ] + # determine peak demand - + peak_flow = [ sum( max( - [abs(base_flow[(g,n)][k]) - for k in range( - len(ipp.time_intervals[ipp.assessment_keys[0]]) - ) - ] - ) - for n in range(number_other_nodes[g]) + [ + abs(base_flow[(g, n)][k]) + for k in range(len(ipp.time_intervals[ipp.assessment_keys[0]])) + ] ) + for n in range(number_other_nodes[g]) + ) for g in range(number_networks) - ] - + ] + # arc tech efficiency per arc tech and grid - + # arc_tech_efficiency = [ # [1-rand.random()*rand.random()*rand.random() # for k in range(ipp.number_intraperiod_time_intervals)] # for g in range(number_networks)] - + arc_tech_efficiency = [ - {(q,k): 1-rand.random()*rand.random()*rand.random() - for q in ipp.assessment_keys - for k in range(ipp.number_time_intervals[q])} + { + (q, k): 1 - rand.random() * rand.random() * rand.random() + for q in ipp.assessment_keys + for k in range(ipp.number_time_intervals[q]) + } for g in range(number_networks) - ] - - #************************************************************************** - #************************************************************************** - + ] + + # ************************************************************************** + # ************************************************************************** + # for each network: # 1) create network using networkx's graph creators # 2) add random data to the existing nodes and edges # 3) add import, export and other nodes (including the relevant data) # 4) add arcs from these new nodes to the other nodes in the network - + # list of Network objects - + for g in range(number_networks): - - #********************************************************************** - + # ********************************************************************** + order_boost = ( - 2 if number_import_nodes[g] and number_export_nodes[g] else 1 - if 
number_import_nodes[g] or number_export_nodes[g] else 0) + 2 + if number_import_nodes[g] and number_export_nodes[g] + else 1 + if number_import_nodes[g] or number_export_nodes[g] + else 0 + ) + + # ********************************************************************** - #********************************************************************** - # 1) create network using networkx's graph creators - + if network_type[g] == NET_TYPE_DEMAND: - # consumer network (positive SB_glk) - + new_network = Network( - nx.binomial_tree(network_order[g], - create_using=nx.MultiDiGraph)) - + nx.binomial_tree(network_order[g], create_using=nx.MultiDiGraph) + ) + elif network_type[g] == NET_TYPE_SUPPLY: - # producer network (negative SB_glk) - + new_network = Network( - nx.binomial_tree(network_order[g], - create_using=nx.MultiDiGraph)) - + nx.binomial_tree(network_order[g], create_using=nx.MultiDiGraph) + ) + # reverse arc directions - + arc_list = [] for arc in new_network.edges(): arc_list.append(arc) for arc in arc_list: - new_network.remove_edge(arc[0],arc[1]) - new_network.add_edge(arc[1],arc[0]) - - else: # hybrid - + new_network.remove_edge(arc[0], arc[1]) + new_network.add_edge(arc[1], arc[0]) + + else: # hybrid # join one supply grid with one demand grid - - G1 = nx.binomial_tree(network_order[g]-1, - create_using=nx.MultiDiGraph) - - G2 = nx.binomial_tree(network_order[g]-1, - create_using=nx.MultiDiGraph) - + + G1 = nx.binomial_tree(network_order[g] - 1, create_using=nx.MultiDiGraph) + + G2 = nx.binomial_tree(network_order[g] - 1, create_using=nx.MultiDiGraph) + nn_g1 = G1.number_of_nodes() arc_list = [arc for arc in G2.edges()] node_list = [node_key for node_key in G2.nodes()] for arc in arc_list: - G2.remove_edge(arc[0],arc[1]) - G2.add_node(arc[0]+nn_g1) - G2.add_node(arc[1]+nn_g1) - G2.add_edge(arc[1]+nn_g1,arc[0]+nn_g1) + G2.remove_edge(arc[0], arc[1]) + G2.add_node(arc[0] + nn_g1) + G2.add_node(arc[1] + nn_g1) + G2.add_edge(arc[1] + nn_g1, arc[0] + nn_g1) for node in node_list: G2.remove_node(node) - - G = nx.union(G1,G2) - G.add_edge(nn_g1,0) # G2 is the supply network, G1 is the demand 1 + + G = nx.union(G1, G2) + G.add_edge(nn_g1, 0) # G2 is the supply network, G1 is the demand 1 new_network = Network(G) - - + # define the nodes as not being import, nor export nor other nodes - + for n in new_network.nodes: - new_network.add_waypoint_node(node_key=n) - + # add arc data - + for edge in new_network.edges(keys=True): - # add arc - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - edge[0], - edge[1], - edge[2], - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - - #********************************************************************** - + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + edge[0], + edge[1], + edge[2], + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + + # ********************************************************************** + # compute the number of outgoing arcs per node - + dict_number_outgoing_arcs = { - node: len(nx.edges(new_network,node)) - for node in new_network.nodes()} - + node: len(nx.edges(new_network, node)) for node in new_network.nodes() + } + # list the nodes ordered by descending number of outgoing arcs - + list_nodes_descending_number_outgoing_arcs = sorted( - dict_number_outgoing_arcs, - key=dict_number_outgoing_arcs.get, - reverse=True) - + dict_number_outgoing_arcs, 
key=dict_number_outgoing_arcs.get, reverse=True + ) + # list the nodes ordered by ascending number of outgoing arcs - + list_nodes_ascending_number_outgoing_arcs = sorted( - dict_number_outgoing_arcs, - key=dict_number_outgoing_arcs.get) - + dict_number_outgoing_arcs, key=dict_number_outgoing_arcs.get + ) + # compute the number of incoming arcs per node - + dict_number_incoming_arcs = { - node: len([node_source - for node_source in new_network.predecessors(node)]) - for node in new_network.nodes()} - + node: len([node_source for node_source in new_network.predecessors(node)]) + for node in new_network.nodes() + } + # list of nodes ordered by descending number of incoming arcs - + list_nodes_descending_number_incoming_arcs = sorted( - dict_number_incoming_arcs, - key=dict_number_incoming_arcs.get, - reverse=True) - + dict_number_incoming_arcs, key=dict_number_incoming_arcs.get, reverse=True + ) + # list of nodes ordered by ascending number of incoming arcs - + list_nodes_ascending_number_incoming_arcs = sorted( - dict_number_incoming_arcs, - key=dict_number_incoming_arcs.get) - - #********************************************************************** - + dict_number_incoming_arcs, key=dict_number_incoming_arcs.get + ) + + # ********************************************************************** + # add import nodes - + for n in range(number_import_nodes[g]): - # define key - + node_key = new_network.number_of_nodes() - + # res_pri = ResourcePrice(prices=import_prices[(g,n)], # volumes=None) - + new_network.add_import_node( node_key=node_key, prices={ - (q,p,k): ResourcePrice( - prices=import_prices[(g,n)][k], - volumes=None - ) + (q, p, k): ResourcePrice( + prices=import_prices[(g, n)][k], volumes=None + ) for q in range(ipp.number_assessments) for p in range(ipp.number_reporting_periods[q]) for k in range(ipp.number_time_intervals[q]) - } - ) - + }, + ) + # add arc from import node to a node with many outgoing arcs - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - node_key, - list_nodes_descending_number_outgoing_arcs[n], - None, - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - - #********************************************************************** - + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + node_key, + list_nodes_descending_number_outgoing_arcs[n], + None, + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + + # ********************************************************************** + # add export nodes - + for n in range(number_export_nodes[g]): - # define key - + node_key = new_network.number_of_nodes() - + # res_pri = ResourcePrice(prices=export_prices[(g,n)], # volumes=None) - + new_network.add_export_node( node_key=node_key, prices={ - (q,p,k): ResourcePrice( - prices=export_prices[(g,n)][k], - volumes=None - ) + (q, p, k): ResourcePrice( + prices=export_prices[(g, n)][k], volumes=None + ) for q in range(ipp.number_assessments) for p in range(ipp.number_reporting_periods[q]) for k in range(ipp.number_time_intervals[q]) - - } - ) - + }, + ) + # add arc from node with many incoming arcs to the export node - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - list_nodes_descending_number_incoming_arcs[n], - node_key, - None, - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - - 
#********************************************************************** - + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + list_nodes_descending_number_incoming_arcs[n], + node_key, + None, + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + + # ********************************************************************** + # identify import and export nodes - + new_network.identify_node_types() - - #********************************************************************** - + + # ********************************************************************** + # demand/supply nodes: create them and add arcs to random nodes - + demand_node_counter = 0 - + supply_node_counter = 0 - + for n in range(number_other_nodes[g]): - # add demand/supply node - + node_key = new_network.number_of_nodes() - + new_network.add_source_sink_node( node_key=node_key, # base_flow=base_flow[(g,n)], base_flow={ - (q, k): base_flow[(g,n)][k] + (q, k): base_flow[(g, n)][k] for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - } - ) - + for k in range(len(ipp.time_intervals[q])) + }, + ) + # differentiate node placement based on the static flow needs - + # this will tend to ensure feasibility - - if min(base_flow[(g,n)]) >= 0: - - # demand node: + + if min(base_flow[(g, n)]) >= 0: + # demand node: # from nodes with zero/few outgoing arcs to the demand node - + node_key_start = list_nodes_ascending_number_outgoing_arcs[ - demand_node_counter] - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - node_key_start, - node_key, - None, - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - + demand_node_counter + ] + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + node_key_start, + node_key, + None, + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + # increment counter - + demand_node_counter = demand_node_counter + 1 - - elif max(base_flow[(g,n)]) <= 0: - - # supply node: + + elif max(base_flow[(g, n)]) <= 0: + # supply node: # from the supply node to nodes with zero/few incoming arcs - + node_key_end = list_nodes_ascending_number_incoming_arcs[ - supply_node_counter] - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - node_key, - node_key_end, - None, - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - + supply_node_counter + ] + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + node_key, + node_key_end, + None, + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + # increment counter - + supply_node_counter = supply_node_counter + 1 - + else: - # demand/supply node - + # add two arcs: # arc 1) from an import node or nodes directly or indirectly # connected to an import node (from which imports are possible) # arc 2) to an export node or nodes directly or indirectly co- # nnected to an export node (from which exports are possible) - - #************************************************************** - + + # ************************************************************** + # arc 1 - + # randomly select a starting node - + # for each import node - + for import_node in new_network.import_nodes: - # select a node with few outgoing arcs - + node_key_start = list_nodes_ascending_number_outgoing_arcs[ - 
supply_node_counter] - + supply_node_counter + ] + # check if there is a path between them - + if nx.has_path(new_network, import_node, node_key_start): - # call random for comparison purposes - - rand.randint(0,1) - + + rand.randint(0, 1) + # update the counter - + supply_node_counter = supply_node_counter + 1 - + # if there is, break - + break - + # if not, continue - + # TODO: while loop to try more times with each import node - - else: - + + else: # randomly select an import node - + node_key_start = new_network.import_nodes[ - rand.randint(0,len(new_network.import_nodes)-1)] - + rand.randint(0, len(new_network.import_nodes) - 1) + ] + # add arc - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - node_key_start, - node_key, - None, - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - - #************************************************************** - + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + node_key_start, + node_key, + None, + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + + # ************************************************************** + # arc 2 - + # randomly select an end node - + # for each export node - + for export_node in new_network.export_nodes: - # select a node with few incoming arcs - + node_key_end = list_nodes_ascending_number_incoming_arcs[ - demand_node_counter] - + demand_node_counter + ] + # check if there is a path between them - + if nx.has_path(new_network, node_key_end, export_node): - # call random for comparison purposes - - rand.randint(0,1) - + + rand.randint(0, 1) + # update the counter - + demand_node_counter = demand_node_counter + 1 - + # if there is, break - + break - + # if not, continue - + # TODO: while loop to try more times with each export node - - else: - + + else: # randomly select an export node - + node_key_end = new_network.export_nodes[ - rand.randint(0,len(new_network.export_nodes)-1)] - + rand.randint(0, len(new_network.export_nodes) - 1) + ] + # add arc - - add_arc_this_way(new_network, - network_order, - ipp, - order_boost, - network_names, - g, - node_key, - node_key_end, - None, - arc_tech_efficiency, - number_arc_technologies[g], - peak_flow, - distance_matrix) - - #************************************************************** - - #****************************************************************** - + + add_arc_this_way( + new_network, + network_order, + ipp, + order_boost, + network_names, + g, + node_key, + node_key_end, + None, + arc_tech_efficiency, + number_arc_technologies[g], + peak_flow, + distance_matrix, + ) + + # ************************************************************** + + # ****************************************************************** + # restart counters - - if demand_node_counter >= 2**(network_order[g])-1: - + + if demand_node_counter >= 2 ** (network_order[g]) - 1: demand_node_counter = 0 - - if supply_node_counter >= 2**(network_order[g])-1: - + + if supply_node_counter >= 2 ** (network_order[g]) - 1: supply_node_counter = 0 - - #****************************************************************** - + + # ****************************************************************** + # # print(new_network.nodes()) # print('here now') # print(new_network.edges(keys=True,data=True)) # assert False - - #********************************************************************** - + + # 
********************************************************************** + # test preselected arcs with finite capacity - + # from import node to new node - + if len(new_network.import_nodes) != 0: - # an import node is required for this - - node_key = 'test_node_1' - + + node_key = "test_node_1" + new_network.add_source_sink_node( - node_key=node_key, + node_key=node_key, # base_flow=[ - # rand.random() + # rand.random() # for k in range(ipp.number_intraperiod_time_intervals)] base_flow={ - (q, k): rand.random() + (q, k): rand.random() for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - } - ) - + for k in range(len(ipp.time_intervals[q])) + }, + ) + new_network.add_preexisting_directed_arc( node_key_a=new_network.import_nodes[0], node_key_b=node_key, # efficiency=[ # 1 for k in range(ipp.number_intraperiod_time_intervals)], efficiency={ - (q, k): 1 + (q, k): 1 for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - }, + for k in range(len(ipp.time_intervals[q])) + }, static_loss=None, capacity=1, - capacity_is_instantaneous=False) - + capacity_is_instantaneous=False, + ) + # from new node to export node - + if len(new_network.export_nodes) != 0: - - node_key = 'test_node_2' - + node_key = "test_node_2" + new_network.add_source_sink_node( - node_key=node_key, + node_key=node_key, # base_flow=[ - # -rand.random() - # for k in range(ipp.number_intraperiod_time_intervals)], + # -rand.random() + # for k in range(ipp.number_intraperiod_time_intervals)], base_flow={ - (q, k): -rand.random() + (q, k): -rand.random() for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - } - ) - + for k in range(len(ipp.time_intervals[q])) + }, + ) + # add preselected arc from export node - + new_network.add_preexisting_directed_arc( node_key_a=node_key, node_key_b=new_network.export_nodes[0], # efficiency=[ # 1 for k in range(ipp.number_intraperiod_time_intervals)], efficiency={ - (q, k): 1 + (q, k): 1 for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - }, + for k in range(len(ipp.time_intervals[q])) + }, static_loss=None, capacity=1, - capacity_is_instantaneous=False) - + capacity_is_instantaneous=False, + ) + # add preselected infinite capacity arcs - + # add infinite capacity arc from import node to new node - + if len(new_network.import_nodes) != 0: - # an import node is required for this - - node_key = 'test_node_3' - + + node_key = "test_node_3" + new_network.add_source_sink_node( - node_key=node_key, + node_key=node_key, # base_flow=[ - # 1e3*rand.random() + # 1e3*rand.random() # for k in range(ipp.number_intraperiod_time_intervals)] base_flow={ - (q, k): 1e3*rand.random() + (q, k): 1e3 * rand.random() for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - } - ) - + for k in range(len(ipp.time_intervals[q])) + }, + ) + new_network.add_preexisting_directed_arc( node_key_a=new_network.import_nodes[0], - node_key_b='test_node_3', + node_key_b="test_node_3", # efficiency=[ # 1 for k in range(ipp.number_intraperiod_time_intervals)], efficiency={ - (q, k): 1 + (q, k): 1 for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - }, + for k in range(len(ipp.time_intervals[q])) + }, static_loss=None, capacity=math.inf, - capacity_is_instantaneous=False - ) - + capacity_is_instantaneous=False, + ) + # add infinite capacity from new node to export node - + if len(new_network.export_nodes) != 0: - - node_key = 'test_node_4' - + node_key = "test_node_4" + 
new_network.add_source_sink_node( - node_key=node_key, + node_key=node_key, # base_flow=[ - # -1e3*rand.random() + # -1e3*rand.random() # for k in range(ipp.number_intraperiod_time_intervals)] base_flow={ - (q, k): -1e3*rand.random() + (q, k): -1e3 * rand.random() for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - } - ) - + for k in range(len(ipp.time_intervals[q])) + }, + ) + # add preselected arc from export node - + new_network.add_preexisting_directed_arc( node_key_a=node_key, node_key_b=new_network.export_nodes[0], # efficiency=[ # 1 for k in range(ipp.number_intraperiod_time_intervals)], efficiency={ - (q, k): 1 + (q, k): 1 for q in ipp.assessment_keys - for k in range( - len(ipp.time_intervals[q]) - ) - }, + for k in range(len(ipp.time_intervals[q])) + }, static_loss=None, capacity=math.inf, - capacity_is_instantaneous=False) - - #********************************************************************** - + capacity_is_instantaneous=False, + ) + + # ********************************************************************** + # prepare network object - + new_network.identify_node_types() - + # add network to mves object - - ipp.add_network(network_key=g,network=new_network) - - #********************************************************************** - + + ipp.add_network(network_key=g, network=new_network) + + # ********************************************************************** + # feasibility checks - - #********************************************************************** - - #************************************************************************** - #************************************************************************** - + + # ********************************************************************** + + # ************************************************************************** + # ************************************************************************** + return ipp - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/examples_esipp_network.py b/tests/examples_esipp_network.py index e1ab98f..d5f9290 100644 --- a/tests/examples_esipp_network.py +++ b/tests/examples_esipp_network.py @@ -18,2034 +18,2119 @@ from src.topupopt.problems.esipp.network import ArcsWithoutStaticLosses from src.topupopt.problems.esipp.resource import ResourcePrice -#****************************************************************************** -#****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** + def examples(): - - #************************************************************************** - #************************************************************************** - + # ************************************************************************** + # 
************************************************************************** + # test creating arc technology objects - + examples_arc_technologies() - + # test creating arc technology objects for technologies with static losses - + examples_arc_technologies_static_losses() - + # test peculiar subclasses - + example_arcs_without_losses() - + # test modifying nodes - + examples_modifying_nodes() - + # test to trigger special errors - + examples_network_disallowed_cases() - + # test key generation - + examples_pseudo_unique_key_generation() - + # test creating a network with a tree topology - + examples_tree_topology() - - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def examples_tree_topology(): - # create a network object with a tree topology - + tree_network = binomial_tree(3, create_using=MultiDiGraph) - + network = Network(tree_network) - + for edge_key in network.edges(keys=True): - arc = ArcsWithoutLosses( - name=str(edge_key), - capacity=[5, 10], - minimum_cost=[3, 6], - specific_capacity_cost=0, - capacity_is_instantaneous=False - ) - - network.add_edge( - *edge_key, - **{Network.KEY_ARC_TECH: arc} - ) - + name=str(edge_key), + capacity=[5, 10], + minimum_cost=[3, 6], + specific_capacity_cost=0, + capacity_is_instantaneous=False, + ) + + network.add_edge(*edge_key, **{Network.KEY_ARC_TECH: arc}) + # assert that it does not have a tree topology - + assert not network.has_tree_topology() # select all the nodes - + for edge_key in network.edges(keys=True): - - network.edges[edge_key][ - Network.KEY_ARC_TECH].options_selected[0] = True - + network.edges[edge_key][Network.KEY_ARC_TECH].options_selected[0] = True + # assert that it has a tree topology assert network.has_tree_topology() - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def examples_arc_technologies_static_losses(): - - #************************************************************************** - - + # ************************************************************************** + number_time_intervals = 3 number_scenarios = 2 number_options = 4 - + efficiency_dict = { - (q,k): 0.95 + (q, k): 0.95 for q in range(number_scenarios) for k in range(number_time_intervals) - } - + } + static_loss_dict = { - (h,q,k): 1 + (h, q, k): 1 for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - } - + } + for capacity_is_instantaneous in (True, False): - arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, + name="any", + efficiency=efficiency_dict, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + 
capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=static_loss_dict, - validate=True - ) - + validate=True, + ) + assert arc_tech.has_proportional_losses() assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # isotropic - + arc_tech = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=static_loss_dict, - validate=True - ) - + validate=True, + ) + assert not arc_tech.has_proportional_losses() assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # create arc technology with only one option - + arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, + name="any", + efficiency=efficiency_dict, efficiency_reverse=None, capacity=(1,), minimum_cost=(1,), - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (0,q,k): 1 - #for h in range(number_options) + (0, q, k): 1 + # for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - }, - validate=True - ) - + }, + validate=True, + ) + assert arc_tech.has_proportional_losses() assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # create arc technology for one time interval - + arc_tech = Arcs( - name='any', + name="any", efficiency={ (q, 0): 0.5 for q in range(number_scenarios) - #for k in range(number_time_intervals) - }, + # for k in range(number_time_intervals) + }, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,0): 1 + (h, q, 0): 1 for h in range(number_options) for q in range(number_scenarios) - #for k in range(number_time_intervals) - }, - validate=True - ) - + # for k in range(number_time_intervals) + }, + validate=True, + ) + assert arc_tech.has_proportional_losses() assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - - #********************************************************************** - + + # 
********************************************************************** + # TypeError: The static losses should be given as a dict or None. - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=tuple( - [k for k in range(number_time_intervals)] - for o in range(number_options)), - validate=True - ) + [k for k in range(number_time_intervals)] + for o in range(number_options) + ), + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - - # ValueError('The static losses should be specified for each arc + + # ValueError('The static losses should be specified for each arc # option.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h, q,): 1 + ( + h, + q, + ): 1 for h in range(number_options) for q in range(number_scenarios) - }, - validate=True - ) + }, + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('The static losses must be specified via a list of lists.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=[ tuple(k for k in range(number_time_intervals)) - for o in range(number_options)], - validate=True - ) + for o in range(number_options) + ], + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('The static loss values are inconsistent with the number ' # 'of options, scenarios and intervals.') - + error_triggered = False try: arc_tech = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,k): 1 + (h, q, k): 1 for h in range(number_options) for q in range(number_scenarios) - for k in range(number_time_intervals-1) - }, - validate=True - ) - - arc_tech.validate_sizes(number_options=number_options, - number_scenarios=number_scenarios, - number_intervals=[ - number_time_intervals - for _ in range(number_scenarios)]) + for k in range(number_time_intervals - 1) + }, + validate=True, + ) + + 
arc_tech.validate_sizes( + number_options=number_options, + number_scenarios=number_scenarios, + number_intervals=[ + number_time_intervals for _ in range(number_scenarios) + ], + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('The static losses were not provided as numbers.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,k): str(3.54) + (h, q, k): str(3.54) for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - }, - validate=True - ) + }, + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('The static losses must be positive or zero.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,k): -random.randint(0, 1)*random.random() + (h, q, k): -random.randint(0, 1) * random.random() for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - }, - validate=True - ) + }, + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError: The static loss dict keys must be tuples - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', + name="hey", efficiency=None, efficiency_reverse=None, - static_loss={k:1 for k in range(number_time_intervals)}, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + static_loss={k: 1 for k in range(number_time_intervals)}, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - - #ValueError( 'The static loss dict keys must be tuples of size 3.') - - error_triggered = False + + # ValueError( 'The static loss dict keys must be tuples of size 3.') + + error_triggered = False try: _ = Arcs( - name='hey', + name="hey", efficiency=None, efficiency_reverse=None, - static_loss={(k,3): 1 for k in range(number_time_intervals)}, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + static_loss={(k, 3): 1 for k in range(number_time_intervals)}, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # 
TypeError(The static losses should be given as a dict or None.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=None, + name="hey", + efficiency=None, efficiency_reverse=None, static_loss=[1 for k in range(number_time_intervals)], - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError( # 'No static loss values were provided. There should be one'+ # ' value per option, scenario and time interval.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', + name="hey", efficiency=None, efficiency_reverse=None, - static_loss={}, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + static_loss={}, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def examples_arc_technologies(): + # ************************************************************************** - #************************************************************************** - # create arc technology using instantaneous capacities - + number_scenarios = 2 number_options = 4 number_time_intervals = 3 - + efficiency_dict = { - (q,k): 0.85 + (q, k): 0.85 for q in range(number_scenarios) for k in range(number_time_intervals) - } - + } + for capacity_is_instantaneous in (True, False): - arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="any", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # create arc technology with only one option - + arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="any", + efficiency=efficiency_dict, + efficiency_reverse=None, 
+            name="any",
+            efficiency=efficiency_dict,
+            efficiency_reverse=None,
             static_loss=None,
             capacity=(1,),
             minimum_cost=(1,),
-            specific_capacity_cost=1,
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True)
-
+            validate=True,
+        )
+
         assert arc_tech.has_proportional_losses()
-
+
         assert not arc_tech.has_static_losses()
-
+
         assert not arc_tech.is_infinite_capacity()
-
+
         assert not arc_tech.has_been_selected()
-
+
         assert arc_tech.is_isotropic(reverse_none_means_isotropic=True)
-
+
         assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False)
-
+
         # create arc technology for one time interval
-
+
         arc_tech = Arcs(
-            name='any',
-            efficiency={(0,0): 0.95},
-            efficiency_reverse=None,
+            name="any",
+            efficiency={(0, 0): 0.95},
+            efficiency_reverse=None,
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options)),
-            minimum_cost=tuple(1+o for o in range(number_options)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options)),
+            minimum_cost=tuple(1 + o for o in range(number_options)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True)
-
+            validate=True,
+        )
+
         assert arc_tech.has_proportional_losses()
-
+
         assert not arc_tech.has_static_losses()
-
+
         assert not arc_tech.is_infinite_capacity()
-
+
         assert not arc_tech.has_been_selected()
-
+
         assert arc_tech.is_isotropic(reverse_none_means_isotropic=True)
-
+
         assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False)
-
+
         # create arc technology for one time interval and isotropic
-
+
         arc_tech = Arcs(
-            name='any',
-            efficiency={(0,0): 0.95},
-            efficiency_reverse={(0,0): 0.95},
+            name="any",
+            efficiency={(0, 0): 0.95},
+            efficiency_reverse={(0, 0): 0.95},
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options)),
-            minimum_cost=tuple(1+o for o in range(number_options)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options)),
+            minimum_cost=tuple(1 + o for o in range(number_options)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True)
-
+            validate=True,
+        )
+
         assert arc_tech.has_proportional_losses()
-
+
         assert not arc_tech.has_static_losses()
-
+
         assert not arc_tech.is_infinite_capacity()
-
+
         assert not arc_tech.has_been_selected()
-
+
         assert arc_tech.is_isotropic(reverse_none_means_isotropic=True)
-
+
         assert arc_tech.is_isotropic(reverse_none_means_isotropic=False)
-
+
         # create arc technology for one time interval and anisotropic
-
+
         arc_tech = Arcs(
-            name='any',
-            efficiency={(0,0): 0.95},
-            efficiency_reverse={(0,0): 1},
+            name="any",
+            efficiency={(0, 0): 0.95},
+            efficiency_reverse={(0, 0): 1},
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options)),
-            minimum_cost=tuple(1+o for o in range(number_options)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options)),
+            minimum_cost=tuple(1 + o for o in range(number_options)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True)
-
+            validate=True,
+        )
+
         assert arc_tech.has_proportional_losses()
-
+
         assert not arc_tech.has_static_losses()
-
+
         assert not arc_tech.is_infinite_capacity()
-
+
         assert not arc_tech.has_been_selected()
-
+
         assert not arc_tech.is_isotropic(reverse_none_means_isotropic=True)
-
+
         assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False)
-
+
         # create arc technology for one time interval and anisotropic
-
+
         arc_tech = Arcs(
-            name='any',
-            efficiency={(0,0): 1},
-            efficiency_reverse={(0,0): 0.95},
+            name="any",
+            efficiency={(0, 0): 1},
+            efficiency_reverse={(0, 0): 0.95},
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options)),
-            minimum_cost=tuple(1+o for o in range(number_options)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options)),
+            minimum_cost=tuple(1 + o for o in range(number_options)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True)
-
+            validate=True,
+        )
+
         assert arc_tech.has_proportional_losses()
-
+
         assert not arc_tech.has_static_losses()
-
+
         assert not arc_tech.is_infinite_capacity()
-
+
         assert not arc_tech.has_been_selected()
-
+
         assert not arc_tech.is_isotropic(reverse_none_means_isotropic=True)
-
+
         assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False)
-
-        #**********************************************************************
-
+
+        # **********************************************************************
+
         # trigger errors
-
+
         # TypeError('The name attribute is not hashable.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name=[1,2,3],
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name=[1, 2, 3],
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
-        #TypeError:The efficiency dict keys must be (scenario, interval) tuples
-
-        error_triggered = False
+
+        # TypeError:The efficiency dict keys must be (scenario, interval) tuples
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency={k:1 for k in range(number_time_intervals)},
-                efficiency_reverse=None,
+                name="hey",
+                efficiency={k: 1 for k in range(number_time_intervals)},
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
-        #ValueError( 'The efficiency dict keys must be tuples of size 2.')
-
-        error_triggered = False
+
+        # ValueError( 'The efficiency dict keys must be tuples of size 2.')
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency={(k,3,4) :1 for k in range(number_time_intervals)},
-                efficiency_reverse=None,
+                name="hey",
+                efficiency={(k, 3, 4): 1 for k in range(number_time_intervals)},
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError('The efficiency should be given as a dict or None.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=[1 for k in range(number_time_intervals)],
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=[1 for k in range(number_time_intervals)],
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError('The reverse efficiency has to match the nominal'+
         #           ' one when there are no proportional losses.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=None,
-                efficiency_reverse={},
+                name="hey",
+                efficiency=None,
+                efficiency_reverse={},
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError:'The reverse efficiency should be given as a dict or None.'
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=[1 for k in range(number_time_intervals)],
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=[1 for k in range(number_time_intervals)],
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError(
        #     'No efficiency values were provided. There should be '+
        #     'one value per scenario and time interval.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse={},
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse={},
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError: The keys for the efficiency dicts do not match.
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
+                name="hey",
+                efficiency=efficiency_dict,
                 efficiency_reverse={
-                    (key[1],key[0]): value
-                    for key, value in efficiency_dict.items()},
+                    (key[1], key[0]): value for key, value in efficiency_dict.items()
+                },
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError: Efficiency values must be provided as numeric types.
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
+                name="hey",
+                efficiency=efficiency_dict,
                 efficiency_reverse={
-                    (key[0],key[1]): str(value)
-                    for key, value in efficiency_dict.items()},
+                    (key[0], key[1]): str(value)
+                    for key, value in efficiency_dict.items()
+                },
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError('Efficiency values must be positive.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
+                name="hey",
+                efficiency=efficiency_dict,
                 efficiency_reverse={
-                    (key[0],key[1]): -1
-                    for key, value in efficiency_dict.items()},
+                    (key[0], key[1]): -1 for key, value in efficiency_dict.items()
+                },
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        #TypeError('The capacity should be given as a list or tuple.')
-
-        error_triggered = False
+
+        # TypeError('The capacity should be given as a list or tuple.')
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity={o: 1+o for o in range(number_options)},
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity={o: 1 + o for o in range(number_options)},
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError: The minimum cost values should be given as a list or tuple
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost={o: 1+o for o in range(number_options)},
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost={o: 1 + o for o in range(number_options)},
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True
-                )
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError: The specific capacity cost was not given as a numeric type
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
-                static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=[1],
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
+                static_loss=None,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=[1],
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError:The number of capacity and minimum cost entries must match
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options+1)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options + 1)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # ValueError: No entries for capacity and minimum cost were provided.
+
+        # ValueError: No entries for capacity and minimum cost were provided.
         # At least one option should be provided.
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
                 capacity=tuple(),
                 minimum_cost=tuple(),
-                specific_capacity_cost=1,
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # ValueError: No entries for efficiency were provided. There should be
+
+        # ValueError: No entries for efficiency were provided. There should be
         # one entry per time interval.
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency={},
-                efficiency_reverse=None,
+                name="hey",
+                efficiency={},
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # ValueError('The number of efficiency values must match the number of
+
+        # ValueError('The number of efficiency values must match the number of
         # time intervals.')
-
+
         arc_tech = Arcs(
-            name='hey',
+            name="hey",
             efficiency={
-                (q,k): 0.85
+                (q, k): 0.85
                 for q in range(number_scenarios)
-                for k in range(number_time_intervals+1)
-                },
-            efficiency_reverse=None,
+                for k in range(number_time_intervals + 1)
+            },
+            efficiency_reverse=None,
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options)),
-            minimum_cost=tuple(1+o for o in range(number_options)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options)),
+            minimum_cost=tuple(1 + o for o in range(number_options)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True)
-
-        error_triggered = False
+            validate=True,
+        )
+
+        error_triggered = False
         try:
-            arc_tech.validate_sizes(number_options=number_options,
-                                    number_scenarios=number_scenarios,
-                                    number_intervals=[
-                                        number_time_intervals
-                                        for _ in range(number_scenarios)])
+            arc_tech.validate_sizes(
+                number_options=number_options,
+                number_scenarios=number_scenarios,
+                number_intervals=[
+                    number_time_intervals for _ in range(number_scenarios)
+                ],
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # ValueError('The number of efficiency values must match the number of
+
+        # ValueError('The number of efficiency values must match the number of
         # time intervals.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             arc_tech = Arcs(
-                name='hey',
+                name="hey",
                 efficiency={
-                    (q,k): 0.85
+                    (q, k): 0.85
                     for q in range(number_scenarios)
                     for k in range(number_time_intervals)
-                    },
+                },
                 efficiency_reverse={
-                    (q,k): 0.85
+                    (q, k): 0.85
                     for q in range(number_scenarios)
-                    for k in range(number_time_intervals-1)
-                    },
+                    for k in range(number_time_intervals - 1)
+                },
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
-            arc_tech.validate_sizes(number_options=number_options,
-                                    number_scenarios=number_scenarios,
-                                    number_intervals=[
-                                        number_time_intervals
-                                        for _ in range(number_scenarios)])
+                validate=True,
+            )
+            arc_tech.validate_sizes(
+                number_options=number_options,
+                number_scenarios=number_scenarios,
+                number_intervals=[
+                    number_time_intervals for _ in range(number_scenarios)
+                ],
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # ValueError('The number of capacity values must match the number of
+
+        # ValueError('The number of capacity values must match the number of
         # options.')
-
+
         arc_tech = Arcs(
-            name='hey',
-            efficiency=efficiency_dict,
-            efficiency_reverse=None,
+            name="hey",
+            efficiency=efficiency_dict,
+            efficiency_reverse=None,
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options+1)),
-            minimum_cost=tuple(1+o for o in range(number_options+1)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options + 1)),
+            minimum_cost=tuple(1 + o for o in range(number_options + 1)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True
-            )
-
-        error_triggered = False
+            validate=True,
+        )
+
+        error_triggered = False
         try:
-            arc_tech.validate_sizes(number_options=number_options,
-                                    number_scenarios=number_scenarios,
-                                    number_intervals=[
-                                        number_time_intervals
-                                        for _ in range(number_scenarios)])
+            arc_tech.validate_sizes(
+                number_options=number_options,
+                number_scenarios=number_scenarios,
+                number_intervals=[
+                    number_time_intervals for _ in range(number_scenarios)
+                ],
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # ValueError: The minimum cost values are inconsistent with the number
+
+        # ValueError: The minimum cost values are inconsistent with the number
         # of options.
-
+
         arc_tech = Arcs(
-            name='hey',
-            efficiency=efficiency_dict,
-            efficiency_reverse=None,
+            name="hey",
+            efficiency=efficiency_dict,
+            efficiency_reverse=None,
             static_loss=None,
-            capacity=tuple(1+o for o in range(number_options+1)),
-            minimum_cost=tuple(1+o for o in range(number_options+1)),
-            specific_capacity_cost=1,
+            capacity=tuple(1 + o for o in range(number_options + 1)),
+            minimum_cost=tuple(1 + o for o in range(number_options + 1)),
+            specific_capacity_cost=1,
             capacity_is_instantaneous=capacity_is_instantaneous,
-            validate=True
-            )
-
-        error_triggered = False
+            validate=True,
+        )
+
+        error_triggered = False
         try:
-            arc_tech.validate_sizes(number_options=number_options,
-                                    number_scenarios=number_scenarios,
-                                    number_intervals=[
-                                        number_time_intervals
-                                        for _ in range(number_scenarios)])
+            arc_tech.validate_sizes(
+                number_options=number_options,
+                number_scenarios=number_scenarios,
+                number_intervals=[
+                    number_time_intervals for _ in range(number_scenarios)
+                ],
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError('Efficiency values must be provided as numeric types.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency={key: str(value)
-                            for key, value in efficiency_dict.items()},
-                efficiency_reverse=None,
+                name="hey",
+                efficiency={key: str(value) for key, value in efficiency_dict.items()},
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError('Efficiency values must be positive.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency={key: -value*random.randint(0,1)
-                            for key, value in efficiency_dict.items()},
-                efficiency_reverse=None,
+                name="hey",
+                efficiency={
+                    key: -value * random.randint(0, 1)
+                    for key, value in efficiency_dict.items()
+                },
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError('Capacity values must be provided as numeric types.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
+                name="hey",
                 efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(str(1+o) for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(str(1 + o) for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError('Capacity values must be positive.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(-random.randint(0,1)
-                               for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(-random.randint(0, 1) for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
+
         # TypeError('Minimum cost values must be provided as numeric types.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
+                name="hey",
                 efficiency=efficiency_dict,
-                efficiency_reverse=None,
-                static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(str(1+o) for o in range(number_options)),
-                specific_capacity_cost=1,
+                efficiency_reverse=None,
+                static_loss=None,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(str(1 + o) for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
+
         # ValueError('Minimum cost values must be positive or zero.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o
-                               for o in range(number_options)),
-                minimum_cost=tuple(-1
-                                   for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(-1 for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=capacity_is_instantaneous,
-                validate=True)
+                validate=True,
+            )
         except ValueError:
             error_triggered = True
         assert error_triggered
-
-        # TypeError('The information about capacities being instantaneous or not
+
+        # TypeError('The information about capacities being instantaneous or not
         # should be given as a boolean variable.')
-
-        error_triggered = False
+
+        error_triggered = False
         try:
             _ = Arcs(
-                name='hey',
-                efficiency=efficiency_dict,
-                efficiency_reverse=None,
+                name="hey",
+                efficiency=efficiency_dict,
+                efficiency_reverse=None,
                 static_loss=None,
-                capacity=tuple(1+o for o in range(number_options)),
-                minimum_cost=tuple(1+o for o in range(number_options)),
-                specific_capacity_cost=1,
+                capacity=tuple(1 + o for o in range(number_options)),
+                minimum_cost=tuple(1 + o for o in range(number_options)),
+                specific_capacity_cost=1,
                 capacity_is_instantaneous=1,
-                validate=True)
+                validate=True,
+            )
         except TypeError:
             error_triggered = True
         assert error_triggered
-
-    #**************************************************************************
-    #**************************************************************************
-
+
+    # **************************************************************************
+    # **************************************************************************
+
     # Network
-
+
     arc_tech_AB = Arcs(
-        name='AB',
-        efficiency=efficiency_dict,
-        efficiency_reverse=None,
+        name="AB",
+        efficiency=efficiency_dict,
+        efficiency_reverse=None,
         static_loss=None,
-        capacity=tuple(1+o for o in range(number_options)),
-        minimum_cost=tuple(1+o for o in range(number_options)),
-        specific_capacity_cost=1,
+        capacity=tuple(1 + o for o in range(number_options)),
+        minimum_cost=tuple(1 + o for o in range(number_options)),
+        specific_capacity_cost=1,
         capacity_is_instantaneous=False,
-        validate=True)
-
+        validate=True,
+    )
+
     arc_tech_AB.options_selected[0] = True
-
+
     assert arc_tech_AB.number_options() == number_options
-
+
     net = Network()
-
+
     # add undirected arc
-
-    net.add_undirected_arc(
-        node_key_a='A',
-        node_key_b='B',
-        arcs=arc_tech_AB)
-
+
+    net.add_undirected_arc(node_key_a="A", node_key_b="B", arcs=arc_tech_AB)
+
     # add directed arc
-
-    net.add_directed_arc(
-        node_key_a='A',
-        node_key_b='B',
-        arcs=arc_tech_AB)
-
+
+    net.add_directed_arc(node_key_a="A", node_key_b="B", arcs=arc_tech_AB)
+
     # add infinite capacity arc
-
+
     net.add_infinite_capacity_arc(
-        node_key_a='C',
-        node_key_b='D',
-        efficiency={
-            (i, j): 1
-            for i in range(3)
-            for j in range(4)},
-        static_loss=None)
-
+        node_key_a="C",
+        node_key_b="D",
+        efficiency={(i, j): 1 for i in range(3) for j in range(4)},
+        static_loss=None,
+    )
+
     # add pre-existing directed arc
-
+
     net.add_preexisting_directed_arc(
-        node_key_a='E',
-        node_key_b='F',
-        efficiency=efficiency_dict,
-        static_loss=None,
-        capacity=3,
-        capacity_is_instantaneous=True)
-
+        node_key_a="E",
+        node_key_b="F",
+        efficiency=efficiency_dict,
+        static_loss=None,
+        capacity=3,
+        capacity_is_instantaneous=True,
+    )
+
     # add pre-existing undirected arc
-
+
     net.add_preexisting_undirected_arc(
-        node_key_a='A',
-        node_key_b='C',
-        efficiency=efficiency_dict,
-        efficiency_reverse=efficiency_dict,
-        static_loss=None,
-        capacity=3,
-        capacity_is_instantaneous=True)
-
+        node_key_a="A",
+        node_key_b="C",
+        efficiency=efficiency_dict,
+        efficiency_reverse=efficiency_dict,
+        static_loss=None,
+        capacity=3,
+        capacity_is_instantaneous=True,
+    )
+
     net.modify_network_arc(
-        node_key_a='A',
-        node_key_b='C',
-        arc_key_ab='AC',
-        data_dict={net.KEY_ARC_TECH: arc_tech_AB, net.KEY_ARC_UND: False})
-
-    #**************************************************************************
-
+        node_key_a="A",
+        node_key_b="C",
+        arc_key_ab="AC",
+        data_dict={net.KEY_ARC_TECH: arc_tech_AB, net.KEY_ARC_UND: False},
+    )
+
+    # **************************************************************************
+
     # add import node
-
+
     imp_resource_price = ResourcePrice(
-        prices=[random.random()
-                for k in range(number_time_intervals)],
-        volumes=[ *[random.random() for k in range(number_time_intervals-1)], None]
-        )
-
-    net.add_import_node(node_key='G', prices={(0,0,0): imp_resource_price})
+        prices=[random.random() for k in range(number_time_intervals)],
+        volumes=[*[random.random() for k in range(number_time_intervals - 1)], None],
+    )
+
+    net.add_import_node(node_key="G", prices={(0, 0, 0): imp_resource_price})

     # add export node
-
+
     exp_resource_price = ResourcePrice(
-        prices=[random.random()
-                for k in range(number_time_intervals)],
-        volumes=[ *[random.random() for k in range(number_time_intervals-1)], None]
-        )
-
-    net.add_export_node(node_key='H', prices={(0,0,0): exp_resource_price})
-
-    net.add_waypoint_node(node_key='Z')
-
-    base_flow = {
-        (i,j): random.random()
-        for i in range(3)
-        for j in range(4)
-        }
-
-    net.add_source_sink_node(node_key='Y', base_flow=base_flow)
-
-    base_flow[(2,3)] = random.random()
-
-    net.modify_network_node(
-        node_key='Y',
-        node_data={net.KEY_NODE_BASE_FLOW: base_flow}
-        )
-
+        prices=[random.random() for k in range(number_time_intervals)],
+        volumes=[*[random.random() for k in range(number_time_intervals - 1)], None],
+    )
+
+    net.add_export_node(node_key="H", prices={(0, 0, 0): exp_resource_price})
+
+    net.add_waypoint_node(node_key="Z")
+
+    base_flow = {(i, j): random.random() for i in range(3) for j in range(4)}
+
+    net.add_source_sink_node(node_key="Y", base_flow=base_flow)
+
+    base_flow[(2, 3)] = random.random()
+
+    net.modify_network_node(node_key="Y", node_data={net.KEY_NODE_BASE_FLOW: base_flow})
+
     net.identify_node_types()
-
-    assert 'Z' in net.waypoint_nodes
-
-    assert 'G' in net.import_nodes
-
-    assert 'H' in net.export_nodes
-
-    assert 'Y' in net.source_sink_nodes
-
-    #**************************************************************************
-
-#******************************************************************************
-#******************************************************************************
+
+    assert "Z" in net.waypoint_nodes
+
+    assert "G" in net.import_nodes
+
+    assert "H" in net.export_nodes
+
+    assert "Y" in net.source_sink_nodes
+
+    # **************************************************************************
+
+
+# ******************************************************************************
+# ******************************************************************************
+

 def example_arcs_without_losses():
-
     # test arc without (static and proportional) losses
-
+
     arc_tech = ArcsWithoutLosses(
-        name='AB',
-        capacity=(1,2,3),
-        minimum_cost=(4,5,6),
-        specific_capacity_cost=6,
+        name="AB",
+        capacity=(1, 2, 3),
+        minimum_cost=(4, 5, 6),
+        specific_capacity_cost=6,
         capacity_is_instantaneous=False,
-        validate=True
-        )
-
+        validate=True,
+    )
+
     assert not arc_tech.has_proportional_losses()
     assert not arc_tech.has_static_losses()
-
+
     assert not arc_tech.is_infinite_capacity()
-
+
     # test arc without static losses
-
+
     arc_tech = ArcsWithoutStaticLosses(
-        name='AB',
-        efficiency={(0,0):1, (0,1):0.9, (0,2):0.8},
-        efficiency_reverse=None,
-        capacity=(1,2,3),
-        minimum_cost=(4,5,6),
-        specific_capacity_cost=6,
+        name="AB",
+        efficiency={(0, 0): 1, (0, 1): 0.9, (0, 2): 0.8},
+        efficiency_reverse=None,
+        capacity=(1, 2, 3),
+        minimum_cost=(4, 5, 6),
+        specific_capacity_cost=6,
         capacity_is_instantaneous=False,
-        validate=True
-        )
-
+        validate=True,
+    )
+
     assert arc_tech.has_proportional_losses()
     assert not arc_tech.has_static_losses()
-
+
assert not arc_tech.is_infinite_capacity() - + # test arc without proportional losses - + arc_tech = ArcsWithoutProportionalLosses( - name='AB', - static_loss={(0,0,0):0.1, (0,0,1):0.2, (0,0,2):0.3, - (1,0,0):0.15, (1,0,1):0.25, (1,0,2):0.35, - (2,0,0):0.16, (2,0,1):0.26, (2,0,2):0.36}, - capacity=(1,2,3), - minimum_cost=(4,5,6), - specific_capacity_cost=6, + name="AB", + static_loss={ + (0, 0, 0): 0.1, + (0, 0, 1): 0.2, + (0, 0, 2): 0.3, + (1, 0, 0): 0.15, + (1, 0, 1): 0.25, + (1, 0, 2): 0.35, + (2, 0, 0): 0.16, + (2, 0, 1): 0.26, + (2, 0, 2): 0.36, + }, + capacity=(1, 2, 3), + minimum_cost=(4, 5, 6), + specific_capacity_cost=6, capacity_is_instantaneous=False, - validate=True - ) - + validate=True, + ) + assert not arc_tech.has_proportional_losses() assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def examples_modifying_nodes(): - - #************************************************************************** - + # ************************************************************************** + net = Network() - + number_intervals = 3 - + resource_price = ResourcePrice( - prices=[random.random() for k in range(number_intervals)], - volumes=[ - *[random.random() for k in range(number_intervals-1)], None - ] - ) - - base_flow = { - (0,k): random.random() - for k in range(number_intervals)} - + prices=[random.random() for k in range(number_intervals)], + volumes=[*[random.random() for k in range(number_intervals - 1)], None], + ) + + base_flow = {(0, k): random.random() for k in range(number_intervals)} + arc_tech = ArcsWithoutLosses( - name='hello', - capacity=[5], - minimum_cost=[3], - specific_capacity_cost=3, - capacity_is_instantaneous=False - ) - + name="hello", + capacity=[5], + minimum_cost=[3], + specific_capacity_cost=3, + capacity_is_instantaneous=False, + ) + # add isolated import node - - net.add_import_node(node_key='I_iso', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I_iso", prices={(0, 0, 0): resource_price}) + # add import node with outgoing arcs - net.add_import_node(node_key='I', - prices={(0,0,0): resource_price}) - + net.add_import_node(node_key="I", prices={(0, 0, 0): resource_price}) + # add isolated export node - - net.add_import_node(node_key='E_iso', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="E_iso", prices={(0, 0, 0): resource_price}) + # add export node with incoming arcs - net.add_export_node(node_key='E', - prices={(0,0,0): resource_price}) - + net.add_export_node(node_key="E", prices={(0, 0, 0): resource_price}) + # add isolated normal node - - net.add_source_sink_node(node_key='A_iso', - base_flow=base_flow) + + net.add_source_sink_node(node_key="A_iso", base_flow=base_flow) # add normal node with incoming arcs - - net.add_source_sink_node(node_key='A_in', - base_flow=base_flow) + + net.add_source_sink_node(node_key="A_in", base_flow=base_flow) # add normal node with outgoing arcs - - net.add_source_sink_node(node_key='A_out', - base_flow=base_flow) + + net.add_source_sink_node(node_key="A_out", base_flow=base_flow) # add normal node with incoming and outgoing arcs - - net.add_source_sink_node(node_key='A', - base_flow=base_flow) - - 
#************************************************************************** - + + net.add_source_sink_node(node_key="A", base_flow=base_flow) + + # ************************************************************************** + # arcs - - net.add_directed_arc(node_key_a='I', - node_key_b='A_in', - arcs=arc_tech) - - net.add_directed_arc(node_key_a='I', - node_key_b='A', - arcs=arc_tech) - - net.add_directed_arc(node_key_a='A_out', - node_key_b='E', - arcs=arc_tech) - - net.add_directed_arc(node_key_a='A', - node_key_b='E', - arcs=arc_tech) - - #************************************************************************** - + + net.add_directed_arc(node_key_a="I", node_key_b="A_in", arcs=arc_tech) + + net.add_directed_arc(node_key_a="I", node_key_b="A", arcs=arc_tech) + + net.add_directed_arc(node_key_a="A_out", node_key_b="E", arcs=arc_tech) + + net.add_directed_arc(node_key_a="A", node_key_b="E", arcs=arc_tech) + + # ************************************************************************** + # change I_iso to regular: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change I_iso to export: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change I_iso to waypoint: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="I_iso", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - - #************************************************************************** - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + + # ************************************************************************** + # change E_iso to regular: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change E_iso to import: okay - + net.modify_network_node( - 
node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change E_iso to waypoint: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="E_iso", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - - #************************************************************************** - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + + # ************************************************************************** + # change A_iso to export: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_iso to import: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_iso to waypoint: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="A_iso", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + + # ************************************************************************** - #************************************************************************** - # change I to regular: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: 
resource_price}) - + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change I to waypoint: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="I", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + + # ************************************************************************** - #************************************************************************** - # change E to regular: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change E to waypoint: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="E", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + + # ************************************************************************** - #************************************************************************** - # change A_in to export: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_in to waypoint: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="A_in", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - - #************************************************************************** - + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + + # ************************************************************************** + # change A_out to import: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: 
net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_out to waypoint: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="A_out", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + + # ************************************************************************** - #************************************************************************** - # change I to export: fail - + error_triggered = False try: net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change E to import: fail - + error_triggered = False try: net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A_out to export: fail - + error_triggered = False try: net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A_in to import: fail - + error_triggered = False try: net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A to export: fail - + error_triggered = False try: net.modify_network_node( - node_key='A', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A to import: fail - + error_triggered = False try: net.modify_network_node( - node_key='A', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - #************************************************************************** - + # 
************************************************************************** + # try to modify a non-existent node - + error_triggered = False try: net.modify_network_node( - node_key='ABCD', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} - ) + node_key="ABCD", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) except ValueError: error_triggered = True assert error_triggered - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def examples_network_disallowed_cases(): - - #************************************************************************** - + # ************************************************************************** + net = Network() - + number_intervals = 3 - + resource_price = ResourcePrice( - prices=[random.random() for k in range(number_intervals)], - volumes=[ - *[random.random() for k in range(number_intervals-1)], None - ] - ) - - base_flow = { - (0,k): random.random() - for k in range(number_intervals)} - + prices=[random.random() for k in range(number_intervals)], + volumes=[*[random.random() for k in range(number_intervals - 1)], None], + ) + + base_flow = {(0, k): random.random() for k in range(number_intervals)} + lossless_arcs = ArcsWithoutLosses( - name='hello', - capacity=[5], - minimum_cost=[3], - specific_capacity_cost=3, - capacity_is_instantaneous=False - ) - + name="hello", + capacity=[5], + minimum_cost=[3], + specific_capacity_cost=3, + capacity_is_instantaneous=False, + ) + lossy_arcs = ArcsWithoutProportionalLosses( - name='hello back', - static_loss={ - (0,0,k): random.random() - for k in range(number_intervals) - }, + name="hello back", + static_loss={(0, 0, k): random.random() for k in range(number_intervals)}, capacity=(1,), - minimum_cost=(5,), + minimum_cost=(5,), specific_capacity_cost=0, - capacity_is_instantaneous=False - ) - + capacity_is_instantaneous=False, + ) + # add import node I - - net.add_import_node(node_key='I', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I", prices={(0, 0, 0): resource_price}) + # add export node E - - net.add_export_node(node_key='E', - prices={(0,0,0): resource_price}) - + + net.add_export_node(node_key="E", prices={(0, 0, 0): resource_price}) + # add regular node A - - net.add_source_sink_node(node_key='A', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="A", base_flow=base_flow) + # add regular node B - - net.add_source_sink_node(node_key='B', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="B", base_flow=base_flow) + # add a valid import-export arc - - net.add_directed_arc(node_key_a='I', - node_key_b='E', - arcs=lossless_arcs) - + + net.add_directed_arc(node_key_a="I", node_key_b="E", arcs=lossless_arcs) + # identify the nodes and validate - + net.identify_node_types() - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # trigger errors using 
pre-identified nodes - + # directed arcs cannot start in an export node: E -> B - + error_triggered = False try: - net.add_directed_arc(node_key_a='E', - node_key_b='B', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="E", node_key_b="B", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # directed arcs cannot end on an import node: A -> I - + error_triggered = False try: - net.add_directed_arc(node_key_a='A', - node_key_b='I', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="A", node_key_b="I", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # import-export nodes cannot have static losses - + error_triggered = False try: - net.add_directed_arc(node_key_a='I', - node_key_b='E', - arcs=lossy_arcs) + net.add_directed_arc(node_key_a="I", node_key_b="E", arcs=lossy_arcs) except ValueError: error_triggered = True assert error_triggered - + # undirected arcs cannot involve import nor export nodes - + error_triggered = False try: - net.add_undirected_arc(node_key_a='I', - node_key_b='A', - arcs=lossless_arcs) + net.add_undirected_arc(node_key_a="I", node_key_b="A", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # undirected arcs cannot involve import nor export nodes - + error_triggered = False try: - net.add_undirected_arc(node_key_a='B', - node_key_b='E', - arcs=lossless_arcs) + net.add_undirected_arc(node_key_a="B", node_key_b="E", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # trigger errors using non-identified nodes - - #************************************************************************** - + + # ************************************************************************** + # create a new export node - - net.add_export_node(node_key='E1', - prices={(0,0,0): resource_price}) - + + net.add_export_node(node_key="E1", prices={(0, 0, 0): resource_price}) + # create an arc starting in that export node - + error_triggered = False try: - net.add_directed_arc(node_key_a='E1', - node_key_b='B', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="E1", node_key_b="B", arcs=lossless_arcs) net.identify_node_types() except ValueError: error_triggered = True assert error_triggered - + # remove the troublesome arc - - net.remove_edge(u='E1', v='B') - #************************************************************************** - + net.remove_edge(u="E1", v="B") + + # ************************************************************************** + # create a new import node - - net.add_import_node(node_key='I1', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I1", prices={(0, 0, 0): resource_price}) + # create an arc ending in that import node - + error_triggered = False try: - net.add_directed_arc(node_key_a='A', - node_key_b='I1', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="A", node_key_b="I1", arcs=lossless_arcs) net.identify_node_types() except ValueError: error_triggered = True assert error_triggered - + # remove the troublesome arc - - net.remove_edge(u='A', v='I1') - - #************************************************************************** - + + net.remove_edge(u="A", 
v="I1") + + # ************************************************************************** + # check non-existent arc - - net.arc_is_undirected(('X','Y', 1)) - -#****************************************************************************** -#****************************************************************************** + + net.arc_is_undirected(("X", "Y", 1)) + + +# ****************************************************************************** +# ****************************************************************************** + def examples_pseudo_unique_key_generation(): - # create network - + network = Network() - + # add node A - - network.add_waypoint_node(node_key='A') - + + network.add_waypoint_node(node_key="A") + # add node B - - network.add_waypoint_node(node_key='B') - + + network.add_waypoint_node(node_key="B") + # identify nodes - + network.identify_node_types() - + # add arcs - - key_list = ['3e225573-4e78-48c8-bb08-efbeeb795c22', - 'f6d30428-15d1-41e9-a952-0742eaaa5a31', - '8c29b906-2518-41c5-ada8-07b83508b5b8', - 'f9a72a39-1422-4a02-af97-906ce79c32a3', - 'b6941a48-10cc-465d-bf53-178bd2939bd1'] - + + key_list = [ + "3e225573-4e78-48c8-bb08-efbeeb795c22", + "f6d30428-15d1-41e9-a952-0742eaaa5a31", + "8c29b906-2518-41c5-ada8-07b83508b5b8", + "f9a72a39-1422-4a02-af97-906ce79c32a3", + "b6941a48-10cc-465d-bf53-178bd2939bd1", + ] + for key in key_list: - network.add_edge( - u_for_edge='A', - v_for_edge='B', + u_for_edge="A", + v_for_edge="B", key=key, - **{network.KEY_ARC_UND: False, - network.KEY_ARC_TECH: None} - ) - + **{network.KEY_ARC_UND: False, network.KEY_ARC_TECH: None} + ) + # use a seed number to trigger more iterations - + import uuid + rand = random.Random() rand.seed(360) uuid.uuid4 = lambda: uuid.UUID(int=rand.getrandbits(128), version=4) - + error_triggered = False try: _ = network.get_pseudo_unique_arc_key( - node_key_start='A', - node_key_end='B', - max_iterations=len(key_list)-1) + node_key_start="A", node_key_end="B", max_iterations=len(key_list) - 1 + ) except Exception: error_triggered = True assert error_triggered - - #************************************************************************** - #************************************************************************** -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/examples_esipp_problem.py b/tests/examples_esipp_problem.py index d541fc9..8c480f9 100644 --- a/tests/examples_esipp_problem.py +++ b/tests/examples_esipp_problem.py @@ -15,7 +15,7 @@ import random # import networkx as nx import pyomo.environ as pyo - + # import src.topupopt.problems.esipp.utils as utils from src.topupopt.data.misc.utils import generate_pseudo_unique_key @@ -30,63 +30,62 @@ from src.topupopt.problems.esipp.resource import ResourcePrice from src.topupopt.problems.esipp.utils import compute_cost_volume_metrics -#****************************************************************************** -#****************************************************************************** +# ****************************************************************************** +# 
****************************************************************************** + + +def examples(solver: str, solver_options: dict = None, init_aux_sets: bool = False): + # ************************************************************************** -def examples(solver: str, - solver_options: dict = None, - init_aux_sets: bool = False): - - #************************************************************************** - # solver details - + # termination criteria - + solver_timelimit = 60 - + solver_abs_mip_gap = 0.001 - + solver_rel_mip_gap = 0.01 if type(solver_options) == dict: - - solver_options.update({ - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - }) - + solver_options.update( + { + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + ) + else: - solver_options = { - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - } - - #************************************************************************** - + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + + # ************************************************************************** + # problem with two scenarios - + example_single_network_single_arc_problem_two_scenarios( - solver=solver, - solver_options=solver_options, - irregular_time_intervals=False, - use_sos_arcs=False, - sos_weight_key=None, - use_real_variables_if_possible=False, - use_sos_sense=False, - sense_sos_weight_key=None, - sense_use_real_variables_if_possible=False, - use_arc_interfaces=False, - print_model=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + solver=solver, + solver_options=solver_options, + irregular_time_intervals=False, + use_sos_arcs=False, + sos_weight_key=None, + use_real_variables_if_possible=False, + use_sos_sense=False, + sense_sos_weight_key=None, + sense_use_real_variables_if_possible=False, + use_arc_interfaces=False, + print_model=False, + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # problem with one import node, one regular node and one arc - + example_single_network_single_arc_problem( solver=solver, solver_options=solver_options, @@ -99,12 +98,13 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # problem with two symmetrical nodes and one undirected arc - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -117,10 +117,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # problem with symmetrical nodes and one undirected arc with diff. tech. 
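As a side note on the termination-criteria block above: it hard-codes a 60 s time limit, a 1% relative MIP gap and a 0.001 absolute MIP gap, and the if/else on type(solver_options) can be collapsed into a single dict merge. A minimal sketch, assuming the same option names as the diff; the helper name merged_solver_options is hypothetical and not part of the codebase:

def merged_solver_options(solver_options: dict = None) -> dict:
    # termination criteria used throughout these examples
    criteria = {
        "time_limit": 60,
        "relative_mip_gap": 0.01,
        "absolute_mip_gap": 0.001,
    }
    # caller-supplied options are kept unless they clash with the criteria,
    # mirroring the update() semantics used in the diff
    return {**(solver_options or {}), **criteria}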
- + example_isolated_undirected_network_diff_tech( solver=solver, solver_options=solver_options, @@ -133,10 +134,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # problem with symmetrical nodes and one undirected arc, irregular steps - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -149,10 +151,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # same problem as the previous one, except with interface variables - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -165,12 +168,13 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=True, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # problem with two symmetrical nodes and one undirected arc, w/ simple sos1 - + sos_weight_key = None - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -183,10 +187,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + sos_weight_key = InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_COST - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -199,10 +204,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + sos_weight_key = InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_CAP - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -215,10 +221,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + sos_weight_key = InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_SPEC_COST - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -231,10 +238,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + sos_weight_key = InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_SPEC_CAP - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -247,10 +255,11 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + sos_weight_key = InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_SPEC_CAP - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -263,14 +272,15 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=True, print_model=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # use sos1 for flow sense determination, nominal weights sos_weight_key = InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER - 
+ example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -283,12 +293,13 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) # use sos1 for flow sense determination, reverse weights - + sos_weight_key = InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_REVERSE_HIGHER - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -301,12 +312,13 @@ def examples(solver: str, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) # use sos1 for flow sense determination, use real variables - + sos_weight_key = InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -319,12 +331,13 @@ def examples(solver: str, sense_use_real_variables_if_possible=True, use_arc_interfaces=False, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) # use sos1 for flow sense determination, use real variables and inter. var. - + sos_weight_key = InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER - + example_isolated_undirected_network( solver=solver, solver_options=solver_options, @@ -337,16 +350,17 @@ def examples(solver: str, sense_use_real_variables_if_possible=True, use_arc_interfaces=True, print_model=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # sos1 for flow sense determination involving directed arcs as well - + sos_weight_key = InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER - + example_nonisolated_undirected_network( - solver=solver, + solver=solver, solver_options=solver_options, different_technologies=False, irregular_time_intervals=False, @@ -360,12 +374,13 @@ def examples(solver: str, undirected_arc_imports=False, undirected_arc_exports=False, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** - #************************************************************************** - # preexisting, reference - + example_isolated_preexisting_undirected_network( solver=solver, solver_options=solver_options, @@ -380,8 +395,9 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=False, use_specific_method=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # capacity is instantaneous example_isolated_preexisting_undirected_network( @@ -398,8 +414,9 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=True, use_specific_method=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # use dedicated method for preexisting arcs example_isolated_preexisting_undirected_network( @@ -416,10 +433,11 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=False, use_specific_method=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # capacity is instantaneous, using dedicated method - + example_isolated_preexisting_undirected_network( solver=solver, solver_options=solver_options, @@ -434,10 +452,11 @@ def examples(solver: str, 
use_arc_interfaces=False, capacity_is_instantaneous=True, use_specific_method=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # use different technologies for the undirected arc - + example_isolated_preexisting_undirected_network( solver=solver, solver_options=solver_options, @@ -452,8 +471,9 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=False, use_specific_method=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # use different technologies for the undirected arc, capacity is instant. example_isolated_preexisting_undirected_network( @@ -470,8 +490,9 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=True, use_specific_method=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # use different technologies for the undirected arc, using specific method example_isolated_preexisting_undirected_network( @@ -488,10 +509,11 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=False, use_specific_method=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # same as before but assuming the capacity is instantaneous - + example_isolated_preexisting_undirected_network( solver=solver, solver_options=solver_options, @@ -506,12 +528,13 @@ def examples(solver: str, use_arc_interfaces=False, capacity_is_instantaneous=True, use_specific_method=True, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # problem with two symmetrical nodes, one undirected arc, imports and exp. - + example_nonisolated_undirected_network( solver=solver, solver_options=solver_options, @@ -527,10 +550,11 @@ def examples(solver: str, undirected_arc_imports=False, undirected_arc_exports=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # same problem as before but with interface variables - + example_nonisolated_undirected_network( solver=solver, solver_options=solver_options, @@ -546,10 +570,11 @@ def examples(solver: str, undirected_arc_imports=False, undirected_arc_exports=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # same problem as before but with different technologies for the und. arc - + example_nonisolated_undirected_network( solver=solver, solver_options=solver_options, @@ -565,12 +590,13 @@ def examples(solver: str, undirected_arc_imports=False, undirected_arc_exports=False, print_model=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # preexisting directed arcs, undirected with same tech.
in both directions - + example_nonisolated_network_preexisting_directed_arcs( solver=solver, solver_options=solver_options, @@ -583,10 +609,11 @@ def examples(solver: str, sense_sos_weight_key=None, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) # preexisting directed arcs - + example_nonisolated_network_preexisting_directed_arcs( solver=solver, solver_options=solver_options, @@ -599,12 +626,13 @@ def examples(solver: str, sense_sos_weight_key=None, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # same as before but with sos for arc selection and interfaces sos_weight_key = InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_SPEC_CAP - + example_nonisolated_network_preexisting_directed_arcs( solver=solver, solver_options=solver_options, @@ -617,12 +645,13 @@ def examples(solver: str, sense_sos_weight_key=None, sense_use_real_variables_if_possible=False, use_arc_interfaces=True, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** - #************************************************************************** - # test using preexisting infinite capacity arcs - + example_preexisting_infinite_capacity_directed_arcs( solver=solver, solver_options=solver_options, @@ -635,8 +664,9 @@ def examples(solver: str, sense_sos_weight_key=None, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + example_preexisting_infinite_capacity_directed_arcs( solver=solver, solver_options=solver_options, @@ -649,14 +679,15 @@ def examples(solver: str, sense_sos_weight_key=None, sense_use_real_variables_if_possible=False, use_arc_interfaces=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # test using mandatory arcs with directed arcs - + example_network_mandatory_arcs( - solver=solver, + solver=solver, solver_options=solver_options, different_technologies=True, irregular_time_intervals=False, @@ -669,12 +700,13 @@ def examples(solver: str, use_arc_interfaces=False, use_undirected_arcs=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # test using mandatory arcs with directed arcs and sos1 for arc selection - + example_network_mandatory_arcs( - solver=solver, + solver=solver, solver_options=solver_options, different_technologies=True, irregular_time_intervals=False, @@ -687,12 +719,13 @@ def examples(solver: str, use_arc_interfaces=False, use_undirected_arcs=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # test using mandatory arcs with undirected arcs - + example_network_mandatory_arcs( - solver=solver, + solver=solver, solver_options=solver_options, different_technologies=True, irregular_time_intervals=False, @@ -705,12 +738,13 @@ def examples(solver: str, use_arc_interfaces=False, use_undirected_arcs=True, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # test using mandatory arcs with undirected arcs and sos1 for arc selection - + example_network_mandatory_arcs( - solver=solver, + solver=solver, solver_options=solver_options, 
different_technologies=True, irregular_time_intervals=False, @@ -723,16 +757,17 @@ def examples(solver: str, use_arc_interfaces=False, use_undirected_arcs=True, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** - #************************************************************************** - # test using static losses with directed arcs - + # using only arc technologies with fixed losses (upstream, if possible) - + example_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=False, @@ -747,12 +782,13 @@ def examples(solver: str, use_arc_techs_without_fixed_losses=False, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # using only arc technologies without fixed losses - + example_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=False, @@ -767,12 +803,13 @@ def examples(solver: str, use_arc_techs_without_fixed_losses=True, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # using arc technologies with and without fixed losses simultaneously - + example_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=False, @@ -787,12 +824,13 @@ def examples(solver: str, use_arc_techs_without_fixed_losses=True, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # using only arc technologies without fixed losses (yet downstream?) 
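The static-loss examples around this point repeat the same call with one or two flags changed at a time. A hedged sketch of the same sweep written as a loop, assuming the keyword arguments omitted here have defaults (the hunks only show part of each call):

import itertools

modes = (
    InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP,
    InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR,
)
# sweep the placement mode and the without-fixed-losses flag in one loop
for mode, without_fixed in itertools.product(modes, (False, True)):
    example_directed_network_static_losses(
        solver=solver,
        solver_options=solver_options,
        irregular_time_intervals=False,
        use_sos_arcs=False,
        use_arc_techs_without_fixed_losses=without_fixed,
        static_losses_mode=mode,
        print_model=False,
        init_aux_sets=init_aux_sets,
    )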
- + example_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=False, @@ -807,12 +845,13 @@ def examples(solver: str, use_arc_techs_without_fixed_losses=True, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # using only arc technologies with fixed losses (downstream, if possible) - + example_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=False, @@ -827,202 +866,218 @@ def examples(solver: str, use_arc_techs_without_fixed_losses=False, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # example from the report: new directed arc with losses in the source - + example_report_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, use_new_arcs=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # example from the report: new directed arc with losses in the source - + example_report_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, use_new_arcs=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # example from the report: pre-existing directed arc with losses in the end - + example_report_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR, use_new_arcs=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # example from the report: pre-existing directed arc with losses in the source - + example_report_directed_network_static_losses( - solver=solver, + solver=solver, solver_options=solver_options, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, use_new_arcs=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # pre-existing directed arcs with losses downstream - + example_directed_arc_static_downstream_pre( - solver=solver, - solver_options=solver_options, - init_aux_sets=init_aux_sets) - + solver=solver, solver_options=solver_options, init_aux_sets=init_aux_sets + ) + # new directed arcs with losses downstream - + example_directed_arc_static_downstream_new( - solver=solver, - solver_options=solver_options, - init_aux_sets=init_aux_sets) - + solver=solver, solver_options=solver_options, init_aux_sets=init_aux_sets + ) + # new directed arcs with losses upstream - + example_directed_arc_static_upstream( - solver=solver, - solver_options=solver_options, - use_new_arcs=True, - init_aux_sets=init_aux_sets) - + solver=solver, + solver_options=solver_options, + use_new_arcs=True, + init_aux_sets=init_aux_sets, + ) + # pre-existing directed arcs with losses upstream - + example_directed_arc_static_upstream( - solver=solver, - solver_options=solver_options, - use_new_arcs=False, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + solver=solver, + solver_options=solver_options, + use_new_arcs=False, + init_aux_sets=init_aux_sets, + ) + + # 
************************************************************************** + # static losses on undirected arcs (example from the report) - + for mode in InfrastructurePlanningProblem.STATIC_LOSS_MODES: - # pre-existing arcs, original arc direction - + example_report_undirected_network_static_losses( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=False, invert_original_direction=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # new arcs, original arc direction - + example_report_undirected_network_static_losses( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=True, invert_original_direction=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # pre-existing arcs, inverted arc direction - + example_report_undirected_network_static_losses( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=False, invert_original_direction=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # new arcs, inverted arc direction - + example_report_undirected_network_static_losses( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=True, invert_original_direction=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # capacity reduction - + # pre-existing arcs, original arc direction - + example_undirected_arc_static_upstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # new arcs, original arc direction - + example_undirected_arc_static_upstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # pre-existing arcs, inverted arc direction - + example_undirected_arc_static_upstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # new arcs, inverted arc direction - + example_undirected_arc_static_upstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # minimum flow - + # pre-existing arcs, original arc direction - + example_undirected_arc_static_downstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # new arcs, original arc direction - + example_undirected_arc_static_downstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=True, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # pre-existing arcs, inverted arc direction - + example_undirected_arc_static_downstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # new arcs, inverted arc direction - + example_undirected_arc_static_downstream( solver=solver, solver_options=solver_options, static_losses_mode=mode, use_new_arcs=True, - init_aux_sets=init_aux_sets) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # try network with a direct import-export 
arc, with higher import prices - + example_direct_imp_exp_network( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=True, @@ -1036,12 +1091,13 @@ def examples(solver: str, use_static_losses=False, use_higher_export_prices=False, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # try network with a direct import-export arc, with higher export prices - + example_direct_imp_exp_network( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=True, @@ -1055,14 +1111,15 @@ def examples(solver: str, use_static_losses=False, use_higher_export_prices=True, print_model=False, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # try network with a direct import-export arc (with static losses) - + error_triggered = False try: example_direct_imp_exp_network( - solver=solver, + solver=solver, solver_options=solver_options, irregular_time_intervals=False, use_sos_arcs=True, @@ -1076,17 +1133,18 @@ def examples(solver: str, use_static_losses=True, use_higher_export_prices=False, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) except ValueError: error_triggered = True assert error_triggered - #************************************************************************** - + # ************************************************************************** + # test using undirected arcs involving import and export nodes - + # import nodes - + error_triggered = False try: example_nonisolated_undirected_network( @@ -1104,13 +1162,14 @@ def examples(solver: str, undirected_arc_imports=True, undirected_arc_exports=False, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) except ValueError: error_triggered = True assert error_triggered - + # export nodes - + error_triggered = False try: example_nonisolated_undirected_network( @@ -1128,67 +1187,71 @@ def examples(solver: str, undirected_arc_imports=False, undirected_arc_exports=True, print_model=False, - init_aux_sets=init_aux_sets) + init_aux_sets=init_aux_sets, + ) except ValueError: error_triggered = True assert error_triggered - #************************************************************************** - #************************************************************************** - + # ************************************************************************** + # ************************************************************************** + # test using groups of arcs - + example_arc_groups_individual( - solver=solver, - solver_options=solver_options, - use_arc_groups=False, + solver=solver, + solver_options=solver_options, + use_arc_groups=False, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + example_arc_groups_individual( - solver=solver, - solver_options=solver_options, - use_arc_groups=True, + solver=solver, + solver_options=solver_options, + use_arc_groups=True, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + # TODO: perform additional tests involving groups of arcs - + for mode in InfrastructurePlanningProblem.STATIC_LOSS_MODES: - example_arc_groups_individual_undirected( solver=solver, solver_options=solver_options, use_arc_groups=False, static_losses_mode=mode, - init_aux_sets=init_aux_sets) - + init_aux_sets=init_aux_sets, + ) + 
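The error checks above all follow the same idiom: set error_triggered = False, run the call, catch ValueError, then assert the flag. Under pytest this collapses to a context manager; a sketch, reusing the import-export static-loss case from above and assuming the keyword arguments not repeated here keep the values shown there:

import pytest

# passes only if the wrapped call raises ValueError
with pytest.raises(ValueError):
    example_direct_imp_exp_network(
        solver=solver,
        solver_options=solver_options,
        use_static_losses=True,  # static losses on a direct import-export arc
        init_aux_sets=init_aux_sets,
    )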
example_arc_groups_individual_undirected( solver=solver, solver_options=solver_options, use_arc_groups=True, static_losses_mode=mode, - init_aux_sets=init_aux_sets) - - #************************************************************************** - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # ************************************************************************** + # test using a maximum number of parallel arcs - + # TODO: test using the restriction in different directions - + # there are 3 possible outcomes: - # 1) the number of preexisting and mandatory arcs is above the limit + # 1) the number of preexisting and mandatory arcs is above the limit # >> the problem is infeasible - # 2) maximum number of arcs lower than or equal to the limit + # 2) maximum number of arcs lower than or equal to the limit # >> the constraint is skipped # 3) maximum number of arcs above the limit, the number of preexisting and # mandatory arcs is below the limit >> the constraint is used - - #************************************************************************** - + + # ************************************************************************** + # TODO: test using the constraint - + # how to test case 3: # a) use only preexisting directed arcs # b) use only preexisting undirected arcs @@ -1205,11 +1268,11 @@ def examples(solver: str, # m) use preexi. directed arcs and mandatory directed and undirected arcs # n) use preexi. undirected arcs and mandatory directed and undirected arcs # o) use preselected and mandatory directed and undirected arcs - - #************************************************************************** - + + # ************************************************************************** + # case 2: skip constraint - + # how to test case 2: # a) use only preexisting directed arcs # b) use only preexisting undirected arcs @@ -1229,49 +1292,49 @@ def examples(solver: str, # p) TODO: use pre-existing undirected arcs in both directions # q) TODO: use mandatory undirected arcs in both directions # r) TODO: use pre-existing and mandatory arcs in both directions - + # TODO: use groups of arcs - + skip_test_cases = ( - '2_a', - '2_b', - '2_c', - '2_d', - '2_e', - '2_f', - '2_g', - '2_h', - '2_i', - '2_j', - '2_k', - '2_l', - '2_m', - '2_n', - '2_o' - ) - + "2_a", + "2_b", + "2_c", + "2_d", + "2_e", + "2_f", + "2_g", + "2_h", + "2_i", + "2_j", + "2_k", + "2_l", + "2_m", + "2_n", + "2_o", + ) + for test_case in skip_test_cases: - example_problem_max_arc_limits_skip( - solver=solver, - solver_options=solver_options, - different_technologies=True, - irregular_time_intervals=False, - use_sos_arcs=False, - sos_weight_key=None, - use_real_variables_if_possible=False, - use_sos_sense=False, - sense_sos_weight_key=None, - sense_use_real_variables_if_possible=False, - use_arc_interfaces=False, - case=test_case, - print_model=False, - init_aux_sets=init_aux_sets) + solver=solver, + solver_options=solver_options, + different_technologies=True, + irregular_time_intervals=False, + use_sos_arcs=False, + sos_weight_key=None, + use_real_variables_if_possible=False, + use_sos_sense=False, + sense_sos_weight_key=None, + sense_use_real_variables_if_possible=False, + use_arc_interfaces=False, + case=test_case, + print_model=False, + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** -

#************************************************************************** - # case 1: infeasible (too many mandatory or pre-existing arcs) - + # how to test case 1: # a) use only preexisting directed arcs # b) use only preexisting undirected arcs @@ -1293,443 +1356,407 @@ def examples(solver: str, # r) use mandatory and pre-existing undirected arcs in both directions # s) TODO: use groups of arcs that include mandatory arcs # t) TODO: use mandatory groups of arcs - + infeasible_test_cases = ( - '1_a', - '1_b', - '1_c', - '1_d', - '1_e', - '1_f', - '1_g', - '1_h', - '1_i', - '1_j', - '1_k', - '1_l', - '1_m', - '1_n', - '1_o', - '1_p', - '1_q', - '1_r', - '1_s', - '1_t' - ) - + "1_a", + "1_b", + "1_c", + "1_d", + "1_e", + "1_f", + "1_g", + "1_h", + "1_i", + "1_j", + "1_k", + "1_l", + "1_m", + "1_n", + "1_o", + "1_p", + "1_q", + "1_r", + "1_s", + "1_t", + ) + for test_case in infeasible_test_cases: - error_triggered = False try: example_problem_max_arc_limits_infeasible( - solver=solver, - solver_options=solver_options, - different_technologies=True, - irregular_time_intervals=False, - use_sos_arcs=False, - sos_weight_key=None, - use_real_variables_if_possible=False, - use_sos_sense=False, - sense_sos_weight_key=None, - sense_use_real_variables_if_possible=False, - use_arc_interfaces=False, - case=test_case, - print_model=False, - init_aux_sets=init_aux_sets) + solver=solver, + solver_options=solver_options, + different_technologies=True, + irregular_time_intervals=False, + use_sos_arcs=False, + sos_weight_key=None, + use_real_variables_if_possible=False, + use_sos_sense=False, + sense_sos_weight_key=None, + sense_use_real_variables_if_possible=False, + use_arc_interfaces=False, + case=test_case, + print_model=False, + init_aux_sets=init_aux_sets, + ) except ValueError: error_triggered = True assert error_triggered - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def build_solve_ipp( - solver: str = 'glpk', - solver_options: dict = None, - use_sos_arcs: bool = False, - arc_sos_weight_key: str = ( - InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE), - arc_use_real_variables_if_possible: bool = False, - use_sos_sense: bool = False, - sense_sos_weight_key: int = ( - InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER - ), - sense_use_real_variables_if_possible: bool = False, - sense_use_arc_interfaces: bool = False, - perform_analysis: bool = False, - plot_results: bool = False, - print_solver_output: bool = False, - irregular_time_intervals: bool = False, - networks: dict = None, - number_intraperiod_time_intervals: int = 4, - static_losses_mode = None, - mandatory_arcs: list = None, - max_number_parallel_arcs: dict = None, - arc_groups_dict: dict = None, - init_aux_sets: bool = False, - discount_rates: dict = None, - reporting_periods: dict = None, - time_intervals: dict = None, - assessment_weights: dict = None): - - reporting_period_duration = 365*24*3600 - + solver: str = "glpk", + solver_options: dict = None, + use_sos_arcs: bool = False, + arc_sos_weight_key: str = (InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE), + 
arc_use_real_variables_if_possible: bool = False, + use_sos_sense: bool = False, + sense_sos_weight_key: int = ( + InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER + ), + sense_use_real_variables_if_possible: bool = False, + sense_use_arc_interfaces: bool = False, + perform_analysis: bool = False, + plot_results: bool = False, + print_solver_output: bool = False, + irregular_time_intervals: bool = False, + networks: dict = None, + number_intraperiod_time_intervals: int = 4, + static_losses_mode=None, + mandatory_arcs: list = None, + max_number_parallel_arcs: dict = None, + arc_groups_dict: dict = None, + init_aux_sets: bool = False, + discount_rates: dict = None, + reporting_periods: dict = None, + time_intervals: dict = None, + assessment_weights: dict = None, +): + reporting_period_duration = 365 * 24 * 3600 + if type(discount_rates) != dict: - - discount_rates = { - 0: tuple([0.035, 0.035]) - } - + discount_rates = {0: tuple([0.035, 0.035])} + if type(assessment_weights) != dict: - - assessment_weights = {} # default - + assessment_weights = {} # default + if type(reporting_periods) != dict: - - reporting_periods = {0: (0,1)} - + reporting_periods = {0: (0, 1)} + # time intervals - + if type(time_intervals) != dict: - if irregular_time_intervals: - time_step_max_relative_variation = 0.25 - + intraperiod_time_interval_duration = [ - (reporting_period_duration/number_intraperiod_time_intervals)* - (1+(k/(number_intraperiod_time_intervals-1)-0.5)* - time_step_max_relative_variation) - for k in range(number_intraperiod_time_intervals)] - + (reporting_period_duration / number_intraperiod_time_intervals) + * ( + 1 + + (k / (number_intraperiod_time_intervals - 1) - 0.5) + * time_step_max_relative_variation + ) + for k in range(number_intraperiod_time_intervals) + ] + else: - intraperiod_time_interval_duration = [ - reporting_period_duration/number_intraperiod_time_intervals - for k in range(number_intraperiod_time_intervals)] - + reporting_period_duration / number_intraperiod_time_intervals + for k in range(number_intraperiod_time_intervals) + ] + # average time interval duration - - average_time_interval_duration = round( - mean( - intraperiod_time_interval_duration - ) - ) - - time_intervals = { - 0: tuple(dt for dt in intraperiod_time_interval_duration) - } - + + average_time_interval_duration = round(mean(intraperiod_time_interval_duration)) + + time_intervals = {0: tuple(dt for dt in intraperiod_time_interval_duration)} + # time weights - + # relative weight of time period - + # one interval twice as long as the average is worth twice # one interval half as long as the average is worth half - + # time_weights = [ - # [time_period_duration/average_time_interval_duration - # for time_period_duration in intraperiod_time_interval_duration] + # [time_period_duration/average_time_interval_duration + # for time_period_duration in intraperiod_time_interval_duration] # for p in range(number_periods)] - - time_weights = None # nothing yet - normalised_time_interval_duration = None # nothing yet - + time_weights = None # nothing yet + + normalised_time_interval_duration = None # nothing yet + # create problem object - + ipp = InfrastructurePlanningProblem( - name='problem', - discount_rates=discount_rates, + name="problem", + discount_rates=discount_rates, reporting_periods=reporting_periods, time_intervals=time_intervals, time_weights=time_weights, normalised_time_interval_duration=normalised_time_interval_duration, - assessment_weights=assessment_weights - ) - + 
assessment_weights=assessment_weights, + ) + # add networks and systems - + for netkey, net in networks.items(): - ipp.add_network(network_key=netkey, network=net) - + # define arcs as mandatory - + if type(mandatory_arcs) == list: - for full_arc_key in mandatory_arcs: - ipp.make_arc_mandatory(full_arc_key[0], full_arc_key[1:]) - + # if make_all_arcs_mandatory: - + # for network_key in ipp.networks: - + # for arc_key in ipp.networks[network_key].edges(keys=True): - + # # preexisting arcs are no good - + # if ipp.networks[network_key].edges[arc_key][ # Network.KEY_ARC_TECH].has_been_selected(): - - # continue - + + # continue + # ipp.make_arc_mandatory(network_key, arc_key) - + # set up the use of sos for arc selection - + if use_sos_arcs: - for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - - if ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - + if ( + ipp.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): continue - + ipp.use_sos1_for_arc_selection( - network_key, + network_key, arc_key, - use_real_variables_if_possible=( - arc_use_real_variables_if_possible), - sos1_weight_method=arc_sos_weight_key) - - + use_real_variables_if_possible=(arc_use_real_variables_if_possible), + sos1_weight_method=arc_sos_weight_key, + ) + # set up the use of sos for flow sense determination - + if use_sos_sense: - for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - - if not ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_UND]: - + if not ipp.networks[network_key].edges[arc_key][Network.KEY_ARC_UND]: continue - + ipp.use_sos1_for_flow_senses( - network_key, + network_key, arc_key, use_real_variables_if_possible=( sense_use_real_variables_if_possible - ), + ), use_interface_variables=sense_use_arc_interfaces, - sos1_weight_method=sense_sos_weight_key) - - elif sense_use_arc_interfaces: # set up the use of arc interfaces w/o sos1 - + sos1_weight_method=sense_sos_weight_key, + ) + + elif sense_use_arc_interfaces: # set up the use of arc interfaces w/o sos1 for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - - if ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - + if ( + ipp.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): continue - - ipp.use_interface_variables_for_arc_selection( - network_key, - arc_key - ) - + + ipp.use_interface_variables_for_arc_selection(network_key, arc_key) + # static losses - + if static_losses_mode == ipp.STATIC_LOSS_MODE_ARR: - ipp.place_static_losses_arrival_node() - + elif static_losses_mode == ipp.STATIC_LOSS_MODE_DEP: - ipp.place_static_losses_departure_node() - + elif static_losses_mode == ipp.STATIC_LOSS_MODE_US: - ipp.place_static_losses_upstream() - + elif static_losses_mode == ipp.STATIC_LOSS_MODE_DS: - ipp.place_static_losses_downstream() - + else: - - raise ValueError('Unknown static loss modelling mode.') - - #************************************************************************** - + raise ValueError("Unknown static loss modelling mode.") + + # ************************************************************************** + # groups - + if type(arc_groups_dict) != type(None): - for key in arc_groups_dict: - ipp.create_arc_group(arc_groups_dict[key]) - - #************************************************************************** - + + # 
************************************************************************** + # maximum number of parallel arcs - + for key in max_number_parallel_arcs: - ipp.set_maximum_number_parallel_arcs( - network_key=key[0], - node_a=key[1], - node_b=key[2], - limit=max_number_parallel_arcs[key]) - - #************************************************************************** - + network_key=key[0], + node_a=key[1], + node_b=key[2], + limit=max_number_parallel_arcs[key], + ) + + # ************************************************************************** + # instantiate (disable the default case v-a-v fixed losses) - + # ipp.instantiate(place_fixed_losses_upstream_if_possible=False) - + ipp.instantiate(initialise_ancillary_sets=init_aux_sets) - + # optimise - - ipp.optimise(solver_name=solver, - solver_options=solver_options, - output_options={}, - print_solver_output=print_solver_output) - + + ipp.optimise( + solver_name=solver, + solver_options=solver_options, + output_options={}, + print_solver_output=print_solver_output, + ) + # return the problem object - + return ipp - - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_single_network_single_arc_problem( - solver, - solver_options, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - print_model, - init_aux_sets): - + solver, + solver_options, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + print_model, + init_aux_sets, +): # scenario - + q = 0 - + # number_periods = 2 - + # # number_intraperiod_time_intervals = 4 - + # discount_rates = tuple([0.035 for p in range(number_periods)]) - + # period_duration = [365*24*3600 for p in range(number_periods)] - + # if irregular_time_intervals: - + # time_step_max_relative_variation = 0.25 - + # intraperiod_time_interval_duration = [ # (planning_horizon/number_intraperiod_time_intervals)* # (1+(k/(number_intraperiod_time_intervals-1)-0.5)* # time_step_max_relative_variation) # for k in range(number_intraperiod_time_intervals)] - + # else: - + # intraperiod_time_interval_duration = [ # planning_horizon/number_intraperiod_time_intervals # for k in range(number_intraperiod_time_intervals)] - + # # create problem object - + # ipp = InfrastructurePlanningProblem( - # name='problem', - # discount_rates=discount_rates, + # name='problem', + # discount_rates=discount_rates, # intraperiod_time_interval_duration=intraperiod_time_interval_duration, # period_duration=planning_horizon) - + # time - + number_intervals = 3 # 2 nodes: one import, one regular - + mynet = Network() - + # import node - + # res_price = ResourcePrice( # prices=[1.0 for i in range(number_intervals)], # volumes=None # ) - + number_periods = 2 - + 
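As an aside on build_solve_ipp above: with number_intraperiod_time_intervals = 4 and time_step_max_relative_variation = 0.25, the irregular-grid factors 1 + (k/3 - 0.5) * 0.25 come to 0.875, 0.9583, 1.0417 and 1.125, so the perturbed interval durations still sum to the reporting period duration. A quick standalone check of that arithmetic, using the same constants as the diff:

from statistics import mean

T = 365 * 24 * 3600  # reporting_period_duration, as in build_solve_ipp
n = 4                # number_intraperiod_time_intervals
v = 0.25             # time_step_max_relative_variation
durations = [(T / n) * (1 + (k / (n - 1) - 0.5) * v) for k in range(n)]
# the perturbation is symmetric, so the total and the mean are preserved
assert abs(sum(durations) - T) < 1e-3
assert round(mean(durations)) == round(T / n)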
node_IMP = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=node_IMP, + node_key=node_IMP, prices={ - (q,p,k): ResourcePrice( - prices=1.0, - volumes=None - ) + (q, p, k): ResourcePrice(prices=1.0, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - # base_flow=[0.5, 0.0, 1.0], - base_flow={ - (q,0):0.50, - (q,1):0.00, - (q,2):1.00} - ) - + node_key=node_A, + # base_flow=[0.5, 0.0, 1.0], + base_flow={(q, 0): 0.50, (q, 1): 0.00, (q, 2): 1.00}, + ) + # arc IA - + arc_tech_IA = Arcs( - name='any', - #efficiency=[0.5, 0.5, 0.5], - efficiency={ - (q,0): 0.5, - (q,1): 0.5, - (q,2): 0.5 - }, + name="any", + # efficiency=[0.5, 0.5, 0.5], + efficiency={(q, 0): 0.5, (q, 1): 0.5, (q, 2): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - - mynet.add_directed_arc( - node_key_a=node_IMP, - node_key_b=node_A, - arcs=arc_tech_IA) - + validate=False, + ) + + mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -1741,186 +1768,171 @@ def example_single_network_single_arc_problem( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + # the arc should be installed since it is the only feasible solution - - assert True in ipp.networks['mynet'].edges[(node_IMP, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the flows should be 1.0, 0.0 and 2.0 - + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, q, 0) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]), 1.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, q, 1) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]), 0.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, q, 2) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 2)]), 2.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + # arc amplitude should be two - + assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_IMP, node_A, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", 
node_IMP, node_A, 0)]), 2.0, - abs_tol=0.01) - + abs_tol=0.01, + ) + # capex should be four - + assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3) - + # sdncf should be -5.7 - - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), -5.7, abs_tol=1e-3 - ) - + + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -5.7, abs_tol=1e-3) + # the objective function should be -9.7 - + assert math.isclose(pyo.value(ipp.instance.obj_f), -9.7, abs_tol=1e-3) - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_single_network_single_arc_problem_two_scenarios( - solver, - solver_options, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - print_model, - init_aux_sets): - + solver, + solver_options, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + print_model, + init_aux_sets, +): # number_intraperiod_time_intervals = 4 - + nominal_discount_rate = 0.035 - + assessment_weights = {0: 0.7, 1: 0.3} - number_reporting_periods = 3 # total - - reporting_periods = {0: (0,1), 1: (0,1,2)} # 2 and 3 + number_reporting_periods = 3 # total + + reporting_periods = {0: (0, 1), 1: (0, 1, 2)} # 2 and 3 number_time_intervals = {0: 3, 1: 2} - + discount_rates = { - q: tuple(nominal_discount_rate - for p in range(number_reporting_periods)) - for q in assessment_weights - } - - time_intervals = {0: (1,1,1), 1: (1,1)} + q: tuple(nominal_discount_rate for p in range(number_reporting_periods)) + for q in assessment_weights + } + + time_intervals = {0: (1, 1, 1), 1: (1, 1)} # 2 nodes: one import, one regular - + mynet = Network() - + # import node - - # res_price = { + + # res_price = { # q: ResourcePrice(prices=[1.0 for i in range(number_time_intervals[q])], # volumes=None) # for q in assessment_weights} - + node_IMP = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=node_IMP, - prices={ - (q,p,k): ResourcePrice(prices=1.0, volumes=None) + node_key=node_IMP, + prices={ + (q, p, k): ResourcePrice(prices=1.0, volumes=None) for q in assessment_weights for p in range(number_reporting_periods) for k in range(number_time_intervals[q]) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, + node_key=node_A, base_flow={ - (0,0):0.50, - (0,1):0.00, - (0,2):1.00, - (1,0):1.25, - (1,1):0.30 - } - ) - + (0, 0): 0.50, + (0, 1): 0.00, + (0, 2): 1.00, + (1, 0): 1.25, + (1, 1): 0.30, + }, + ) + # arc IA - + arc_tech_IA = Arcs( - name='any', - #efficiency=[0.5, 0.5, 0.5], - efficiency={ - (0,0): 0.5, - (0,1): 0.5, - (0,2): 0.5, - (1,0): 0.5, - (1,1): 0.5 - }, + name="any", + # efficiency=[0.5, 0.5, 0.5], + efficiency={(0, 0): 0.5, (0, 1): 0.5, (0, 2): 0.5, (1, 0): 0.5, (1, 1): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], 
minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - - mynet.add_directed_arc( - node_key_a=node_IMP, - node_key_b=node_A, - arcs=arc_tech_IA) - + validate=False, + ) + + mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -1932,186 +1944,158 @@ def example_single_network_single_arc_problem_two_scenarios( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=None, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, init_aux_sets=init_aux_sets, discount_rates=discount_rates, reporting_periods=reporting_periods, time_intervals=time_intervals, - assessment_weights=assessment_weights - ) - - #************************************************************************** - + assessment_weights=assessment_weights, + ) + + # ************************************************************************** + # validation - + # the arc should be installed since it is the only feasible solution - - assert True in ipp.networks['mynet'].edges[(node_IMP, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the flows should be 1.0, 0.0 and 2.0 in scenario 0, and 2.5 and 0.6 in scenario 1 - + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, 0, 0) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 0, 0)]), 1.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, 0, 1) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 0, 1)]), 0.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, 0, 2) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 0, 2)]), 2.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, 1, 0) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 1, 0)]), 2.5, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, 1, 1) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, 1, 1)]), 0.6, - abs_tol=1e-6) - - + abs_tol=1e-6, + ) + # arc amplitude should be 2.5 - + assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_IMP, node_A, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]), 2.5, - abs_tol=0.01) - + abs_tol=0.01, + ) + # capex should be 4.5 - + assert math.isclose(pyo.value(ipp.instance.var_capex), 4.5, abs_tol=1e-3) - + # sdncf_q[0] should be -5.7 - - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[0]), -5.7, abs_tol=1e-3 - ) - + + assert 
math.isclose(pyo.value(ipp.instance.var_sdncf_q[0]), -5.7, abs_tol=1e-3) + # the objective function should be about -11.096 - + assert math.isclose(pyo.value(ipp.instance.obj_f), -11.096, abs_tol=3e-3) - - -#****************************************************************************** -#****************************************************************************** - -def example_isolated_undirected_network(solver, - solver_options, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - print_model, - init_aux_sets): - + + +# ****************************************************************************** +# ****************************************************************************** + + +def example_isolated_undirected_network( + solver, + solver_options, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + print_model, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1, -1, 0.5, -0.5], - base_flow={ - (0,0): 1, - (0,1): -1, - (0,2): 0.5, - (0,3): -0.5 - } - ) - + node_key=node_A, + # base_flow=[1, -1, 0.5, -0.5], + base_flow={(0, 0): 1, (0, 1): -1, (0, 2): 0.5, (0, 3): -0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[-1, 1, -0.5, 0.5], - base_flow={ - (0,0): -1, - (0,1): 1, - (0,2): -0.5, - (0,3): 0.5 - } - ) - + node_key=node_B, + # base_flow=[-1, 1, -0.5, 0.5], + base_flow={(0, 0): -1, (0, 1): 1, (0, 2): -0.5, (0, 3): 0.5}, + ) + # add arcs - + # undirected arc - + arc_tech_AB = ArcsWithoutStaticLosses( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -2123,45 +2107,51 @@ def example_isolated_undirected_network(solver, sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - # # - + init_aux_sets=init_aux_sets, + ) + + # # + # if print_model: - # 
ipp.instance.pprint() - - #************************************************************************** - + + # ************************************************************************** + # validation - + # the arc should be installed since it is the only feasible solution - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # there should be no opex (imports or exports), only capex from arcs - + assert pyo.value(ipp.instance.var_sdncf_q[q]) == 0 - + assert pyo.value(ipp.instance.var_capex) > 0 - - assert pyo.value( - ipp.instance.var_capex_arc_gllj[ - ('mynet', node_A, node_B, arc_key_AB_und)]) > 0 - + + assert ( + pyo.value( + ipp.instance.var_capex_arc_gllj[("mynet", node_A, node_B, arc_key_AB_und)] + ) + > 0 + ) + # the return amplitude should be the same as the forward one - + # assert math.isclose( # pyo.value( # ipp.instance.var_v_amp_gllj[('mynet', node_A, node_B, 0)] @@ -2170,94 +2160,81 @@ def example_isolated_undirected_network(solver, # ipp.instance.var_v_amp_gllj[('mynet', node_B, node_A, 0)] # ), # abs_tol=0.01) - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_isolated_undirected_network_diff_tech( - solver, - solver_options, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - print_model, - init_aux_sets): - + solver, + solver_options, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + print_model, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1, -1, 0.5, -0.5] - base_flow={ - (0,0):1, - (0,1):-1, - (0,2):0.5, - (0,3):-0.5 - }, - ) - + node_key=node_A, + # base_flow=[1, -1, 0.5, -0.5] + base_flow={(0, 0): 1, (0, 1): -1, (0, 2): 0.5, (0, 3): -0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[-1.25, 1, -0.625, 0.5] - base_flow={ - (0,0):-1.25, - (0,1):1.0, - (0,2):-0.625, - (0,3):0.5 - }, - ) - + node_key=node_B, + # base_flow=[-1.25, 1, -0.625, 0.5] + base_flow={(0, 0): -1.25, (0, 1): 1.0, (0, 2): -0.625, (0, 3): 0.5}, + ) + # add arcs - + # undirected arc - + arc_tech_AB = ArcsWithoutStaticLosses( - name='any', - #efficiency=[0.8, 1.0, 0.8, 1.0], - efficiency={ - (0,0):0.8, - (0,1):1.0, - (0,2):0.8, - (0,3):1.0 - }, + name="any", + # efficiency=[0.8, 1.0, 0.8, 1.0], + efficiency={(0, 0): 0.8, (0, 1): 1.0, (0, 2): 0.8, (0, 3): 1.0}, efficiency_reverse=None, capacity=[1.25, 2.5], minimum_cost=[10, 15], - 
specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -2269,269 +2246,229 @@ def example_isolated_undirected_network_diff_tech( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + # the arc should be installed since it is the only feasible solution - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # there should be no opex (imports or exports), only capex from arcs - + assert pyo.value(ipp.instance.var_sdncf_q[q]) == 0 - + assert pyo.value(ipp.instance.var_capex) > 0 - - assert pyo.value( - ipp.instance.var_capex_arc_gllj[ - ('mynet', node_A, node_B, arc_key_AB_und)]) > 0 - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** - -def example_nonisolated_undirected_network(solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - undirected_arc_imports, - undirected_arc_exports, - print_model, - init_aux_sets): - + + assert ( + pyo.value( + ipp.instance.var_capex_arc_gllj[("mynet", node_A, node_B, arc_key_AB_und)] + ) + > 0 + ) + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + + +def example_nonisolated_undirected_network( + solver, + solver_options, + different_technologies, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + undirected_arc_imports, + undirected_arc_exports, + print_model, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 - + number_periods = 2 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in 
range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None # ) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - + # exp_prices = ResourcePrice( - # prices=[random.random() for i in range(number_intervals)], + # prices=[random.random() for i in range(number_intervals)], # volumes=None) - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1, -1, 0.5, -0.5] - base_flow={ - (0,0):1, - (0,1):-1, - (0,2):0.5, - (0,3):-0.5 - }, - ) - + node_key=node_A, + # base_flow=[1, -1, 0.5, -0.5] + base_flow={(0, 0): 1, (0, 1): -1, (0, 2): 0.5, (0, 3): -0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[-1, 1, -0.5, 0.5] - base_flow={ - (0,0):-1, - (0,1):1, - (0,2):-0.5, - (0,3):0.5 - }, - ) - + node_key=node_B, + # base_flow=[-1, 1, -0.5, 0.5] + base_flow={(0, 0): -1, (0, 1): 1, (0, 2): -0.5, (0, 3): 0.5}, + ) + # add arcs - + # import arc - + arc_tech_IA = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, efficiency_reverse=None, static_loss=None, - validate=False) - + validate=False, + ) + if undirected_arc_imports: - mynet.add_undirected_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA + ) + else: - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA + ) + # export arc - + arc_tech_BE = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, efficiency_reverse=None, static_loss=None, - validate=False) - + validate=False, + ) + if undirected_arc_exports: - mynet.add_undirected_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - + node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE + ) + else: - mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - + node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE + ) + # undirected arc - + if different_technologies: - arc_tech_AB = 
Arcs( - name='any', - #efficiency=[0.95, 0.95, 0.95, 0.95], - efficiency={ - (0,0):0.95, - (0,1):0.95, - (0,2):0.95, - (0,3):0.95 - }, + name="any", + # efficiency=[0.95, 0.95, 0.95, 0.95], + efficiency={(0, 0): 0.95, (0, 1): 0.95, (0, 2): 0.95, (0, 3): 0.95}, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - #efficiency_reverse=[0.85, 0.85, 0.85, 0.85], - efficiency_reverse={ - (0,0):0.85, - (0,1):0.85, - (0,2):0.85, - (0,3):0.85 - }, + # efficiency_reverse=[0.85, 0.85, 0.85, 0.85], + efficiency_reverse={(0, 0): 0.85, (0, 1): 0.85, (0, 2): 0.85, (0, 3): 0.85}, static_loss=None, - validate=False) - + validate=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + else: - arc_tech_AB = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10.0, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, efficiency_reverse=None, static_loss=None, - validate=False) - + validate=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -2543,255 +2480,234 @@ def example_nonisolated_undirected_network(solver, sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + if different_technologies: - # the undirected arc should be installed since it is cheaper than importing
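        # (node A and node B have mirrored base flows, so the undirected A-B
        # arc lets each node's surplus cover the other's deficit, whereas
        # importing everything would incur the import price at every interval)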
- - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # the directed arc from the import should also be installed since node # B cannot fulfil all the demand, as it has an efficiency of 0.85<1 - - assert True in ipp.networks['mynet'].edges[(imp_node_key, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # there should be opex (the imports carry a cost) as well as capex from arcs - + assert pyo.value(ipp.instance.var_sdncf_q[q]) < 0 - + assert pyo.value(ipp.instance.var_capex) > 0 - - assert pyo.value( - ipp.instance.var_capex_arc_gllj[ - ('mynet', node_A, node_B, arc_key_AB_und)]) > 0 - - assert pyo.value( - ipp.instance.var_capex_arc_gllj[ - ('mynet', imp_node_key, node_A, 0)]) > 0 - - else: # same efficiency (and = 1) - + + assert ( + pyo.value( + ipp.instance.var_capex_arc_gllj[ + ("mynet", node_A, node_B, arc_key_AB_und) + ] + ) + > 0 + ) + + assert ( + pyo.value( + ipp.instance.var_capex_arc_gllj[("mynet", imp_node_key, node_A, 0)] + ) + > 0 + ) + + else: # same efficiency (and = 1) # network is still isolated - + # the import arc was not installed - - assert True not in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - # the export arc was not installed - - assert True not in ipp.networks['mynet'].edges[ - (node_B, exp_node_key, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + not in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + # the export arc was not installed + + assert ( + True + not in ipp.networks["mynet"] + .edges[(node_B, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the undirected arc was installed - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # the opex should be zero - - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), 0, abs_tol=1e-6 - ) - + + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 0, abs_tol=1e-6) + # the capex should be positive - + assert pyo.value(ipp.instance.var_capex) > 0 - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_isolated_preexisting_undirected_network( - solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - 
sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + capacity_is_instantaneous, + use_specific_method, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1, -1, 0.5, -0.5] - base_flow={ - (0,0):1, - (0,1):-1, - (0,2):0.5, - (0,3):-0.5 - } - ) - + node_key=node_A, + # base_flow=[1, -1, 0.5, -0.5] + base_flow={(0, 0): 1, (0, 1): -1, (0, 2): 0.5, (0, 3): -0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[-1, 1, -0.5, 0.5], - base_flow={ - (0,0):-1, - (0,1):1, - (0,2):-0.5, - (0,3):0.5 - } - ) - + node_key=node_B, + # base_flow=[-1, 1, -0.5, 0.5], + base_flow={(0, 0): -1, (0, 1): 1, (0, 2): -0.5, (0, 3): 0.5}, + ) + # add arcs - + if different_technologies: - # anisotropic - + if use_specific_method: - mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[0.9, 1, 0.9, 1], - efficiency={ - (0,0):0.9, - (0,1):1, - (0,2):0.9, - (0,3):1 - }, - capacity=1.0, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[0.9, 1, 0.9, 1], + efficiency={(0, 0): 0.9, (0, 1): 1, (0, 2): 0.9, (0, 3): 1}, + capacity=1.0, capacity_is_instantaneous=capacity_is_instantaneous, - #efficiency_reverse=[1, 0.9, 1, 0.9], - efficiency_reverse={ - (0,0):1, - (0,1):0.9, - (0,2):1, - (0,3):0.9 - }, - static_loss=None - ) - + # efficiency_reverse=[1, 0.9, 1, 0.9], + efficiency_reverse={(0, 0): 1, (0, 1): 0.9, (0, 2): 1, (0, 3): 0.9}, + static_loss=None, + ) + else: - # undirected arc: - + arc_tech_AB = Arcs( - name='any', - #efficiency=[0.9, 1, 0.9, 1], - efficiency={ - (0,0):0.9, - (0,1):1, - (0,2):0.9, - (0,3):1 - }, + name="any", + # efficiency=[0.9, 1, 0.9, 1], + efficiency={(0, 0): 0.9, (0, 1): 1, (0, 2): 0.9, (0, 3): 1}, capacity=[1.0], minimum_cost=[0], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - #efficiency_reverse=[1, 0.9, 1, 0.9], - efficiency_reverse={ - (0,0):1, - (0,1):0.9, - (0,2):1, - (0,3):0.9 - }, + # efficiency_reverse=[1, 0.9, 1, 0.9], + efficiency_reverse={(0, 0): 1, (0, 1): 0.9, (0, 2): 1, (0, 3): 0.9}, static_loss=None, - validate=False) - + validate=False, + ) + arc_tech_AB.options_selected[0] = True - + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - - else: # isotropic - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + + else: # isotropic if use_specific_method: - mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, - capacity=1.0, - capacity_is_instantaneous=capacity_is_instantaneous) - + capacity=1.0, + capacity_is_instantaneous=capacity_is_instantaneous, + ) + else: - # undirected arc - + arc_tech_AB = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 
2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, efficiency_reverse=None, static_loss=None, - validate=False) - + validate=False, + ) + arc_tech_AB.options_selected[2] = True - + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -2803,253 +2719,206 @@ def example_isolated_preexisting_undirected_network( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + if different_technologies: - # there should be no opex (imports or exports) and no capex - + assert pyo.value(ipp.instance.var_sdncf_q[q]) == 0 - + assert pyo.value(ipp.instance.var_capex) == 0 - + else: - # there should be no opex (imports or exports) and no capex - + assert pyo.value(ipp.instance.var_sdncf_q[q]) == 0 - + assert pyo.value(ipp.instance.var_capex) == 0 - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_nonisolated_network_preexisting_directed_arcs( - solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - init_aux_sets): - + solver, + solver_options, + different_technologies, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 number_periods = 2 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) 
for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - + # exp_prices = ResourcePrice( - # prices=[random.random() for i in range(number_intervals)], + # prices=[random.random() for i in range(number_intervals)], # volumes=None) - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1, -1, 0.5, -0.5], - base_flow={ - (0,0):1, - (0,1):-1, - (0,2):0.5, - (0,3):-0.5 - } - ) - + node_key=node_A, + # base_flow=[1, -1, 0.5, -0.5], + base_flow={(0, 0): 1, (0, 1): -1, (0, 2): 0.5, (0, 3): -0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[-1, 1, -0.5, 0.5], - base_flow={ - (0,0):-1, - (0,1):1, - (0,2):-0.5, - (0,3):0.5 - } - ) - + node_key=node_B, + # base_flow=[-1, 1, -0.5, 0.5], + base_flow={(0, 0): -1, (0, 1): 1, (0, 2): -0.5, (0, 3): 0.5}, + ) + # add arcs - + # import arc - + arc_tech_IA = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_tech_IA.options_selected[0] = True - - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA) + # export arc - + arc_tech_BE = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_tech_BE.options_selected[0] = True - - mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - + + mynet.add_directed_arc(node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE) + # undirected arc - + if different_technologies: - # anisotropic arc - + arc_tech_AB = Arcs( - name='any', - #efficiency=[0.95, 0.95, 0.95, 0.95], - efficiency={ - (0,0):0.95, - (0,1):0.95, - (0,2):0.95, - (0,3):0.95 - }, + name="any", + # efficiency=[0.95, 0.95, 0.95, 0.95], + efficiency={(0, 0): 0.95, (0, 1): 0.95, (0, 2): 0.95, (0, 3): 0.95}, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - #efficiency_reverse=[0.85, 0.85, 0.85, 0.85], - efficiency_reverse={ - (0,0):0.85, - (0,1):0.85, - (0,2):0.85, - (0,3):0.85 - }, + # efficiency_reverse=[0.85, 0.85, 0.85, 0.85], + efficiency_reverse={(0, 0): 0.85, (0, 1): 0.85, (0, 2): 0.85, (0, 3): 0.85}, 
static_loss=None, validate=False, - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - - else: # isotropic arc - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + + else: # isotropic arc arc_tech_AB = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10.0, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -3061,279 +2930,246 @@ def example_nonisolated_network_preexisting_directed_arcs( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + if different_technologies: - # the undirected arc should be installed since it is cheaper than importing
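        # (here the import and export arcs are preexisting, i.e. their first
        # capacity option was selected before solving, so the A-B arc is the
        # only remaining investment decision; it is still cheaper than serving
        # node B exclusively through priced imports)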
- - assert True in ipp.networks['mynet'].edges[(node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # the directed arc from the import should also be installed since node # B cannot fulfil all the demand, as it has an efficiency of 0.85<1 - - assert True in ipp.networks['mynet'].edges[(imp_node_key, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # there should be opex (the imports carry a cost) as well as capex from arcs - + assert pyo.value(ipp.instance.var_sdncf_q[q]) < 0 - + assert pyo.value(ipp.instance.var_capex) > 0 - - assert pyo.value( - ipp.instance.var_capex_arc_gllj[ - ('mynet', node_A, node_B, arc_key_AB_und)]) > 0 - - else: # same efficiency (and = 1) - + + assert ( + pyo.value( + ipp.instance.var_capex_arc_gllj[ + ("mynet", node_A, node_B, arc_key_AB_und) + ] + ) + > 0 + ) + + else: # same efficiency (and = 1) # network is still isolated - + # the undirected arc was installed - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # the opex should be zero - - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), 0, abs_tol=1e-3 - ) - + + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 0, abs_tol=1e-3) + # the capex should be positive - + assert pyo.value(ipp.instance.var_capex) > 0 - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_preexisting_infinite_capacity_directed_arcs( - solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - init_aux_sets): - + solver, + solver_options, + different_technologies, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 - + number_periods = 2 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - 
}, + ) + # export node - + # exp_prices = ResourcePrice( - # prices=[random.random() for i in range(number_intervals)], + # prices=[random.random() for i in range(number_intervals)], # volumes=None) - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1, -1, 0.5, -0.5], - base_flow={ - (0,0):1, - (0,1):-1, - (0,2):0.5, - (0,3):-0.5 - } - ) - + node_key=node_A, + # base_flow=[1, -1, 0.5, -0.5], + base_flow={(0, 0): 1, (0, 1): -1, (0, 2): 0.5, (0, 3): -0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[-1, 1, -0.5, 0.5], - base_flow={ - (0,0):-1, - (0,1):1, - (0,2):-0.5, - (0,3):0.5 - } - ) - + node_key=node_B, + # base_flow=[-1, 1, -0.5, 0.5], + base_flow={(0, 0): -1, (0, 1): 1, (0, 2): -0.5, (0, 3): 0.5}, + ) + # add arcs - + # import arc - + arc_tech_IA = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[math.inf, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_tech_IA.options_selected[0] = True - - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA) + # export arc - + arc_tech_BE = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[math.inf, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_tech_BE.options_selected[0] = True - - mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - + + mynet.add_directed_arc(node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE) + # undirected arc - + if different_technologies: - arc_tech_AB = Arcs( - name='any', - #efficiency=[0.95, 0.95, 0.95, 0.95], - efficiency={ - (0,0):0.95, - (0,1):0.95, - (0,2):0.95, - (0,3):0.95 - }, - #efficiency_reverse=[0.85, 0.85, 0.85, 0.85], - efficiency_reverse={ - (0,0):0.85, - (0,1):0.85, - (0,2):0.85, - (0,3):0.85 - }, + name="any", + # efficiency=[0.95, 0.95, 0.95, 0.95], + efficiency={(0, 0): 0.95, (0, 1): 0.95, (0, 2): 0.95, (0, 3): 0.95}, + # efficiency_reverse=[0.85, 0.85, 0.85, 0.85], + efficiency_reverse={(0, 0): 0.85, (0, 1): 0.85, (0, 2): 0.85, (0, 3): 0.85}, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - 
capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + else: - arc_tech_AB = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10.0, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -3345,375 +3181,329 @@ def example_preexisting_infinite_capacity_directed_arcs( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + if different_technologies: - # the undirected arc should be installed since it is cheaper than importing
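        # (the preexisting import and export arcs have infinite capacity in
        # this example, so feasibility alone cannot force the A-B arc in; it
        # is selected because direct A-B transfers avoid recurring import costs)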
- - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # the directed arc from the import should also be installed since node # B cannot fulfil all the demand, as it has an efficiency of 0.85<1 - - assert True in ipp.networks['mynet'].edges[(imp_node_key, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # there should be opex (the imports carry a cost) as well as capex from arcs - + assert pyo.value(ipp.instance.var_sdncf_q[q]) < 0 - + assert pyo.value(ipp.instance.var_capex) > 0 - - assert pyo.value( - ipp.instance.var_capex_arc_gllj[ - ('mynet', node_A, node_B, arc_key_AB_und)]) > 0 - - else: # same efficiency (and = 1) - + + assert ( + pyo.value( + ipp.instance.var_capex_arc_gllj[ + ("mynet", node_A, node_B, arc_key_AB_und) + ] + ) + > 0 + ) + + else: # same efficiency (and = 1) # network is still isolated - + # the undirected arc was installed - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # the opex should be zero - - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), - 0, - abs_tol=0.001) - + + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 0, abs_tol=0.001) + # the capex should be positive - + assert pyo.value(ipp.instance.var_capex) > 0 - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_directed_network_static_losses( - solver, - solver_options, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - make_all_arcs_mandatory, - use_arc_techs_with_fixed_losses, - use_arc_techs_without_fixed_losses, - static_losses_mode, - print_model, - init_aux_sets): - + solver, + solver_options, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + make_all_arcs_mandatory, + use_arc_techs_with_fixed_losses, + use_arc_techs_without_fixed_losses, + static_losses_mode, + print_model, + init_aux_sets, +): # if (not use_arc_techs_with_fixed_losses and # not use_arc_techs_without_fixed_losses): - + # return - + # case 1: # if two arc technologies for a given arc are available, and one is with # and the other is without fixed losses, ceteris paribus, the one without # fixed losses will be selected, since it is less onerous # how to check? 
via the arcs installed - + # case 2: # if only technologies with fixed losses are available, then the demand # will be higher and the supply lower than if there were only technologies # without fixed losses, since the losses must offset the results # how to check? via the imports and exports for both situations - + # case 3: # placing arcs with fixed losses downstream or upstream should have no # impact on the imports and exports - + q = 0 - + # time - - number_intervals = 4 - + + number_intervals = 4 + number_periods = 2 - + # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # import node - - imp_prices = [ - ResourcePrice( - prices=1+random.random(), - volumes=None - ) + + imp_prices = [ + ResourcePrice(prices=1 + random.random(), volumes=None) for i in range(number_intervals) - ] - + ] + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): imp_prices[k] + (q, p, k): imp_prices[k] for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - - exp_prices = [ - ResourcePrice( - prices=random.random(), - volumes=None - ) + + exp_prices = [ + ResourcePrice(prices=random.random(), volumes=None) for i in range(number_intervals) - ] - + ] + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): exp_prices[k] + (q, p, k): exp_prices[k] for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[0.5, 0.6, 0.7, 0.8], - base_flow={ - (0,0):0.5, - (0,1):0.6, - (0,2):0.7, - (0,3):0.8 - } - ) - + node_key=node_A, + # base_flow=[0.5, 0.6, 0.7, 0.8], + base_flow={(0, 0): 0.5, (0, 1): 0.6, (0, 2): 0.7, (0, 3): 0.8}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[0.8, -0.7, -0.6, 0.5], - base_flow={ - (0,0):0.8, - (0,1):-0.7, - (0,2):-0.6, - (0,3):0.5 - } - ) - + node_key=node_B, + # base_flow=[0.8, -0.7, -0.6, 0.5], + base_flow={(0, 0): 0.8, (0, 1): -0.7, (0, 2): -0.6, (0, 3): 0.5}, + ) + # add arcs - + # import arc - + if use_arc_techs_without_fixed_losses: - arc_tech_IA = Arcs( - name='IA', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):0.9, # (0,2):1, - (0,3):1 - }, + name="IA", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 0.9, (0, 3): 1}, # (0,2):1, efficiency_reverse=None, static_loss=None, validate=False, - capacity=[0.5, 1.0, 2.0], - minimum_cost=[10, 10.1, 10.2], - specific_capacity_cost=1, - capacity_is_instantaneous=False - ) - + capacity=[0.5, 1.0, 2.0], + minimum_cost=[10, 10.1, 10.2], + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA + ) + if use_arc_techs_with_fixed_losses: - arc_tech_IA_fix = Arcs( - name='IA_fix', - #efficiency=[1, 1, 1, 1], - efficiency={ - (q,0):1, - (q,1):1, - (q,2):0.9, # (0,2):1, - (q,3):1 - }, + name="IA_fix", + # efficiency=[1, 1, 1, 1], + efficiency={(q, 0): 1, (q, 1): 1, (q, 2): 0.9, (q, 3): 1}, # (0,2):1, efficiency_reverse=None, validate=False, - capacity=[0.5, 1.0, 2.0], - minimum_cost=[10, 10.1, 10.2], - specific_capacity_cost=1, - 
capacity_is_instantaneous=False, + capacity=[0.5, 1.0, 2.0], + minimum_cost=[10, 10.1, 10.2], + specific_capacity_cost=1, + capacity_is_instantaneous=False, # static_losses=[ # [0.10, 0.15, 0.20, 0.25], # [0.15, 0.20, 0.25, 0.30], # [0.20, 0.25, 0.30, 0.35]] static_loss={ - (0,q,0):0.10, - (0,q,1):0.15, - (0,q,2):0.20, - (0,q,3):0.25, - (1,q,0):0.15, - (1,q,1):0.20, - (1,q,2):0.25, - (1,q,3):0.30, - (2,q,0):0.20, - (2,q,1):0.25, - (2,q,2):0.30, - (2,q,3):0.35 - }, - ) - + (0, q, 0): 0.10, + (0, q, 1): 0.15, + (0, q, 2): 0.20, + (0, q, 3): 0.25, + (1, q, 0): 0.15, + (1, q, 1): 0.20, + (1, q, 2): 0.25, + (1, q, 3): 0.30, + (2, q, 0): 0.20, + (2, q, 1): 0.25, + (2, q, 2): 0.30, + (2, q, 3): 0.35, + }, + ) + mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA_fix) - + node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA_fix + ) + # export arc - + if use_arc_techs_without_fixed_losses: - arc_tech_BE = Arcs( - name='BE', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):0.9, # (0,2):1, - (0,3):1 - }, + name="BE", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 0.9, (0, 3): 1}, # (0,2):1, validate=False, efficiency_reverse=None, static_loss=None, - capacity=[0.5, 1.0, 2.0], - minimum_cost=[10, 10.1, 10.2], - specific_capacity_cost=1, - capacity_is_instantaneous=False - ) - + capacity=[0.5, 1.0, 2.0], + minimum_cost=[10, 10.1, 10.2], + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - + node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE + ) + if use_arc_techs_with_fixed_losses: - arc_tech_BE_fix = Arcs( - name='BE_fix', - #efficiency=[1, 1, 1, 1], - efficiency={ - (q,0):1, - (q,1):1, - (q,2):0.9, # (0,2):1, - (q,3):1 - }, + name="BE_fix", + # efficiency=[1, 1, 1, 1], + efficiency={(q, 0): 1, (q, 1): 1, (q, 2): 0.9, (q, 3): 1}, # (0,2):1, validate=False, efficiency_reverse=None, - capacity=[0.5, 1.0, 2.0], - minimum_cost=[10, 10.1, 10.2], - specific_capacity_cost=1, - capacity_is_instantaneous=False, + capacity=[0.5, 1.0, 2.0], + minimum_cost=[10, 10.1, 10.2], + specific_capacity_cost=1, + capacity_is_instantaneous=False, # static_losses=[ # [0.10, 0.15, 0.20, 0.25], # [0.15, 0.20, 0.25, 0.30], # [0.20, 0.25, 0.30, 0.35]] static_loss={ - (0,q,0):0.10, - (0,q,1):0.15, - (0,q,2):0.20, - (0,q,3):0.25, - (1,q,0):0.15, - (1,q,1):0.20, - (1,q,2):0.25, - (1,q,3):0.30, - (2,q,0):0.20, - (2,q,1):0.25, - (2,q,2):0.30, - (2,q,3):0.35 - }, - ) - + (0, q, 0): 0.10, + (0, q, 1): 0.15, + (0, q, 2): 0.20, + (0, q, 3): 0.25, + (1, q, 0): 0.15, + (1, q, 1): 0.20, + (1, q, 2): 0.25, + (1, q, 3): 0.30, + (2, q, 0): 0.20, + (2, q, 1): 0.25, + (2, q, 2): 0.30, + (2, q, 3): 0.35, + }, + ) + mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE_fix) - + node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE_fix + ) + # directed arc between A and B - + if use_arc_techs_without_fixed_losses: - arc_tech_AB = Arcs( - name='AB_BA', - # efficiency=[1, 0.7, 1, 1], - efficiency={ - (0,0):1, - (0,1):0.7, - (0,2):1, - (0,3):1 - }, + name="AB_BA", + # efficiency=[1, 0.7, 1, 1], + efficiency={(0, 0): 1, (0, 1): 0.7, (0, 2): 1, (0, 3): 1}, validate=False, efficiency_reverse=None, static_loss=None, - capacity=[1.5, 2.0, 2.5, 3.0], + capacity=[1.5, 2.0, 2.5, 3.0], minimum_cost=[10.0, 10.1, 10.2, 10.3], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - 
mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB) + if use_arc_techs_with_fixed_losses: - arc_tech_AB_fix = Arcs( - name='AB_BA_fix', - # efficiency=[1, 0.7, 1, 1], - efficiency={ - (q,0):1, - (q,1):0.7, - (q,2):1, - (q,3):1 - }, + name="AB_BA_fix", + # efficiency=[1, 0.7, 1, 1], + efficiency={(q, 0): 1, (q, 1): 0.7, (q, 2): 1, (q, 3): 1}, validate=False, efficiency_reverse=None, - capacity=[1.5, 2.0, 2.5, 3.0], - minimum_cost=[10, 10.1, 10.2, 10.3], - specific_capacity_cost=1, - capacity_is_instantaneous=False, + capacity=[1.5, 2.0, 2.5, 3.0], + minimum_cost=[10, 10.1, 10.2, 10.3], + specific_capacity_cost=1, + capacity_is_instantaneous=False, # static_losses=[ # [0.01, 0.02, 0.03, 0.04], # [0.02, 0.03, 0.04, 0.05], @@ -3721,37 +3511,35 @@ def example_directed_network_static_losses( # [0.04, 0.05, 0.06, 0.07] # ] static_loss={ - (0,q,0):0.01, - (0,q,1):0.02, - (0,q,2):0.03, - (0,q,3):0.04, - (1,q,0):0.02, - (1,q,1):0.03, - (1,q,2):0.04, - (1,q,3):0.05, - (2,q,0):0.03, - (2,q,1):0.04, - (2,q,2):0.05, - (2,q,3):0.06, - (3,q,0):0.04, - (3,q,1):0.05, - (3,q,2):0.06, - (3,q,3):0.07 - }, - - ) + (0, q, 0): 0.01, + (0, q, 1): 0.02, + (0, q, 2): 0.03, + (0, q, 3): 0.04, + (1, q, 0): 0.02, + (1, q, 1): 0.03, + (1, q, 2): 0.04, + (1, q, 3): 0.05, + (2, q, 0): 0.03, + (2, q, 1): 0.04, + (2, q, 2): 0.05, + (2, q, 3): 0.06, + (3, q, 0): 0.04, + (3, q, 1): 0.05, + (3, q, 2): 0.06, + (3, q, 3): 0.07, + }, + ) mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB_fix) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB_fix + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -3763,380 +3551,422 @@ def example_directed_network_static_losses( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # ipp.instance.pprint() - + # validation - + # only arc techs with fixed losses - - if (use_arc_techs_with_fixed_losses and - not use_arc_techs_without_fixed_losses): - + + if use_arc_techs_with_fixed_losses and not use_arc_techs_without_fixed_losses: # all arcs should be installed - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_B, exp_node_key, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + 
.edges[(node_B, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 # print('hey') # print(flow_in[('mynet',0,0)]) # print(flow_in) # ipp.instance.pprint() assert math.isclose( - flow_in[('mynet',0,0)], - 5.631111111111111, - abs_tol=abs_tol - ) + flow_in[("mynet", 0, 0)], 5.631111111111111, abs_tol=abs_tol + ) # assert math.isclose(flow_in[('mynet',0,0)], 5.55, abs_tol=abs_tol) - + # there should be exports - + abs_tol = 1e-2 - - assert math.isclose(flow_out[('mynet',0,0)], 0.815, abs_tol=abs_tol) - #assert math.isclose(flow_out[('mynet',0,0)], 0.85, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0.815, abs_tol=abs_tol) + # assert math.isclose(flow_out[('mynet',0,0)], 0.85, abs_tol=abs_tol) + # the opex should be negative (costs outweigh the revenue) - + abs_tol = 1e-6 - - assert flow_in_cost[('mynet',0,0)] > flow_out_revenue[('mynet',0,0)] - abs_tol - + + assert ( + flow_in_cost[("mynet", 0, 0)] > flow_out_revenue[("mynet", 0, 0)] - abs_tol + ) + # there should be capex - + abs_tol = 1e-6 - + assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol - + # only arc techs without fixed losses - - if (not use_arc_techs_with_fixed_losses and - use_arc_techs_without_fixed_losses): - + + if not use_arc_techs_with_fixed_losses and use_arc_techs_without_fixed_losses: # all arcs should be installed - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_B, exp_node_key, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_B, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports (lower than with fixed losses, cet. parib.) - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], - 3.977777777777778, - abs_tol=abs_tol) - # assert math.isclose(flow_in[('mynet',0,0)], - # 3.8999999999999995, + + assert math.isclose( + flow_in[("mynet", 0, 0)], 3.977777777777778, abs_tol=abs_tol + ) + # assert math.isclose(flow_in[('mynet',0,0)], + # 3.8999999999999995, # abs_tol=abs_tol) - + # there should be exports (higher than with fixed losses, cet. parib.) 
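        # (without static losses, no flow is consumed inside the arcs
        # themselves, so fewer imports are needed to serve node A and more of
        # node B's surplus reaches the export node; this is case 2 described
        # at the top of this function)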
- + abs_tol = 1e-2 - - assert math.isclose(flow_out[('mynet',0,0)], - 1.2400000000000002, - abs_tol=abs_tol) - # assert math.isclose(flow_out[('mynet',0,0)], - # 1.2999999999999998, + + assert math.isclose( + flow_out[("mynet", 0, 0)], 1.2400000000000002, abs_tol=abs_tol + ) + # assert math.isclose(flow_out[('mynet',0,0)], + # 1.2999999999999998, # abs_tol=abs_tol) - + # the opex should be negative (costs outweigh the revenue) - + abs_tol = 1e-6 - - assert flow_in_cost[('mynet',0,0)] > flow_out_revenue[('mynet',0,0)] - abs_tol - + + assert ( + flow_in_cost[("mynet", 0, 0)] > flow_out_revenue[("mynet", 0, 0)] - abs_tol + ) + # there should be capex - + abs_tol = 1e-6 - + assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol - + # arc techs with and without fixed losses - + # (verifies that arcs without losses take precedence, due to lower costs) - - if (use_arc_techs_with_fixed_losses and - use_arc_techs_without_fixed_losses): - + + if use_arc_techs_with_fixed_losses and use_arc_techs_without_fixed_losses: # the arcs without losses should be installed (those get index 0) - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_B, exp_node_key, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_B, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the arcs with losses should not be installed - - assert True not in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 1)][Network.KEY_ARC_TECH].options_selected - - assert True not in ipp.networks['mynet'].edges[ - (node_B, exp_node_key, 1)][Network.KEY_ARC_TECH].options_selected - - assert True not in ipp.networks['mynet'].edges[ - (node_A, node_B, 1)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + not in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 1)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + not in ipp.networks["mynet"] + .edges[(node_B, exp_node_key, 1)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + not in ipp.networks["mynet"] + .edges[(node_A, node_B, 1)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], - 3.977777777777778, - abs_tol=abs_tol) - # assert math.isclose(flow_in[('mynet',0,0)], - # 3.8999999999999995, + + assert math.isclose( + flow_in[("mynet", 0, 0)], 3.977777777777778, abs_tol=abs_tol + ) + # assert math.isclose(flow_in[('mynet',0,0)], + # 3.8999999999999995, # abs_tol=abs_tol) - + # there should be exports - + abs_tol = 1e-2 - - assert math.isclose(flow_out[('mynet',0,0)], - 1.2400000000000002, - abs_tol=abs_tol) - # assert math.isclose(flow_out[('mynet',0,0)], - # 1.2999999999999998, + + assert math.isclose( + flow_out[("mynet", 0, 0)],
1.2400000000000002, abs_tol=abs_tol + ) + # assert math.isclose(flow_out[('mynet',0,0)], + # 1.2999999999999998, # abs_tol=abs_tol) - + # the opex should be negative (costs outweigh the revenue) - + abs_tol = 1e-6 - - assert flow_in_cost[('mynet',0,0)] > flow_out_revenue[('mynet',0,0)] - abs_tol - + + assert ( + flow_in_cost[("mynet", 0, 0)] > flow_out_revenue[("mynet", 0, 0)] - abs_tol + ) + # there should be capex - + abs_tol = 1e-6 - + assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_direct_imp_exp_network( - solver, - solver_options, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - make_all_arcs_mandatory, - use_static_losses, - use_higher_export_prices, - print_model, - init_aux_sets): - + solver, + solver_options, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + make_all_arcs_mandatory, + use_static_losses, + use_higher_export_prices, + print_model, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 - + number_periods = 2 # 2 nodes: one import, one export - + mynet = Network() - + # prices - + # if use_higher_export_prices: - + # imp_prices = ResourcePrice( - # prices=[0+random.random() for i in range(number_intervals)], + # prices=[0+random.random() for i in range(number_intervals)], # volumes=None) - + # exp_prices = ResourcePrice( - # prices=[1.5+random.random() for i in range(number_intervals)], + # prices=[1.5+random.random() for i in range(number_intervals)], # volumes=None) - + # else: - + # imp_prices = ResourcePrice( - # prices=[1.5+random.random() for i in range(number_intervals)], + # prices=[1.5+random.random() for i in range(number_intervals)], # volumes=None) - + # exp_prices = ResourcePrice( - # prices=[random.random() for i in range(number_intervals)], + # prices=[random.random() for i in range(number_intervals)], # volumes=None) - + # import node - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( + (q, p, k): ResourcePrice( prices=( - 0+random.random() - if use_higher_export_prices else 1.5+random.random() - ), - volumes=None - ) + 0 + random.random() + if use_higher_export_prices + else 1.5 + random.random() + ), + volumes=None, + ) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( + (q, p, k): ResourcePrice( prices=( - 1.5+random.random() - if use_higher_export_prices else 0+random.random() - ), - volumes=None - ) + 1.5 + random.random() + if use_higher_export_prices + else 0 + random.random() + ), + volumes=None, + ) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + if use_static_losses: - # add arc with fixed
losses from import node to export - + arc_tech_IE_fix = Arcs( - name='IE_fix', - #efficiency=[1, 1, 1, 1], - efficiency={ - (q,0):1, - (q,1):1, - (q,2):1, - (q,3):1 - }, + name="IE_fix", + # efficiency=[1, 1, 1, 1], + efficiency={(q, 0): 1, (q, 1): 1, (q, 2): 1, (q, 3): 1}, efficiency_reverse=None, validate=False, - capacity=[0.5, 1.0, 2.0], - minimum_cost=[5, 5.1, 5.2], - specific_capacity_cost=1, - capacity_is_instantaneous=False, + capacity=[0.5, 1.0, 2.0], + minimum_cost=[5, 5.1, 5.2], + specific_capacity_cost=1, + capacity_is_instantaneous=False, # static_losses=[ # [0.10, 0.15, 0.20, 0.25], # [0.15, 0.20, 0.25, 0.30], # [0.20, 0.25, 0.30, 0.35]] static_loss={ - (0,q,0):0.10, - (0,q,1):0.15, - (0,q,2):0.20, - (0,q,3):0.25, - (1,q,0):0.15, - (1,q,1):0.20, - (1,q,2):0.25, - (1,q,3):0.30, - (2,q,0):0.20, - (2,q,1):0.25, - (2,q,2):0.30, - (2,q,3):0.35 - }, - ) - + (0, q, 0): 0.10, + (0, q, 1): 0.15, + (0, q, 2): 0.20, + (0, q, 3): 0.25, + (1, q, 0): 0.15, + (1, q, 1): 0.20, + (1, q, 2): 0.25, + (1, q, 3): 0.30, + (2, q, 0): 0.20, + (2, q, 1): 0.25, + (2, q, 2): 0.30, + (2, q, 3): 0.35, + }, + ) + mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=exp_node_key, - arcs=arc_tech_IE_fix) - + node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE_fix + ) + else: - # add arc without fixed losses from import node to export - + arc_tech_IE = Arcs( - name='IE', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="IE", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, - capacity=[0.5, 1.0, 2.0], - minimum_cost=[5, 5.1, 5.2], - specific_capacity_cost=1, - capacity_is_instantaneous=False - ) - + capacity=[0.5, 1.0, 2.0], + minimum_cost=[5, 5.1, 5.2], + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=exp_node_key, - arcs=arc_tech_IE) + node_key_a=imp_node_key, node_key_b=exp_node_key, arcs=arc_tech_IE + ) # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -4148,390 +3978,326 @@ def example_direct_imp_exp_network( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + if use_higher_export_prices: - # export prices are higher: it makes sense to install the arc since the # revenue (@ max. cap.) 
exceeds the cost of installing the arc - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, exp_node_key, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert flow_in[('mynet',0,0)] > 0.0 - abs_tol - - assert flow_in_cost[('mynet',0,0)] > 0.0 - abs_tol - + + assert flow_in[("mynet", 0, 0)] > 0.0 - abs_tol + + assert flow_in_cost[("mynet", 0, 0)] > 0.0 - abs_tol + # there should be exports - + abs_tol = 1e-2 - - assert flow_out[('mynet',0,0)] > 0.0 - abs_tol - - assert flow_out_revenue[('mynet',0,0)] > 0.0 - abs_tol - + + assert flow_out[("mynet", 0, 0)] > 0.0 - abs_tol + + assert flow_out_revenue[("mynet", 0, 0)] > 0.0 - abs_tol + # the revenue should exceed the costs - + abs_tol = 1e-2 - - assert flow_out_revenue[('mynet',0,0)] > flow_in_cost[('mynet',0,0)] - abs_tol - + + assert ( + flow_out_revenue[("mynet", 0, 0)] > flow_in_cost[("mynet", 0, 0)] - abs_tol + ) + # the capex should be positive - + abs_tol = 1e-6 - + assert pyo.value(ipp.instance.var_capex) > 0 - abs_tol - - else: # import prices are higher: it makes no sense to install the arc - + + else: # import prices are higher: it makes no sense to install the arc # the arc should not be installed (the prices do not allow for it) - - assert True not in ipp.networks['mynet'].edges[ - (imp_node_key, exp_node_key, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + not in ipp.networks["mynet"] + .edges[(imp_node_key, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be no imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], - 0.0, - abs_tol=abs_tol) - - assert math.isclose(flow_in_cost[('mynet',0,0)], - 0.0, - abs_tol=abs_tol) - + + assert math.isclose(flow_in[("mynet", 0, 0)], 0.0, abs_tol=abs_tol) + + assert math.isclose(flow_in_cost[("mynet", 0, 0)], 0.0, abs_tol=abs_tol) + # there should be no exports - + abs_tol = 1e-2 - - assert math.isclose(flow_out[('mynet',0,0)], - 0.0, - abs_tol=abs_tol) - - assert math.isclose(flow_out_revenue[('mynet',0,0)], - 0.0, - abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0.0, abs_tol=abs_tol) + + assert math.isclose(flow_out_revenue[("mynet", 0, 0)], 0.0, abs_tol=abs_tol) + # there should be no capex - + abs_tol = 1e-6 - - assert math.isclose(pyo.value(ipp.instance.var_capex), - 0.0, - abs_tol=abs_tol) - - -#****************************************************************************** -#****************************************************************************** - -def example_network_mandatory_arcs(solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - use_undirected_arcs,
print_model, - init_aux_sets): - + + assert math.isclose(pyo.value(ipp.instance.var_capex), 0.0, abs_tol=abs_tol) + + +# ****************************************************************************** +# ****************************************************************************** + + +def example_network_mandatory_arcs( + solver, + solver_options, + different_technologies, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + use_undirected_arcs, + print_model, + init_aux_sets, +): q = 0 - + # time - + number_intervals = 4 number_periods = 2 # 6 nodes: one import, one export, four supply/demand nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - + # exp_prices = ResourcePrice( - # prices=[random.random() for i in range(number_intervals)], + # prices=[random.random() for i in range(number_intervals)], # volumes=None) - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[1.00, 1.25, 0.75, 0.5], - base_flow={ - (0,0):1.0, - (0,1):1.25, - (0,2):0.75, - (0,3):0.5 - } - ) - + node_key=node_A, + # base_flow=[1.00, 1.25, 0.75, 0.5], + base_flow={(0, 0): 1.0, (0, 1): 1.25, (0, 2): 0.75, (0, 3): 0.5}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[0.50, 0.25, 0.35, 0.45], - base_flow={ - (0,0):0.50, - (0,1):0.25, - (0,2):0.35, - (0,3):0.45 - } - ) - + node_key=node_B, + # base_flow=[0.50, 0.25, 0.35, 0.45], + base_flow={(0, 0): 0.50, (0, 1): 0.25, (0, 2): 0.35, (0, 3): 0.45}, + ) + node_C = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_C, - #base_flow=[-1, -0.1, -1.5, -0.25], - base_flow={ - (0,0):-1, - (0,1):-0.1, - (0,2):-1.5, - (0,3):-0.25 - } - ) - + node_key=node_C, + # base_flow=[-1, -0.1, -1.5, -0.25], + base_flow={(0, 0): -1, (0, 1): -0.1, (0, 2): -1.5, (0, 3): -0.25}, + ) + node_D = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_D, - #base_flow=[-1, -1.25, -0.25, -0.5], - base_flow={ - (0,0):-1, - (0,1):-1.25, - (0,2):-0.25, - (0,3):-0.5 - } - ) - + node_key=node_D, + # base_flow=[-1, -1.25, -0.25, -0.5], + base_flow={(0, 0): -1, (0, 1): -1.25, (0, 2): -0.25, (0, 3): -0.5}, + ) + # add arcs - + # import arc - + arc_tech_IA = Arcs( - name='IA', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="IA", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0):
1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA) + arc_tech_AB = Arcs( - name='AB', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="AB", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + if use_undirected_arcs: - arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + else: - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB) + arc_key_AB_und = 0 - + # export arc - + arc_tech_CD = Arcs( - name='CD', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="CD", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_tech_DE = Arcs( - name='DE', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="DE", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + if use_undirected_arcs: - arc_key_CD_und = mynet.add_undirected_arc( - node_key_a=node_C, - node_key_b=node_D, - arcs=arc_tech_CD) - + node_key_a=node_C, node_key_b=node_D, arcs=arc_tech_CD + ) + else: - - mynet.add_directed_arc( - node_key_a=node_C, - node_key_b=node_D, - arcs=arc_tech_CD) - + mynet.add_directed_arc(node_key_a=node_C, node_key_b=node_D, arcs=arc_tech_CD) + arc_key_CD_und = 0 - - mynet.add_directed_arc( - node_key_a=node_D, - node_key_b=exp_node_key, - arcs=arc_tech_DE) - - mandatory_arcs = [('mynet', imp_node_key, node_A, 0), - ('mynet', node_D, exp_node_key, 0), - ('mynet', node_A, node_B, arc_key_AB_und), - ('mynet', node_C, node_D, arc_key_CD_und)] - + + mynet.add_directed_arc(node_key_a=node_D, node_key_b=exp_node_key, arcs=arc_tech_DE) + + mandatory_arcs = [ + ("mynet", imp_node_key, node_A, 0), + ("mynet", node_D, exp_node_key, 0), + ("mynet", node_A, node_B, arc_key_AB_und), + ("mynet", node_C, node_D, arc_key_CD_und), + ] + # add two nodes and one preexisting arc to cover a special case - + node_G = generate_pseudo_unique_key(mynet.nodes()) - - 
mynet.add_waypoint_node( - node_key=node_G - ) - + + mynet.add_waypoint_node(node_key=node_G) + node_H = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_waypoint_node( - node_key=node_H - ) - + + mynet.add_waypoint_node(node_key=node_H) + mynet.add_preexisting_directed_arc( - node_key_a=node_G, - node_key_b=node_H, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_G, + node_key_b=node_H, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=1, - capacity_is_instantaneous=False) - + capacity=1, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -4543,114 +4309,135 @@ def example_network_mandatory_arcs(solver, sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=mandatory_arcs, max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + # IA - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # AB - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # CD - - assert True in ipp.networks['mynet'].edges[ - (node_C, node_D, arc_key_CD_und)][ - Network.KEY_ARC_TECH].options_selected + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_C, node_D, arc_key_CD_und)][Network.KEY_ARC_TECH] + .options_selected + ) # DE - - assert True in ipp.networks['mynet'].edges[ - (node_D, exp_node_key, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_D, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the undirected arc was installed - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert flow_in[('mynet',0,0)] > 0.0 - abs_tol - - assert flow_in_cost[('mynet',0,0)] > 0.0 - abs_tol - + + assert flow_in[("mynet", 0, 0)] > 0.0 - abs_tol + + assert flow_in_cost[("mynet", 0, 0)] > 0.0 - abs_tol + 
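+        # note: the import and export prices above are randomised via random.random(),
+        # so the exact flow volumes vary between runs; the checks here and below
+        # therefore only require the flows, costs and revenues to be non-negative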
# there should be exports - + abs_tol = 1e-2 - - assert flow_out[('mynet',0,0)] > 0.0 - abs_tol - - assert flow_out_revenue[('mynet',0,0)] > 0.0 - abs_tol - + + assert flow_out[("mynet", 0, 0)] > 0.0 - abs_tol + + assert flow_out_revenue[("mynet", 0, 0)] > 0.0 - abs_tol + # the capex should be positive - + assert pyo.value(ipp.instance.var_capex) > 0 - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_problem_max_arc_limits_infeasible( - solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - case, - print_model, - init_aux_sets): - - #************************************************************************** - #************************************************************************** - + solver, + solver_options, + different_technologies, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + case, + print_model, + init_aux_sets, +): + # ************************************************************************** + # ************************************************************************** + # there are 3 possible outcomes: - # 1) the number of preexisting and mandatory arcs is above the limit + # 1) the number of preexisting and mandatory arcs is above the limit # >> the problem is infeasible - # 2) maximum number of arcs lower than or equal to the limit + # 2) maximum number of arcs lower than or equal to the limit # >> the constraint is skipped # 3) maximum number of arcs above the limit, the number of preexisting and # mandatory arcs is below the limit >> the constraint is used - + # various ways to test the cases: # 1) preexisting vs selectable arcs # 2) mandatory vs optional arcs # 3) directed vs undirected arcs - + # how to test case 1: # a) use only preexisting directed arcs # b) use only preexisting undirected arcs @@ -4668,1307 +4455,1000 @@ def example_problem_max_arc_limits_infeasible( # n) use preexi. 
undirected arcs and mandatory directed and undirected arcs # o) use preselected and mandatory directed and undirected arcs - #************************************************************************** - #************************************************************************** - + # ************************************************************************** + # ************************************************************************** + q = 0 - + # time - + number_intervals = 4 - + number_periods = 2 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[0+random.random() for i in range(number_intervals)], + # prices=[0+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=0+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=0 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - + # exp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[0.1, 0.2, 0.3, 0.4], - base_flow={ - (0,0):0.1, - (0,1):0.2, - (0,2):0.3, - (0,3):0.4 - } - ) - + node_key=node_A, + # base_flow=[0.1, 0.2, 0.3, 0.4], + base_flow={(0, 0): 0.1, (0, 1): 0.2, (0, 2): 0.3, (0, 3): 0.4}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[0.4, 0.3, 0.2, 0.1], - base_flow={ - (0,0):0.4, - (0,1):0.3, - (0,2):0.2, - (0,3):0.1 - } - ) - + node_key=node_B, + # base_flow=[0.4, 0.3, 0.2, 0.1], + base_flow={(0, 0): 0.4, (0, 1): 0.3, (0, 2): 0.2, (0, 3): 0.1}, + ) + # add arcs - + # import arc - + arc_tech_IA = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA) + # export arc - + arc_tech_BE = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[1, 1.1, 1.2, 1.3, 1.4, 1.5], -
specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - - #************************************************************************** - #************************************************************************** - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE) + + # ************************************************************************** + # ************************************************************************** + # arcs between A and B - + max_number_arcs_AB = 1 - + arc_tech_AB = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) arc_key_AB = mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - - if case == '1_a': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + + if case == "1_a": # a) use only preexisting directed arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, static_loss=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - + arc_groups_dict = {} - - elif case == '1_b': - + + elif case == "1_b": # b) use only preexisting undirected arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - + arc_groups_dict = {} - - elif case == '1_c': - + + elif case == "1_c": # c) use preexisting 
directed and undirected arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - + arc_groups_dict = {} - - elif case == '1_d': - + + elif case == "1_d": # d) use only mandatory directed arcs - + max_number_arcs_AB = 1 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2)] - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2) + + mandatory_arcs = [("mynet", node_A, node_B, 1), ("mynet", node_A, node_B, 2)] + arc_groups_dict = {} - - elif case == '1_e': - + + elif case == "1_e": # e) use only mandatory undirected arcs - + max_number_arcs_AB = 1 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 
1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1), ("mynet", node_A, node_B, 2)] + arc_groups_dict = {} - - elif case == '1_f': - + + elif case == "1_f": # f) use mandatory directed and undirected arcs - + max_number_arcs_AB = 1 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1), ("mynet", node_A, node_B, 2)] + arc_groups_dict = {} - - elif case == '1_g': - + + elif case == "1_g": # g) use preexisting directed arcs and mandatory directed arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 1)] - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, 
arcs=arc_tech_AB1) + + mandatory_arcs = [("mynet", node_A, node_B, 1)] + arc_groups_dict = {} - - elif case == '1_h': - + + elif case == "1_h": # h) use preexisting undirected arcs and mandatory undirected arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 1)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1)] + arc_groups_dict = {} - - elif case == '1_i': - + + elif case == "1_i": # i) use preexisting undirected arcs and mandatory directed arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 1)] - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + + mandatory_arcs = [("mynet", node_A, node_B, 1)] + arc_groups_dict = {} - - elif case == '1_j': - + + elif case == "1_j": # j) use preexisting directed arcs and mandatory undirected arcs - + max_number_arcs_AB = 1 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - 
capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 1)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1)] + arc_groups_dict = {} - - elif case == '1_k': - + + elif case == "1_k": # k) use preexi. directed and undirected arcs and mandat. directed arcs - + max_number_arcs_AB = 2 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 1)] - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + + mandatory_arcs = [("mynet", node_A, node_B, 1)] + arc_groups_dict = {} - - elif case == '1_l': - + + elif case == "1_l": # l) use preexi. directed and undir. 
arcs and mandatory undirected arcs - + max_number_arcs_AB = 2 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 1)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1)] + arc_groups_dict = {} - - elif case == '1_m': - + + elif case == "1_m": # m) use preexi. directed arcs and mandat. 
directed and undirected arcs - + max_number_arcs_AB = 2 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1), ("mynet", node_A, node_B, 2)] + arc_groups_dict = {} - - elif case == '1_n': - + + elif case == "1_n": # n) use preexi. undirected arcs and man. 
directed and undirected arcs - + max_number_arcs_AB = 2 - + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, validate=False, static_loss=None, efficiency_reverse=None, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1), ("mynet", node_A, node_B, 2)] + arc_groups_dict = {} - - elif case == '1_o': - + + elif case == "1_o": # o) use preselected and mandatory directed and undirected arcs - + max_number_arcs_AB = 3 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1,
+ capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [("mynet", node_A, node_B, 1), ("mynet", node_A, node_B, 2)] + arc_groups_dict = {} - - elif case == '1_p': - + + elif case == "1_p": # p) use pre-existing undirected arcs in both directions - + max_number_arcs_AB = 2 - + arc_key_AB1 = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, - efficiency_reverse=None, + node_key_a=node_A, + node_key_b=node_B, + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, + efficiency_reverse=None, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_key_AB2 = mynet.add_preexisting_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_B, + node_key_b=node_A, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - + arc_groups_dict = {} - - elif case == '1_q': - + + elif case == "1_q": # q) use pre-existing and mandatory undirected arcs in both directions - + max_number_arcs_AB = 4 - + arc_key_AB1 = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, - efficiency_reverse=None, + node_key_a=node_A, + node_key_b=node_B, + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, + efficiency_reverse=None, static_loss=None, - capacity=10, - capacity_is_instantaneous=False - ) - + capacity=10, + capacity_is_instantaneous=False, + ) + arc_key_AB2 = mynet.add_preexisting_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_B, + node_key_b=node_A, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + arc_tech_AB3 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, -
capacity_is_instantaneous=False, + ) + arc_key_AB3 = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB3) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB3 + ) + arc_tech_AB4 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB4 = mynet.add_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - arcs=arc_tech_AB4) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB3), - ('mynet', node_B, node_A, arc_key_AB4)] - + node_key_a=node_B, node_key_b=node_A, arcs=arc_tech_AB4 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB3), + ("mynet", node_B, node_A, arc_key_AB4), + ] + arc_groups_dict = {} - - elif case == '1_r': - + + elif case == "1_r": # r) use mandatory and pre-existing undirected arcs in both directions - + max_number_arcs_AB = 2 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB1 = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2 = mynet.add_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB1), - ('mynet', node_B, node_A, arc_key_AB2)] - + node_key_a=node_B, node_key_b=node_A, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB1), + ("mynet", node_B, node_A, arc_key_AB2), + ] + arc_groups_dict = {} - - elif case == '1_s': - + + elif case == "1_s": # s) TODO: use groups of arcs with mandatory arcs - + max_number_arcs_AB = 2 - + arc_tech_AB1 = Arcs( - name='any', - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB1 = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + arc_tech_AB2 = Arcs( - name='any', - 
efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2 = mynet.add_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB1), - ('mynet', node_B, node_A, arc_key_AB2)] - + node_key_a=node_B, node_key_b=node_A, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB1), + ("mynet", node_B, node_A, arc_key_AB2), + ] + arc_groups_dict = { - 0: (('mynet',node_A,node_B,arc_key_AB1), - ('mynet',node_B,node_A,arc_key_AB2), - ('mynet',node_A,node_B,arc_key_AB)) - } - - elif case == '1_t': - + 0: ( + ("mynet", node_A, node_B, arc_key_AB1), + ("mynet", node_B, node_A, arc_key_AB2), + ("mynet", node_A, node_B, arc_key_AB), + ) + } + + elif case == "1_t": # t) TODO: use mandatory groups of arcs - + max_number_arcs_AB = 2 - + arc_tech_AB1 = Arcs( - name='any', - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB1 = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + arc_tech_AB2 = Arcs( - name='any', - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2 = mynet.add_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB1), - ('mynet', node_B, node_A, arc_key_AB2)] - + node_key_a=node_B, node_key_b=node_A, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB1), + ("mynet", node_B, node_A, arc_key_AB2), + ] + arc_groups_dict = { - 0: (('mynet',node_A,node_B,arc_key_AB1), - ('mynet',node_B,node_A,arc_key_AB2)) - } - + 0: ( + ("mynet", node_A, node_B, arc_key_AB1), + ("mynet", node_B, node_A, arc_key_AB2), + ) + } + # elif case == '1_u': - + # pass - + # else: - + # mandatory_arcs = [] - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + _ = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -5980,97 +5460,99 @@ def example_problem_max_arc_limits_infeasible( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, 
perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=mandatory_arcs, - max_number_parallel_arcs={('mynet',node_A,node_B):max_number_arcs_AB}, + max_number_parallel_arcs={("mynet", node_A, node_B): max_number_arcs_AB}, arc_groups_dict=arc_groups_dict, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # # validation - + # # the import arc is installed - + # assert True in ipp.networks['mynet'].edges[(imp_node_key, node_A, 0)][ # Network.KEY_ARC_TECH].options_selected - + # # the intermediate arc was installed - + # assert True in ipp.networks['mynet'].edges[(node_A, node_B, 0)][ # Network.KEY_ARC_TECH].options_selected - - # # the export arc was installed - + + # # the export arc was installed + # assert True in ipp.networks['mynet'].edges[(node_B, exp_node_key, 0)][ # Network.KEY_ARC_TECH].options_selected - + # # overview - - # (flow_in, - # flow_out, - # flow_in_cost, + + # (flow_in, + # flow_out, + # flow_in_cost, # flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + # # there should be imports - + # abs_tol = 1e-6 - + # assert flow_in[('mynet',0,0)] > 0.0 - abs_tol - + # assert flow_in_cost[('mynet',0,0)] > 0.0 - abs_tol - + # # there should be exports - + # abs_tol = 1e-2 - + # assert flow_out[('mynet',0,0)] > 0.0 - abs_tol - + # assert flow_out_revenue[('mynet',0,0)] > 0.0 - abs_tol - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_problem_max_arc_limits_skip( - solver, - solver_options, - different_technologies, - irregular_time_intervals, - use_sos_arcs, - sos_weight_key, - use_real_variables_if_possible, - use_sos_sense, - sense_sos_weight_key, - sense_use_real_variables_if_possible, - use_arc_interfaces, - case, - print_model, - init_aux_sets): - - #************************************************************************** - #************************************************************************** - + solver, + solver_options, + different_technologies, + irregular_time_intervals, + use_sos_arcs, + sos_weight_key, + use_real_variables_if_possible, + use_sos_sense, + sense_sos_weight_key, + sense_use_real_variables_if_possible, + use_arc_interfaces, + case, + print_model, + init_aux_sets, +): + # ************************************************************************** + # ************************************************************************** + # there are 3 possible outcomes: - # 1) the number of preexisting and mandatory arcs is above the limit + # 1) the number of preexisting and mandatory arcs is above the limit # >> the problem is infeasible - # 2) maximum number of arcs lower than or equal to the limit + # 2) maximum 
number of arcs lower than or equal to the limit # >> the constraint is skipped # 3) maximum number of arcs above the limit, the number of preexisting and # mandatory arcs is below the limit >> the constraint is used - + # various ways to test the cases: # 1) preexisting vs selectable arcs # 2) mandatory vs optional arcs # 3) directed vs undirected arcs - + # how to test case 1: # a) use only preexisting directed arcs # b) use only preexisting undirected arcs @@ -6088,963 +5570,742 @@ def example_problem_max_arc_limits_skip( # n) use preexi. undirected arcs and mandatory directed and undirected arcs # o) use preselected and mandatory directed and undirected arcs - #************************************************************************** - #************************************************************************** - + # ************************************************************************** + # ************************************************************************** + q = 0 - + # time - + number_intervals = 4 number_periods = 2 # 4 nodes: one import, one export, two supply/demand nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[0+(i+1)/number_intervals for i in range(number_intervals)], + # prices=[0+(i+1)/number_intervals for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=0+(k+1)/number_intervals, - volumes=None - ) + (q, p, k): ResourcePrice( + prices=0 + (k + 1) / number_intervals, volumes=None + ) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node - + # exp_prices = ResourcePrice( - # prices=[10+(i+1)/number_intervals for i in range(number_intervals)], + # prices=[10+(i+1)/number_intervals for i in range(number_intervals)], # volumes=None) - + exp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_export_node( - node_key=exp_node_key, + node_key=exp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=10+(k+1)/number_intervals, - volumes=None - ) + (q, p, k): ResourcePrice( + prices=10 + (k + 1) / number_intervals, volumes=None + ) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - #base_flow=[0.1, 0.2, 0.3, 0.4], - base_flow={ - (0,0):0.1, - (0,1):0.2, - (0,2):0.3, - (0,3):0.4 - } - ) - + node_key=node_A, + # base_flow=[0.1, 0.2, 0.3, 0.4], + base_flow={(0, 0): 0.1, (0, 1): 0.2, (0, 2): 0.3, (0, 3): 0.4}, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - #base_flow=[0.4, 0.3, 0.2, 0.1], - base_flow={ - (0,0):0.4, - (0,1):0.3, - (0,2):0.2, - (0,3):0.1 - } - ) - + node_key=node_B, + # base_flow=[0.4, 0.3, 0.2, 0.1], + base_flow={(0, 0): 0.4, (0, 1): 0.3, (0, 2): 0.2, (0, 3): 0.1}, + ) + # add arcs - + # import arc - + arc_tech_IA = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[10, 10.1, 10.2, 10.3, 10.4, 10.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - 
node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arc_tech_IA) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arc_tech_IA) + # export arc - + arc_tech_BE = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[0.5, 0.75, 1.0, 1.25, 1.5, 2.0], minimum_cost=[1, 1.1, 1.2, 1.3, 1.4, 1.5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_B, - node_key_b=exp_node_key, - arcs=arc_tech_BE) - - #************************************************************************** - #************************************************************************** - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_B, node_key_b=exp_node_key, arcs=arc_tech_BE) + + # ************************************************************************** + # ************************************************************************** + # arcs between A and B - + max_number_arcs_AB = 1 - + arc_tech_AB = Arcs( - name='any', - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + name="any", + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - - if case == '2_a': - + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB) + + if case == "2_a": # a) use only preexisting directed arcs - + max_number_arcs_AB = 2 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - - elif case == '2_b': - + + elif case == "2_b": # b) use only preexisting undirected arcs - + max_number_arcs_AB = 2 - + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - - elif case == '2_c': - + + elif case == "2_c": # c) use preexisting directed and undirected arcs - + max_number_arcs_AB = 3 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 
3): 1}, static_loss=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, static_loss=None, efficiency_reverse=None, - capacity=0.05, - capacity_is_instantaneous=False - ) - + capacity=0.05, + capacity_is_instantaneous=False, + ) + mandatory_arcs = [] - - elif case == '2_d': - + + elif case == "2_d": # d) use only mandatory directed arcs - + max_number_arcs_AB = 3 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, 2), - ('mynet', node_A, node_B, 0)] - - elif case == '2_e': - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2) + + mandatory_arcs = [ + ("mynet", node_A, node_B, 1), + ("mynet", node_A, node_B, 2), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_e": # e) use only mandatory undirected arcs - + max_number_arcs_AB = 3 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB1_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 
4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB1_und), - ('mynet', node_A, node_B, arc_key_AB2_und), - ('mynet', node_A, node_B, 0)] - - elif case == '2_f': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB1_und), + ("mynet", node_A, node_B, arc_key_AB2_und), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_f": # f) use mandatory directed and undirected arcs - + max_number_arcs_AB = 3 - + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 1), - ('mynet', node_A, node_B, arc_key_AB2_und), - ('mynet', node_A, node_B, 0)] - - elif case == '2_g': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, 1), + ("mynet", node_A, node_B, arc_key_AB2_und), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_g": # g) use preexisting directed arcs and mandatory directed arcs - + max_number_arcs_AB = 3 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', 
node_A, node_B, 2), - ('mynet', node_A, node_B, 0)] - - elif case == '2_h': - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + + mandatory_arcs = [("mynet", node_A, node_B, 2), ("mynet", node_A, node_B, 0)] + + elif case == "2_h": # h) use preexisting undirected arcs and mandatory undirected arcs - + max_number_arcs_AB = 3 - + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 0), - ('mynet', node_A, node_B, arc_key_AB2_und)] - - elif case == '2_i': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, 0), + ("mynet", node_A, node_B, arc_key_AB2_und), + ] + + elif case == "2_i": # i) use preexisting undirected arcs and mandatory directed arcs - + max_number_arcs_AB = 3 - + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 2), - ('mynet', node_A, node_B, 0)] - - elif case == '2_j': - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + + mandatory_arcs = [("mynet", node_A, node_B, 2), ("mynet", node_A, node_B, 0)] + + elif case == "2_j": # j) use preexisting directed arcs and mandatory undirected arcs - + max_number_arcs_AB = 3 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - 
(0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB1_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB1_und), - ('mynet', node_A, node_B, 0)] - - elif case == '2_k': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB1_und), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_k": # k) use preexi. directed and undirected arcs and mandat. directed arcs - + max_number_arcs_AB = 4 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, - capacity=0.1, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, + capacity=0.1, static_loss=None, - capacity_is_instantaneous=False - ) - + capacity_is_instantaneous=False, + ) + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=5, - capacity_is_instantaneous=False - ) - + capacity=5, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - - mandatory_arcs = [('mynet', node_A, node_B, 3), - ('mynet', node_A, node_B, 0)] - - elif case == '2_l': - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + + mandatory_arcs = [("mynet", node_A, node_B, 3), ("mynet", node_A, node_B, 0)] + + elif case == "2_l": # l) use preexi. directed and undir. 
arcs and mandatory undirected arcs - + max_number_arcs_AB = 4 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, - capacity=0.1, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, + capacity=0.1, static_loss=None, - capacity_is_instantaneous=False - ) - + capacity_is_instantaneous=False, + ) + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=0.05, - capacity_is_instantaneous=False - ) - + capacity=0.05, + capacity_is_instantaneous=False, + ) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB2_und), - ('mynet', node_A, node_B, 0)] - - elif case == '2_m': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB2_und), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_m": # m) use preexi. directed arcs and mandat. 
directed and undirected arcs - + max_number_arcs_AB = 4 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, - capacity=0.1, - static_loss=None, - capacity_is_instantaneous=False - ) - + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, + capacity=0.1, + static_loss=None, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, arc_key_AB2_und), - ('mynet', node_A, node_B, 2), - ('mynet', node_A, node_B, 0)] - - elif case == '2_n': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, arc_key_AB2_und), + ("mynet", node_A, node_B, 2), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_n": # n) use preexi. undirected arcs and man. 
directed and undirected arcs - + max_number_arcs_AB = 4 - + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=0.05, - capacity_is_instantaneous=False - ) - + capacity=0.05, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 2), - ('mynet', node_A, node_B, arc_key_AB2_und), - ('mynet', node_A, node_B, 0)] - - elif case == '2_o': - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, 2), + ("mynet", node_A, node_B, arc_key_AB2_und), + ("mynet", node_A, node_B, 0), + ] + + elif case == "2_o": # o) use pre-existing and mandatory directed and undirected arcs - + max_number_arcs_AB = 5 - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + capacity=0.1, + capacity_is_instantaneous=False, + ) + arc_key_AB1_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - #efficiency=[1, 1, 1, 1], - efficiency={ - (0,0):1, - (0,1):1, - (0,2):1, - (0,3):1 - }, + node_key_a=node_A, + node_key_b=node_B, + # efficiency=[1, 1, 1, 1], + efficiency={(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1}, static_loss=None, efficiency_reverse=None, - capacity=0.05, - capacity_is_instantaneous=False - ) - + capacity=0.05, + capacity_is_instantaneous=False, + ) + arc_tech_AB1 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, 
capacity=[1, 2, 3, 4, 5], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1, - capacity_is_instantaneous=False) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB1) - + specific_capacity_cost=1, + capacity_is_instantaneous=False, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB1) + arc_tech_AB2 = Arcs( - name='any', - #efficiency=[0.8, 0.8, 0.8, 0.8], - efficiency={ - (0,0):0.8, - (0,1):0.8, - (0,2):0.8, - (0,3):0.8 - }, + name="any", + # efficiency=[0.8, 0.8, 0.8, 0.8], + efficiency={(0, 0): 0.8, (0, 1): 0.8, (0, 2): 0.8, (0, 3): 0.8}, efficiency_reverse=None, static_loss=None, validate=False, capacity=[1.1, 2.1, 3.1, 4.1, 5.1], minimum_cost=[1, 2, 3, 4, 5], - specific_capacity_cost=1.1, - capacity_is_instantaneous=False) - + specific_capacity_cost=1.1, + capacity_is_instantaneous=False, + ) + arc_key_AB2_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB2) - - mandatory_arcs = [('mynet', node_A, node_B, 3), - ('mynet', node_A, node_B, arc_key_AB2_und), - ('mynet', node_A, node_B, 0)] - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB2 + ) + + mandatory_arcs = [ + ("mynet", node_A, node_B, 3), + ("mynet", node_A, node_B, arc_key_AB2_und), + ("mynet", node_A, node_B, 0), + ] + # else: - + # mandatory_arcs = [] - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -7056,187 +6317,178 @@ def example_problem_max_arc_limits_skip( sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=mandatory_arcs, - max_number_parallel_arcs={('mynet',node_A,node_B):max_number_arcs_AB}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + max_number_parallel_arcs={("mynet", node_A, node_B): max_number_arcs_AB}, + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # validation - + # assert that the constraint was not needed - + # ipp.instance.constr_limited_parallel_arcs_per_direction.pprint() - + assert ( - ('mynet', node_A, node_B) not in - ipp.instance.constr_limited_parallel_arcs_per_direction - ) - + "mynet", + node_A, + node_B, + ) not in ipp.instance.constr_limited_parallel_arcs_per_direction + # the import arc is installed - - assert True in ipp.networks['mynet'].edges[(imp_node_key, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the intermediate arc was installed - - assert True in ipp.networks['mynet'].edges[(node_A, node_B, 0)][ - 
Network.KEY_ARC_TECH].options_selected - - # the export arc was installed - - assert True in ipp.networks['mynet'].edges[(node_B, exp_node_key, 0)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + # the export arc was installed + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_B, exp_node_key, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert flow_in[('mynet',0,0)] > 0.0 - abs_tol - - assert flow_in_cost[('mynet',0,0)] > 0.0 - abs_tol - + + assert flow_in[("mynet", 0, 0)] > 0.0 - abs_tol + + assert flow_in_cost[("mynet", 0, 0)] > 0.0 - abs_tol + # there should be exports - + abs_tol = 1e-2 - - assert flow_out[('mynet',0,0)] > 0.0 - abs_tol - - assert flow_out_revenue[('mynet',0,0)] > 0.0 - abs_tol - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + + assert flow_out[("mynet", 0, 0)] > 0.0 - abs_tol + + assert flow_out_revenue[("mynet", 0, 0)] > 0.0 - abs_tol + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_report_directed_network_static_losses( - solver, - solver_options, - static_losses_mode, - use_new_arcs, - init_aux_sets): - + solver, solver_options, static_losses_mode, use_new_arcs, init_aux_sets +): q = 0 - + # time - - number_intervals = 1 - + + number_intervals = 1 + + number_periods = 2 # 3 nodes: one import, one waypoint, one supply/demand node - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_waypoint_node(node_key=node_A) - + node_B = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_B, - base_flow={ - (q,0):0.2 - } - ) - + + mynet.add_source_sink_node(node_key=node_B, base_flow={(q, 0): 0.2}) + # add arcs - + # IA arc - + mynet.add_infinite_capacity_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - efficiency={ - (q,0):1 - }, - static_loss=None) - + node_key_a=imp_node_key, + node_key_b=node_A, + efficiency={(q, 0): 1}, + static_loss=None, + ) + if use_new_arcs: - # AB arc - + arc_tech_AB = Arcs( - name='AB', - #efficiency=[1, 1, 1, 1], - efficiency={ - (q,0):0.8 - }, + name="AB", + # efficiency=[1, 1, 1, 1], + efficiency={(q, 0): 0.8}, efficiency_reverse=None, 
validate=False, - capacity=[1.0], - minimum_cost=[0], - specific_capacity_cost=0, - capacity_is_instantaneous=False, - static_loss={ - (0,q,0):0.10 - }, - ) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + capacity=[1.0], + minimum_cost=[0], + specific_capacity_cost=0, + capacity_is_instantaneous=False, + static_loss={(0, q, 0): 0.10}, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB) + else: - mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency={ - (q,0):0.8 - }, - static_loss={ - (0,q,0):0.10 - }, - capacity=1.0, - capacity_is_instantaneous=False) - - + node_key_a=node_A, + node_key_b=node_B, + efficiency={(q, 0): 0.8}, + static_loss={(0, q, 0): 0.10}, + capacity=1.0, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -7248,94 +6500,96 @@ def example_report_directed_network_static_losses( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], 0.35, abs_tol=abs_tol) - - # there should be no exports - + + assert math.isclose(flow_in[("mynet", 0, 0)], 0.35, abs_tol=abs_tol) + + # there should be no exports + abs_tol = 1e-6 - - assert math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # flow through IA must be 0.35 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,0,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, 0, 0)]), 0.35, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # validation - - if (static_losses_mode == - InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR): - + + if static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR: # losses are downstream - + # flow through AB must be 0.35 - - assert 
math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',node_A,node_B,0,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_B, 0, 0, 0)]), 0.35, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + else: - # losses are upstream - + # flow through AB must be 0.25 - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',node_A,node_B,0,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_B, 0, 0, 0)]), 0.25, - abs_tol=abs_tol - ) - -#****************************************************************************** -#****************************************************************************** + abs_tol=abs_tol, + ) + + +# ****************************************************************************** +# ****************************************************************************** # test with pre-existing arcs: # one import node, one regular node, and two directed arcs for imports @@ -7345,86 +6599,73 @@ # this way, the arc with losses does not have to be used but still has losses # that requires that its efficiency be lower than the arc without losses -def example_directed_arc_static_downstream_pre( - solver, - solver_options, - init_aux_sets): - + +def example_directed_arc_static_downstream_pre(solver, solver_options, init_aux_sets): q = 0 - + # time - - number_intervals = 1 - - number_periods = 2 - # 4 nodes: one import, one export, two supply/demand nodes - + number_intervals = 1 + + number_periods = 2 + + # 2 nodes: one import, one supply/demand node + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_A, - base_flow={ - (q,0):1.0 - } - ) - + + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0}) + # add arcs - + # IA1 mynet.add_preexisting_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - efficiency={ - (q,0):0.9 - }, - static_loss={ - (q,0,0):0.1 - }, - capacity=0.5, - capacity_is_instantaneous=False) - + node_key_a=imp_node_key, + node_key_b=node_A, + efficiency={(q, 0): 0.9}, + static_loss={(q, 0, 0): 0.1}, + capacity=0.5, + capacity_is_instantaneous=False, + ) + # IA2 mynet.add_preexisting_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - efficiency=None, + node_key_a=imp_node_key, + node_key_b=node_A, + efficiency=None, static_loss=None, - capacity=1.2, - capacity_is_instantaneous=False) - + capacity=1.2, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -7436,73 +6677,80 @@ def example_directed_arc_static_downstream_pre( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, 
# True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=True, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 1)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 1)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], (1.0+0.1), abs_tol=abs_tol) - - # there should be no exports - + + assert math.isclose(flow_in[("mynet", 0, 0)], (1.0 + 0.1), abs_tol=abs_tol) + + # there should be no exports + abs_tol = 1e-6 - - assert math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # flow through IA1 must be 0.1 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,0,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, 0, 0)]), 0.1, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow through IA2 must be 1.0 - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,1,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 1, 0, 0)]), 1.0, - abs_tol=abs_tol - ) - -#****************************************************************************** -#****************************************************************************** + abs_tol=abs_tol, + ) + + +# ****************************************************************************** +# ****************************************************************************** # test with new arcs: # two steps must be used, one to force the investments, another to demonstrate @@ -7512,107 +6760,83 @@ # the arc with losses should not be used during one of the time steps # during the other, the conditions must be such that the lossy arc is necessary -def example_directed_arc_static_downstream_new( - solver, - solver_options, - init_aux_sets): - + +def example_directed_arc_static_downstream_new(solver, solver_options, init_aux_sets): q = 0 - + # time - + number_intervals = 2 - + number_periods = 2 # 2 nodes: one import, one supply/demand node - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) 
- + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=0+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=0 + random.random(), volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_A, - base_flow={ - (q,0):1.0, - (q,1):1.3 - } - ) - + + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): 1.3}) + # add arcs - + # IA1 - + arcs_ia1 = Arcs( - name='IA1', - efficiency={ - (q,0): 0.9, - (q,1): 0.9 - }, - efficiency_reverse=None, - static_loss={ - (0,q,0): 0.0, - (0,q,1): 0.1 - }, - capacity=tuple([0.5/0.9]), + name="IA1", + efficiency={(q, 0): 0.9, (q, 1): 0.9}, + efficiency_reverse=None, + static_loss={(0, q, 0): 0.0, (0, q, 1): 0.1}, + capacity=tuple([0.5 / 0.9]), minimum_cost=tuple([0.1]), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arcs_ia1 - ) - + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arcs_ia1) + # IA2 - + arcs_ia2 = Arcs( - name='IA2', + name="IA2", efficiency=None, - efficiency_reverse=None, - static_loss=None, + efficiency_reverse=None, + static_loss=None, capacity=tuple([1.2]), - minimum_cost=tuple([0.1]), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - - mynet.add_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - arcs=arcs_ia2 - ) - + minimum_cost=tuple([0.1]), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + + mynet.add_directed_arc(node_key_a=imp_node_key, node_key_b=node_A, arcs=arcs_ia2) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -7624,257 +6848,239 @@ def example_directed_arc_static_downstream_new( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=True, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 1)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 1)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - 
+ + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - + assert math.isclose( - flow_in[('mynet',0,0)], - (1.2+0.1/0.9+1.0+0.1), - abs_tol=abs_tol - ) - - # there should be no exports - + flow_in[("mynet", 0, 0)], (1.2 + 0.1 / 0.9 + 1.0 + 0.1), abs_tol=abs_tol + ) + + # there should be no exports + abs_tol = 1e-6 - - assert math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # interval 0: flow through IA1 must be 0 # interval 1: flow through IA1 must be 0.1+0.1/0.9 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,0,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, 0, 0)]), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,0,0,1)] - ), - 0.1+0.1/0.9, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, 0, 1)]), + 0.1 + 0.1 / 0.9, + abs_tol=abs_tol, + ) + # interval 0: flow through IA2 must be 1.0 # interval 1: flow through IA2 must be 1.2 - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,1,0,0)] - ), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 1, 0, 0)]), 1.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,1,0,1)] - ), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 1, 0, 1)]), 1.2, - abs_tol=abs_tol - ) - -#****************************************************************************** -#****************************************************************************** + abs_tol=abs_tol, + ) + + +# ****************************************************************************** +# ****************************************************************************** # test the capacity reduction when losses are upstream + def example_directed_arc_static_upstream( - solver, - solver_options, - use_new_arcs, - init_aux_sets): - + solver, solver_options, use_new_arcs, init_aux_sets +): q = 0 - + # time - + number_intervals = 1 - + number_periods = 2 # 4 nodes: two import nodes, two supply/demand nodes - + mynet = Network() - + # import nodes - + # imp1_prices = ResourcePrice( - # prices=[1 for i in range(number_intervals)], + # prices=[1 for i in range(number_intervals)], # volumes=None) - + imp1_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp1_node_key, + node_key=imp1_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1, - volumes=None - ) + (q, p, k): ResourcePrice(prices=1, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # imp2_prices = ResourcePrice( - # prices=[2 for i in range(number_intervals)], + # prices=[2 for i in range(number_intervals)], # volumes=None) - + imp2_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp2_node_key, + node_key=imp2_node_key, prices={ - (q,p,k): ResourcePrice( - prices=2, - volumes=None - ) + (q, p, k): ResourcePrice(prices=2, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - 
+ }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_waypoint_node( - node_key=node_A - ) - + + mynet.add_waypoint_node(node_key=node_A) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, + node_key=node_B, base_flow={ - (q,0):1.0, - } - ) - + (q, 0): 1.0, + }, + ) + # add arcs - + # I1A - + mynet.add_preexisting_directed_arc( - node_key_a=imp1_node_key, + node_key_a=imp1_node_key, node_key_b=node_A, - efficiency=None, - static_loss=None, - capacity=1, - capacity_is_instantaneous=False) - - if use_new_arcs: - + efficiency=None, + static_loss=None, + capacity=1, + capacity_is_instantaneous=False, + ) + + if use_new_arcs: # I2B - + arcs_i2b = Arcs( - name='I2B', + name="I2B", efficiency=None, - efficiency_reverse=None, - static_loss=None, + efficiency_reverse=None, + static_loss=None, capacity=(0.1,), minimum_cost=(0.025,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + mynet.add_directed_arc( - node_key_a=imp2_node_key, - node_key_b=node_B, - arcs=arcs_i2b - ) - + node_key_a=imp2_node_key, node_key_b=node_B, arcs=arcs_i2b + ) + # AB - + arcs_ab = Arcs( - name='IA1', + name="IA1", efficiency=None, - efficiency_reverse=None, - static_loss={ - (0,q,0): 0.1 - }, + efficiency_reverse=None, + static_loss={(0, q, 0): 0.1}, capacity=(1,), minimum_cost=(0.05,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arcs_ab - ) - + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_B, arcs=arcs_ab) + else: - # I2B - + mynet.add_preexisting_directed_arc( - node_key_a=imp2_node_key, + node_key_a=imp2_node_key, node_key_b=node_B, - efficiency=None, - static_loss=None, - capacity=0.1, - capacity_is_instantaneous=False - ) - + efficiency=None, + static_loss=None, + capacity=0.1, + capacity_is_instantaneous=False, + ) + # AB - + mynet.add_preexisting_directed_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency=None, - static_loss={ - (0,q,0): 0.1 - }, - capacity=1, - capacity_is_instantaneous=False - ) - + node_key_a=node_A, + node_key_b=node_B, + efficiency=None, + static_loss={(0, q, 0): 0.1}, + capacity=1, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -7886,251 +7092,224 @@ def example_directed_arc_static_upstream( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp1_node_key, node_A, 
0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp2_node_key, node_B, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp1_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp2_node_key, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], 1.1, abs_tol=abs_tol) - - # there should be no exports - + + assert math.isclose(flow_in[("mynet", 0, 0)], 1.1, abs_tol=abs_tol) + + # there should be no exports + abs_tol = 1e-6 - - assert math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # interval 0: flow through IA1 must be 1 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,0)]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 0)]), 1, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # interval 0: flow through AB must be 0.9 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[('mynet',node_A,node_B,0,0,0)]), - 0.9, - abs_tol=abs_tol - ) - + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_B, 0, 0, 0)]), + 0.9, + abs_tol=abs_tol, + ) + # interval 0: flow through IB2 must be 0.1 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,0)]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 0)]), 0.1, - abs_tol=abs_tol - ) - -#****************************************************************************** -#****************************************************************************** + abs_tol=abs_tol, + ) + + +# ****************************************************************************** +# ****************************************************************************** + def example_report_undirected_network_static_losses( - solver, - solver_options, - static_losses_mode, - use_new_arcs, - invert_original_direction, - init_aux_sets): - + solver, + solver_options, + static_losses_mode, + use_new_arcs, + invert_original_direction, + init_aux_sets, +): q = 0 - + # time - - number_intervals = 2 - + + number_intervals = 2 + number_periods = 2 - + # 3 nodes: one import, two regular nodes - + mynet = Network() - + # import node - + # imp_prices = ResourcePrice( - # prices=[1+random.random() for i in range(number_intervals)], + # prices=[1+random.random() for i in range(number_intervals)], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+random.random(), - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + random.random(), volumes=None) for p in 
range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_A, - base_flow={ - (q,0):0.0, - (q,1):0.4 - } - ) - + + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 0.0, (q, 1): 0.4}) + node_B = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_B, - base_flow={ - (q,0):0.2, - (q,1):-0.6 - } - ) - + + mynet.add_source_sink_node(node_key=node_B, base_flow={(q, 0): 0.2, (q, 1): -0.6}) + # add arcs - + # IA arc - + mynet.add_infinite_capacity_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - efficiency=None, - static_loss=None) - - AB_efficiency = { - (q,0):0.8, - (q,1):0.8 - } - - BA_efficiency = { - (q,0):0.5, - (q,1):0.5 - } - + node_key_a=imp_node_key, node_key_b=node_A, efficiency=None, static_loss=None + ) + + AB_efficiency = {(q, 0): 0.8, (q, 1): 0.8} + + BA_efficiency = {(q, 0): 0.5, (q, 1): 0.5} + if use_new_arcs: - # AB arc - - if invert_original_direction: + if invert_original_direction: arc_tech_AB = Arcs( - name='AB', + name="AB", efficiency=BA_efficiency, efficiency_reverse=AB_efficiency, validate=False, - capacity=[1.0], - minimum_cost=[0.01], - specific_capacity_cost=0, - capacity_is_instantaneous=False, - static_loss={ - (0,q,0):0.10, - (0,q,1):0.10 - }, - ) - + capacity=[1.0], + minimum_cost=[0.01], + specific_capacity_cost=0, + capacity_is_instantaneous=False, + static_loss={(0, q, 0): 0.10, (0, q, 1): 0.10}, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - arcs=arc_tech_AB) - + node_key_a=node_B, node_key_b=node_A, arcs=arc_tech_AB + ) + else: - arc_tech_AB = Arcs( - name='AB', + name="AB", efficiency=AB_efficiency, efficiency_reverse=BA_efficiency, validate=False, - capacity=[1.0], - minimum_cost=[0.01], - specific_capacity_cost=0, - capacity_is_instantaneous=False, - static_loss={ - (0,q,0):0.10, - (0,q,1):0.10 - }, - ) - + capacity=[1.0], + minimum_cost=[0.01], + specific_capacity_cost=0, + capacity_is_instantaneous=False, + static_loss={(0, q, 0): 0.10, (0, q, 1): 0.10}, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arc_tech_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arc_tech_AB + ) + else: - # pre-existing arcs if invert_original_direction: - arc_key_AB_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_B, - node_key_b=node_A, - efficiency=BA_efficiency, - efficiency_reverse=AB_efficiency, - static_loss={ - (0,q,0):0.10, - (0,q,1):0.10 - }, - capacity=1.0, - capacity_is_instantaneous=False) - + node_key_a=node_B, + node_key_b=node_A, + efficiency=BA_efficiency, + efficiency_reverse=AB_efficiency, + static_loss={(0, q, 0): 0.10, (0, q, 1): 0.10}, + capacity=1.0, + capacity_is_instantaneous=False, + ) + else: - arc_key_AB_und = mynet.add_preexisting_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - efficiency=AB_efficiency, - efficiency_reverse=BA_efficiency, - static_loss={ - (0,q,0):0.10, - (0,q,1):0.10 - }, - capacity=1.0, - capacity_is_instantaneous=False) - + node_key_a=node_A, + node_key_b=node_B, + efficiency=AB_efficiency, + efficiency_reverse=BA_efficiency, + static_loss={(0, q, 0): 0.10, (0, q, 1): 0.10}, + capacity=1.0, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -8142,447 +7321,465 @@ 
def example_report_undirected_network_static_losses( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + if invert_original_direction: - - assert True in ipp.networks['mynet'].edges[ - (node_B, node_A, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_B, node_A, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + else: - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # the flow through AB should be from A to B during interval 0 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,q,0) - ]), + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, q, 0) + ] + ), 1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,q,0) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, q, 0) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # the flow through AB should be from B to A during interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,q,1) - ]), + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, q, 1) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,q,1) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, q, 1) + ] + ), 1, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], (0.35+0.15), abs_tol=abs_tol) - - # there should be no exports - + + assert math.isclose(flow_in[("mynet", 0, 0)], (0.35 + 0.15), abs_tol=abs_tol) + + # there should be no exports + abs_tol = 1e-6 - - assert 
math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol)
-    
+
+    assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol)
+
    # flow through IA must be 0.35 during time interval 0
    # flow through IA must be 0.15 during time interval 1
-    
+
    abs_tol = 1e-6
-    
-    assert math.isclose(
-        pyo.value(
-            ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,0,q,0)]),
+
+    assert math.isclose(
+        pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, q, 0)]),
        0.35,
-        abs_tol=abs_tol
-        )
-    
+        abs_tol=abs_tol,
+    )
+
    abs_tol = 1e-6
-    
-    assert math.isclose(
-        pyo.value(
-            ipp.instance.var_v_glljqk[('mynet',imp_node_key,node_A,0,q,1)]),
+
+    assert math.isclose(
+        pyo.value(ipp.instance.var_v_glljqk[("mynet", imp_node_key, node_A, 0, q, 1)]),
        0.15,
-        abs_tol=abs_tol
-        )
-    
+        abs_tol=abs_tol,
+    )
+
    # flow from B to A must be 0 during time interval 0
    # flow from A to B must be 0 during time interval 1
-    
-    assert math.isclose(
-        pyo.value(ipp.instance.var_v_glljqk[
-            ('mynet',node_B,node_A,arc_key_AB_und,0,0)]),
+
+    assert math.isclose(
+        pyo.value(
+            ipp.instance.var_v_glljqk[("mynet", node_B, node_A, arc_key_AB_und, 0, 0)]
+        ),
        0.0,
-        abs_tol=abs_tol
-        )
-    
-    assert math.isclose(
-        pyo.value(ipp.instance.var_v_glljqk[
-            ('mynet',node_A,node_B,arc_key_AB_und,0,1)]),
+        abs_tol=abs_tol,
+    )
+
+    assert math.isclose(
+        pyo.value(
+            ipp.instance.var_v_glljqk[("mynet", node_A, node_B, arc_key_AB_und, 0, 1)]
+        ),
        0.0,
-        abs_tol=abs_tol
-        )
-    
+        abs_tol=abs_tol,
+    )
+
    # validation
-    
-    if (static_losses_mode == 
-        InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR):
-        
+
+    if static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR:
        # arrival node
-        
+
        if invert_original_direction:
-            
            # flow from A to B must be 0.25 during time interval 0
-            
-            assert math.isclose( 
+
+            assert math.isclose(
                pyo.value(
                    ipp.instance.var_v_glljqk[
-                        ('mynet',node_A,node_B,arc_key_AB_und,q,0)]),
+                        ("mynet", node_A, node_B, arc_key_AB_und, q, 0)
+                    ]
+                ),
                0.25,
-                abs_tol=abs_tol
-                )
-            
+                abs_tol=abs_tol,
+            )
+
            # flow from B to A must be 0.6 during time interval 1
-            
-            assert math.isclose( 
                pyo.value(
                    ipp.instance.var_v_glljqk[
-                        ('mynet',node_B,node_A,arc_key_AB_und,q,1)]),
+                        ("mynet", node_B, node_A, arc_key_AB_und, q, 1)
+                    ]
+                ),
                0.6,
-                abs_tol=abs_tol
-                )
-            
+                abs_tol=abs_tol,
+            )
+
        else:
-            
            # flow from A to B must be 0.35 during time interval 0
-            
-            assert math.isclose( 
                pyo.value(
                    ipp.instance.var_v_glljqk[
-                        ('mynet',node_A,node_B,arc_key_AB_und,q,0)]),
+                        ("mynet", node_A, node_B, arc_key_AB_und, q, 0)
+                    ]
+                ),
                0.35,
-                abs_tol=abs_tol
-                )
-            
+                abs_tol=abs_tol,
+            )
+
            # flow from B to A must be 0.5 during time interval 1
-            
-            assert math.isclose( 
                pyo.value(
                    ipp.instance.var_v_glljqk[
-                        ('mynet',node_B,node_A,arc_key_AB_und,q,1)]),
+                        ("mynet", node_B, node_A, arc_key_AB_und, q, 1)
+                    ]
+                ),
                0.5,
-                abs_tol=abs_tol
-                )
-            
-    elif (static_losses_mode == 
-        InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP):
-        
+                abs_tol=abs_tol,
+            )
+
+    elif static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP:
        # departure node
-        
+
        if invert_original_direction:
-            
            # arrival node
-            
+
            # flow from A to B must be 0.35 during time interval 0
-            
-            assert math.isclose(
-                pyo.value(ipp.instance.var_v_glljqk[
-                    ('mynet',node_A,node_B,arc_key_AB_und,q,0)]),
+
+            assert math.isclose(
+                pyo.value(
+                    ipp.instance.var_v_glljqk[
+                        ("mynet", node_A, node_B, arc_key_AB_und, q, 0)
+                    ]
+                ),
                0.35,
-                abs_tol=abs_tol
-                )
-            
+                abs_tol=abs_tol,
+            )
+
            # flow from B to A must be 0.5 during time interval 1
-            
-            assert math.isclose(
-                pyo.value(ipp.instance.var_v_glljqk[
-                    ('mynet',node_B,node_A,arc_key_AB_und,q,1)]),
+
+            assert math.isclose(
+                pyo.value(
+                    ipp.instance.var_v_glljqk[
+                        ("mynet", node_B, node_A, arc_key_AB_und, q, 1)
+                    ]
+                ),
                0.5,
-                abs_tol=abs_tol
-                )
-            
+                abs_tol=abs_tol,
+            )
+
        else:
-            
            # flow from A to B must be 0.25 during time interval 0
-            
-            assert math.isclose( 
                pyo.value(
                    ipp.instance.var_v_glljqk[
-                        ('mynet',node_A,node_B,arc_key_AB_und,q,0)]),
+                        ("mynet", node_A, node_B, arc_key_AB_und, q, 0)
+                    ]
+                ),
                0.25,
-                abs_tol=abs_tol
-                )
-            
+                abs_tol=abs_tol,
+            )
+
            # flow from B to A must be 0.6 during time interval 1
-            
-            assert math.isclose( 
                pyo.value(
                    ipp.instance.var_v_glljqk[
-                        ('mynet',node_B,node_A,arc_key_AB_und,q,1)]),
+                        ("mynet", node_B, node_A, arc_key_AB_und, q, 1)
+                    ]
+                ),
                0.6,
-                abs_tol=abs_tol
-                )
-            
-    elif (static_losses_mode == 
-        InfrastructurePlanningProblem.STATIC_LOSS_MODE_US):
-        
+                abs_tol=abs_tol,
+            )
+
+    elif static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_US:
        # upstream
-        
+
        # flow from A to B must be 0.25 during time interval 0
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_A,node_B,arc_key_AB_und,q,0)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_A, node_B, arc_key_AB_und, q, 0)
+                ]
+            ),
            0.25,
-            abs_tol=abs_tol
-            )
-        
+            abs_tol=abs_tol,
+        )
+
        # flow from B to A must be 0.5 during time interval 1
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_B,node_A,arc_key_AB_und,q,1)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_B, node_A, arc_key_AB_und, q, 1)
+                ]
+            ),
            0.5,
-            abs_tol=abs_tol
-            )
-        
+            abs_tol=abs_tol,
+        )
+
    else:
-        
        # downstream
-        
+
        # flow from A to B must be 0.35 during time interval 0
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_A,node_B,arc_key_AB_und,q,0)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_A, node_B, arc_key_AB_und, q, 0)
+                ]
+            ),
            0.35,
-            abs_tol=abs_tol
-            )
-        
+            abs_tol=abs_tol,
+        )
+
        # flow from B to A must be 0.6 during time interval 1
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_B,node_A,arc_key_AB_und,q,1)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_B, node_A, arc_key_AB_und, q, 1)
+                ]
+            ),
            0.6,
-            abs_tol=abs_tol
-            )
-    
-#******************************************************************************
-#******************************************************************************
+            abs_tol=abs_tol,
+        )
+
+
+# ******************************************************************************
+# ******************************************************************************
# test the capacity reduction when losses are upstream w/ undirected arcs
+
def example_undirected_arc_static_upstream(
-        solver,
-        solver_options,
-        use_new_arcs,
-        static_losses_mode,
-        init_aux_sets):
-    
+    solver, solver_options, use_new_arcs, static_losses_mode, init_aux_sets
+):
    q = 0
-    
+
    # time
-    
+
    number_intervals = 2
-    
+
    number_periods = 2

    # 4 nodes: two import nodes, two supply/demand nodes
-    
+
    mynet = Network()
-    
+
    # import nodes
-    
    # imp1_prices = ResourcePrice(
-    #     prices=[1, 2], 
+    #     prices=[1, 2],
    #     volumes=None)
-    
+
    imp1_node_key = generate_pseudo_unique_key(mynet.nodes())
-    
+
    mynet.add_import_node(
-        node_key=imp1_node_key, 
+        node_key=imp1_node_key,
        prices={
-            (q,p,k):
ResourcePrice( - prices=k+1, - volumes=None - ) + (q, p, k): ResourcePrice(prices=k + 1, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # imp2_prices = ResourcePrice( - # prices=[2, 1], + # prices=[2, 1], # volumes=None) - + imp2_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp2_node_key, + node_key=imp2_node_key, prices={ - (q,p,k): ResourcePrice( - prices=2-k, - volumes=None - ) + (q, p, k): ResourcePrice(prices=2 - k, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_A, - base_flow={ - (0,0):0.0, - (0,1):1.1 # 1.0 - } - ) - + node_key=node_A, base_flow={(0, 0): 0.0, (0, 1): 1.1} # 1.0 + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, - base_flow={ - (0,0):1.1, # 1.0 - (0,1):0.0 - } - ) - + node_key=node_B, base_flow={(0, 0): 1.1, (0, 1): 0.0} # 1.0 + ) + # add arcs - + # I1A - + mynet.add_preexisting_directed_arc( - node_key_a=imp1_node_key, + node_key_a=imp1_node_key, node_key_b=node_A, - efficiency=None, - static_loss=None, - capacity=1.2, - capacity_is_instantaneous=False) - + efficiency=None, + static_loss=None, + capacity=1.2, + capacity_is_instantaneous=False, + ) + # I2B - + mynet.add_preexisting_directed_arc( - node_key_a=imp2_node_key, + node_key_a=imp2_node_key, node_key_b=node_B, - efficiency=None, - static_loss=None, - capacity=1.2, - capacity_is_instantaneous=False) - - efficiency_AB = { - (0,0): 1, - (0,1): 1 - } - - efficiency_BA = { - (0,0): 1, - (0,1): 1 - } - - if use_new_arcs: - + efficiency=None, + static_loss=None, + capacity=1.2, + capacity_is_instantaneous=False, + ) + + efficiency_AB = {(0, 0): 1, (0, 1): 1} + + efficiency_BA = {(0, 0): 1, (0, 1): 1} + + if use_new_arcs: # AB - + static_loss_AB = { - (0,q,0): 0.1, - (0,q,1): 0.1, - (1,q,0): 0.1, - (1,q,1): 0.1 - } - + (0, q, 0): 0.1, + (0, q, 1): 0.1, + (1, q, 0): 0.1, + (1, q, 1): 0.1, + } + arcs_ab = Arcs( - name='AB', + name="AB", efficiency=efficiency_AB, - efficiency_reverse=efficiency_BA, - static_loss=static_loss_AB, - capacity=(0.5, 1,), - minimum_cost=(0.025, 0.05,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - + efficiency_reverse=efficiency_BA, + static_loss=static_loss_AB, + capacity=( + 0.5, + 1, + ), + minimum_cost=( + 0.025, + 0.05, + ), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arcs_ab - ) - + node_key_a=node_A, node_key_b=node_B, arcs=arcs_ab + ) + else: - # AB - - static_loss_AB = { - (0,q,0): 0.1, - (0,q,1): 0.1 - } - + + static_loss_AB = {(0, q, 0): 0.1, (0, q, 1): 0.1} + arc_key_AB_und = mynet.add_preexisting_undirected_arc( node_key_a=node_A, node_key_b=node_B, - efficiency=efficiency_BA, - efficiency_reverse=efficiency_BA, - static_loss=static_loss_AB, - capacity=1, - capacity_is_instantaneous=False) - + efficiency=efficiency_BA, + efficiency_reverse=efficiency_BA, + static_loss=static_loss_AB, + capacity=1, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -8594,426 +7791,435 @@ def example_undirected_arc_static_upstream( 
sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp1_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp2_node_key, node_B, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp1_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp2_node_key, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # the flow through AB should be from A to B during interval 0 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,q,0) - ]), + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, q, 0) + ] + ), 1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,q,0) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, q, 0) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # the flow through AB should be from B to A during interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,q,1) - ]), + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, q, 1) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_zeta_sns_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,q,1) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_zeta_sns_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, q, 1) + ] + ), 1, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], (1.2+1.2), abs_tol=abs_tol) - - # there should be no exports - + + assert math.isclose(flow_in[("mynet", 0, 0)], (1.2 + 1.2), abs_tol=abs_tol) + + # there should be no exports + abs_tol = 1e-6 - - assert math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # flow 
through I1A must be 1.0 during time interval 0 # flow through I1A must be 0.2 during time interval 1 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,0)]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 0)]), 1.0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,1)]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 1)]), 0.2, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow through I2B must be 0.2 during time interval 0 # flow through I2B must be 1.0 during time interval 1 - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,0) - ]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 0)]), 0.2, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,1) - ]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 1)]), 1.0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from B to A must be 0 during time interval 0 # flow from A to B must be 0 during time interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,0)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[("mynet", node_B, node_A, arc_key_AB_und, 0, 0)] + ), 0.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,1)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[("mynet", node_A, node_B, arc_key_AB_und, 0, 1)] + ), 0.0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # validation - - if (static_losses_mode == - InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR): - + + if static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR: # arrival node - + # flow from A to B must be 1.0 during time interval 0 - - assert math.isclose( + + assert math.isclose( pyo.value( ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,0)]), + ("mynet", node_A, node_B, arc_key_AB_und, 0, 0) + ] + ), 1.0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from B to A must be 0.9 during time interval 1 - - assert math.isclose( + + assert math.isclose( pyo.value( ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,1)]), + ("mynet", node_B, node_A, arc_key_AB_und, 0, 1) + ] + ), 0.9, - abs_tol=abs_tol - ) - - elif (static_losses_mode == - InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP): - + abs_tol=abs_tol, + ) + + elif static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP: # departure node - + # flow from A to B must be 0.9 during time interval 0 - - assert math.isclose( + + assert math.isclose( pyo.value( ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,0)]), + ("mynet", node_A, node_B, arc_key_AB_und, 0, 0) + ] + ), 0.9, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from B to A must be 1.0 during time interval 1 - - assert math.isclose( + + assert math.isclose( pyo.value( ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,1)]), + ("mynet", node_B, node_A, 
arc_key_AB_und, 0, 1)
+                ]
+            ),
            1.0,
-            abs_tol=abs_tol
-            )
-        
-    elif (static_losses_mode == 
-        InfrastructurePlanningProblem.STATIC_LOSS_MODE_US):
-        
+            abs_tol=abs_tol,
+        )
+
+    elif static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_US:
        # upstream
-        
+
        # flow from A to B must be 0.9 during time interval 0
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_A,node_B,arc_key_AB_und,0,0)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_A, node_B, arc_key_AB_und, 0, 0)
+                ]
+            ),
            0.9,
-            abs_tol=abs_tol
-            )
-        
+            abs_tol=abs_tol,
+        )
+
        # flow from B to A must be 0.9 during time interval 1
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_B,node_A,arc_key_AB_und,0,1)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_B, node_A, arc_key_AB_und, 0, 1)
+                ]
+            ),
            0.9,
-            abs_tol=abs_tol
-            )
-        
+            abs_tol=abs_tol,
+        )
+
    else:
-        
        # downstream
-        
+
        # flow from A to B must be 1.0 during time interval 0
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_A,node_B,arc_key_AB_und,0,0)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_A, node_B, arc_key_AB_und, 0, 0)
+                ]
+            ),
            1.0,
-            abs_tol=abs_tol
-            )
-        
+            abs_tol=abs_tol,
+        )
+
        # flow from B to A must be 1.0 during time interval 1
-        
-        assert math.isclose(
-            pyo.value(ipp.instance.var_v_glljqk[
-                ('mynet',node_B,node_A,arc_key_AB_und,0,1)]),
+
+        assert math.isclose(
+            pyo.value(
+                ipp.instance.var_v_glljqk[
+                    ("mynet", node_B, node_A, arc_key_AB_und, 0, 1)
+                ]
+            ),
            1.0,
-            abs_tol=abs_tol
-            )
-    
-#******************************************************************************
-#******************************************************************************
+            abs_tol=abs_tol,
+        )
+
+
+# ******************************************************************************
+# ******************************************************************************
# test the capacity reduction when losses are downstream w/ undirected arcs
+
def example_undirected_arc_static_downstream(
-        solver,
-        solver_options,
-        use_new_arcs,
-        static_losses_mode,
-        init_aux_sets):
-    
+    solver, solver_options, use_new_arcs, static_losses_mode, init_aux_sets
+):
    q = 0
-    
+
    # time
-    
+
    number_intervals = 4
-    
+
    number_periods = 2

    # 4 nodes: two import nodes, two supply/demand nodes
-    
+
    mynet = Network()
-    
+
    # import nodes
-    
-    imp1_prices = [
-        ResourcePrice(
-            prices=k,
-            volumes=None)
-        for k in [1,2,1,1]
-        ]
-    
+
+    imp1_prices = [ResourcePrice(prices=k, volumes=None) for k in [1, 2, 1, 1]]
+
    imp1_node_key = generate_pseudo_unique_key(mynet.nodes())
-    
+
    mynet.add_import_node(
-        node_key=imp1_node_key, 
+        node_key=imp1_node_key,
        prices={
-            (q,p,k): imp1_prices[k]
+            (q, p, k): imp1_prices[k]
            for p in range(number_periods)
            for k in range(number_intervals)
-            }
-        )
-    
-    imp2_prices = [
-        ResourcePrice(
-            prices=k,
-            volumes=None)
-        for k in [2,1,2,2]
-        ]
-    
+        },
+    )
+
+    imp2_prices = [ResourcePrice(prices=k, volumes=None) for k in [2, 1, 2, 2]]
+
    imp2_node_key = generate_pseudo_unique_key(mynet.nodes())
-    
+
    mynet.add_import_node(
-        node_key=imp2_node_key, 
+        node_key=imp2_node_key,
        prices={
-            (q,p,k): imp2_prices[k]
+            (q, p, k): imp2_prices[k]
            for p in range(number_periods)
            for k in range(number_intervals)
-            }
-        )
-    
+        },
+    )
+
    # other nodes
-    
+
    node_A = generate_pseudo_unique_key(mynet.nodes())
-    
+
    mynet.add_source_sink_node(
-        node_key=node_A, 
+        node_key=node_A,
        base_flow={
-            (0,0):1.0, # to be provided via I1 but
AB losses have to be comp. - (0,1):0.0, - (0,2):0.0, - (0,3):0.0 - } - ) - + (0, 0): 1.0, # to be provided via I1 but AB losses have to be comp. + (0, 1): 0.0, + (0, 2): 0.0, + (0, 3): 0.0, + }, + ) + node_B = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_source_sink_node( - node_key=node_B, + node_key=node_B, base_flow={ - (0,0):0.0, - (0,1):1.0, # to be provided via I2 but AB losses have to be comp. - (0,2):2.0, # forces the undirected arc to be used and installed - (0,3):0.9 # forces the undirected arc to be used and installed - } - ) - + (0, 0): 0.0, + (0, 1): 1.0, # to be provided via I2 but AB losses have to be comp. + (0, 2): 2.0, # forces the undirected arc to be used and installed + (0, 3): 0.9, # forces the undirected arc to be used and installed + }, + ) + # add arcs - + # I1A - + mynet.add_preexisting_directed_arc( - node_key_a=imp1_node_key, + node_key_a=imp1_node_key, node_key_b=node_A, - efficiency=None, - static_loss=None, - capacity=1.1, - capacity_is_instantaneous=False) - + efficiency=None, + static_loss=None, + capacity=1.1, + capacity_is_instantaneous=False, + ) + # I2B - + mynet.add_preexisting_directed_arc( - node_key_a=imp2_node_key, + node_key_a=imp2_node_key, node_key_b=node_B, - efficiency=None, - static_loss=None, - capacity=1.1, - capacity_is_instantaneous=False) - - efficiency_AB = { - (0,0): 1, - (0,1): 1, - (0,2): 1, - (0,3): 1 - } - - efficiency_BA = { - (0,0): 1, - (0,1): 1, - (0,2): 1, - (0,3): 1 - } - - if use_new_arcs: - + efficiency=None, + static_loss=None, + capacity=1.1, + capacity_is_instantaneous=False, + ) + + efficiency_AB = {(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1} + + efficiency_BA = {(0, 0): 1, (0, 1): 1, (0, 2): 1, (0, 3): 1} + + if use_new_arcs: # AB - + static_loss_AB = { - (0,q,0): 0.1, - (0,q,1): 0.1, - (0,q,2): 0.1, - (0,q,3): 0.1 - } - + (0, q, 0): 0.1, + (0, q, 1): 0.1, + (0, q, 2): 0.1, + (0, q, 3): 0.1, + } + arcs_ab = Arcs( - name='AB', + name="AB", efficiency=efficiency_AB, - efficiency_reverse=efficiency_BA, - static_loss=static_loss_AB, + efficiency_reverse=efficiency_BA, + static_loss=static_loss_AB, capacity=(1,), minimum_cost=(0.05,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_AB_und = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arcs_ab - ) - + node_key_a=node_A, node_key_b=node_B, arcs=arcs_ab + ) + else: - # AB - + static_loss_AB = { - (0,q,0): 0.1, - (0,q,1): 0.1, - (0,q,2): 0.1, - (0,q,3): 0.1 - } - + (0, q, 0): 0.1, + (0, q, 1): 0.1, + (0, q, 2): 0.1, + (0, q, 3): 0.1, + } + arc_key_AB_und = mynet.add_preexisting_undirected_arc( node_key_a=node_A, node_key_b=node_B, - efficiency=efficiency_BA, - efficiency_reverse=efficiency_BA, - static_loss=static_loss_AB, - capacity=1, - capacity_is_instantaneous=False) - + efficiency=efficiency_BA, + efficiency_reverse=efficiency_BA, + static_loss=static_loss_AB, + capacity=1, + capacity_is_instantaneous=False, + ) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -9025,654 +8231,747 @@ def example_undirected_arc_static_downstream( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + 
networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # if print_model: - + # all arcs should be installed (they are not new) - - assert True in ipp.networks['mynet'].edges[ - (imp1_node_key, node_A, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp2_node_key, node_B, 0)][Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB_und)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp1_node_key, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp2_node_key, node_B, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB_und)][Network.KEY_ARC_TECH] + .options_selected + ) + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # there should be imports - + abs_tol = 1e-6 - - assert math.isclose(flow_in[('mynet',0,0)], (1+1+2+0.3+1), abs_tol=abs_tol) - - # there should be no exports - + + assert math.isclose( + flow_in[("mynet", 0, 0)], (1 + 1 + 2 + 0.3 + 1), abs_tol=abs_tol + ) + + # there should be no exports + abs_tol = 1e-6 - - assert math.isclose(flow_out[('mynet',0,0)], 0, abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # flow through I1A must be 1.1 during time interval 0 # flow through I1A must be 0.0 during time interval 1 # flow through I1A must be 1.0 during time interval 2 (flow from B to A) # flow through I1A must be 1.0 during time interval 3 (because AB is used from B to A) - + abs_tol = 1e-6 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,0)]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 0)]), 1.1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,1)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 1)]), 0.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,2)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 2)]), 1.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp1_node_key,node_A,0,0,3)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp1_node_key, node_A, 0, 0, 3)]), 1.0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow through I2B must be 0.0 during time interval 0 # flow through I2B must be 1.1 during time interval 1 # flow through I2B must be 1.1 during time interval 2 # flow through I2B must be 0.0 during time interval 3 - + abs_tol = 1e-6 - - assert 
math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,0) - ]), + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 0)]), 0.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,1) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 1)]), 1.1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,2) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 2)]), 1.1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',imp2_node_key,node_B,0,0,3) - ]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value(ipp.instance.var_v_glljqk[("mynet", imp2_node_key, node_B, 0, 0, 3)]), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # validation - - if (static_losses_mode == - InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR): - + + if static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_ARR: # arrival node - + # losses are always in B - + # flow from A to B must be 0.1 during time interval 0 # flow from B to A must be 0 during time interval 0 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,0)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 0) + ] + ), 0.1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,0)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 0) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0 during time interval 1 # flow from B to A must be 0 during time interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,1)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 1) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,1)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 1) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 1.0 during time interval 2 # flow from B to A must be 0 during time interval 2 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,2)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 2) + ] + ), 1.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,2)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 2) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 1.0 during time interval 3 # flow from B to A must be 0 during time interval 3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - 
('mynet',node_A,node_B,arc_key_AB_und,0,3)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 3) + ] + ), 1.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,3)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 3) + ] + ), 0, - abs_tol=abs_tol - ) - - elif (static_losses_mode == - InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP): - + abs_tol=abs_tol, + ) + + elif static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_DEP: # departure node - + # losses are always in A - + # flow from A to B must be 0 during time interval 0 # flow from B to A must be 0 during time interval 0 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,0)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 0) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,0)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 0) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0 during time interval 1 # flow from B to A must be 0.1 during time interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,1)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 1) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,1)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 1) + ] + ), 0.1, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0.9 during time interval 2 # flow from B to A must be 0 during time interval 2 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,2)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 2) + ] + ), 0.9, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,2)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 2) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0.9 during time interval 3 # flow from B to A must be 0 during time interval 3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,3)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 3) + ] + ), 0.9, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,3)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 3) + ] + ), 0, - abs_tol=abs_tol - ) - - elif (static_losses_mode == - InfrastructurePlanningProblem.STATIC_LOSS_MODE_US): - + 
abs_tol=abs_tol, + ) + + elif static_losses_mode == InfrastructurePlanningProblem.STATIC_LOSS_MODE_US: # upstream - + # flow from A to B must be 0 during time interval 0 # flow from B to A must be 0 during time interval 0 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,0)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 0) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,0)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 0) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0 during time interval 1 # flow from B to A must be 0 during time interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,1)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 1) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,1)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 1) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0.9 during time interval 2 # flow from B to A must be 0 during time interval 2 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,2)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 2) + ] + ), 0.9, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,2)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 2) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0.9 during time interval 3 # flow from B to A must be 0 during time interval 3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,3)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 3) + ] + ), 0.9, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,3)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 3) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + else: - # downstream - + # flow from A to B must be 0 during time interval 0 # flow from B to A must be 0 during time interval 0 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,0)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 0) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,0)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 0) + ] + ), 0, - 
abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 0 during time interval 1 # flow from B to A must be 0.1 during time interval 1 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,1)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 1) + ] + ), 0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,1)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 1) + ] + ), 0.1, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 1.0 during time interval 2 # flow from B to A must be 0 during time interval 2 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,2)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 2) + ] + ), 1, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,2)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 2) + ] + ), 0, - abs_tol=abs_tol - ) - + abs_tol=abs_tol, + ) + # flow from A to B must be 1.0 during time interval 3 # flow from B to A must be 0 during time interval 3 - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_A,node_B,arc_key_AB_und,0,3)]), + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_A, node_B, arc_key_AB_und, 0, 3) + ] + ), 1.0, - abs_tol=abs_tol - ) - - assert math.isclose( - pyo.value(ipp.instance.var_v_glljqk[ - ('mynet',node_B,node_A,arc_key_AB_und,0,3)]), + abs_tol=abs_tol, + ) + + assert math.isclose( + pyo.value( + ipp.instance.var_v_glljqk[ + ("mynet", node_B, node_A, arc_key_AB_und, 0, 3) + ] + ), 0, - abs_tol=abs_tol - ) - -#****************************************************************************** -#****************************************************************************** + abs_tol=abs_tol, + ) + + +# ****************************************************************************** +# ****************************************************************************** + def example_arc_groups_individual( - solver, - solver_options, - use_arc_groups, - static_losses_mode, - init_aux_sets): - + solver, solver_options, use_arc_groups, static_losses_mode, init_aux_sets +): q = 0 - + # time - + number_intervals = 2 number_periods = 2 # 4 nodes: two import nodes, two supply/demand nodes - + mynet = Network() - #************************************************************************** - + # ************************************************************************** + # import nodes - + # imp1_prices = ResourcePrice( - # prices=[1, - # 2], + # prices=[1, + # 2], # volumes=None) - + imp1_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp1_node_key, + node_key=imp1_node_key, prices={ - (q,p,k): ResourcePrice( - prices=k+1, - volumes=None - ) + (q, p, k): ResourcePrice(prices=k + 1, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # imp2_prices = ResourcePrice( - # prices=[3, - # 2], + # prices=[3, + # 2], # volumes=None) - + imp2_node_key = generate_pseudo_unique_key(mynet.nodes()) - + 
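+    # Note on the price-dict keys: each key is a (q, p, k) tuple, where p
+    # ranges over the number_periods periods and k over the number_intervals
+    # intraperiod time intervals; q is fixed at 0 throughout these examples
+    # and indexes the assessment/scenario. For this node (prices=3 - k,
+    # matching the commented-out [3, 2] price list) the comprehension below
+    # expands, illustratively, to:
+    #
+    #     {(0, 0, 0): ResourcePrice(prices=3, volumes=None),
+    #      (0, 0, 1): ResourcePrice(prices=2, volumes=None),
+    #      (0, 1, 0): ResourcePrice(prices=3, volumes=None),
+    #      (0, 1, 1): ResourcePrice(prices=2, volumes=None)}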
mynet.add_import_node( - node_key=imp2_node_key, + node_key=imp2_node_key, prices={ - (q,p,k): ResourcePrice( - prices=3-k, - volumes=None - ) + (q, p, k): ResourcePrice(prices=3 - k, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # imp3_prices = ResourcePrice( - # prices=[1, - # 3], + # prices=[1, + # 3], # volumes=None) - + imp3_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp3_node_key, + node_key=imp3_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+2*k, - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + 2 * k, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - - #************************************************************************** - + }, + ) + + # ************************************************************************** + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_A, - base_flow={ - (q,0):1.0, - (q,1):0.5 - } - ) - + + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): 0.5}) + # add arcs - + efficiency_IA1 = { - (q,0): 1.00, - (q,1): 0.85, - } - + (q, 0): 1.00, + (q, 1): 0.85, + } + efficiency_IA2 = { - (q,0): 0.95, - (q,1): 0.90, - } - + (q, 0): 0.95, + (q, 1): 0.90, + } + efficiency_IA3 = { - (q,0): 0.90, - (q,1): 0.95, - } - + (q, 0): 0.90, + (q, 1): 0.95, + } + # I1A static_loss_IA1 = { - (0,q,0): 0.10, - (0,q,1): 0.10, - (1,q,0): 0.15, - (1,q,1): 0.15, - (2,q,0): 0.20, - (2,q,1): 0.20 - } - + (0, q, 0): 0.10, + (0, q, 1): 0.10, + (1, q, 0): 0.15, + (1, q, 1): 0.15, + (2, q, 0): 0.20, + (2, q, 1): 0.20, + } + # I2A static_loss_IA2 = { - (0,q,0): 0.20, - (0,q,1): 0.20, - (1,q,0): 0.05, - (1,q,1): 0.05, - (2,q,0): 0.10, - (2,q,1): 0.10 - } - + (0, q, 0): 0.20, + (0, q, 1): 0.20, + (1, q, 0): 0.05, + (1, q, 1): 0.05, + (2, q, 0): 0.10, + (2, q, 1): 0.10, + } + # I3A static_loss_IA3 = { - (0,q,0): 0.15, - (0,q,1): 0.15, - (1,q,0): 0.10, - (1,q,1): 0.10, - (2,q,0): 0.05, - (2,q,1): 0.05 - } - + (0, q, 0): 0.15, + (0, q, 1): 0.15, + (1, q, 0): 0.10, + (1, q, 1): 0.10, + (2, q, 0): 0.05, + (2, q, 1): 0.05, + } + arcs_I1A = Arcs( - name='IA1', + name="IA1", efficiency=efficiency_IA1, - efficiency_reverse=None, - static_loss=static_loss_IA1, - capacity=(0.5, 0.75, 1.2,), - minimum_cost=(0.2, 0.5, 0.75,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True - ) - + efficiency_reverse=None, + static_loss=static_loss_IA1, + capacity=( + 0.5, + 0.75, + 1.2, + ), + minimum_cost=( + 0.2, + 0.5, + 0.75, + ), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_I1A = mynet.add_directed_arc( - node_key_a=imp1_node_key, - node_key_b=node_A, - arcs=arcs_I1A - ) - + node_key_a=imp1_node_key, node_key_b=node_A, arcs=arcs_I1A + ) + arcs_I2A = Arcs( - name='IA2', + name="IA2", efficiency=efficiency_IA2, - efficiency_reverse=None, - static_loss=static_loss_IA2, - capacity=(0.5, 0.75, 1.2,), - minimum_cost=(0.2, 0.5, 0.75,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True - ) - + efficiency_reverse=None, + static_loss=static_loss_IA2, + capacity=( + 0.5, + 0.75, + 1.2, + ), + minimum_cost=( + 0.2, + 0.5, + 0.75, + ), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_I2A = mynet.add_directed_arc( - node_key_a=imp2_node_key, - node_key_b=node_A, - arcs=arcs_I2A - ) - + node_key_a=imp2_node_key, node_key_b=node_A, 
arcs=arcs_I2A + ) + arcs_I3A = Arcs( - name='IA3', + name="IA3", efficiency=efficiency_IA3, - efficiency_reverse=None, - static_loss=static_loss_IA3, - capacity=(0.5, 0.75, 1.2,), - minimum_cost=(0.2, 0.5, 0.75,), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True - ) - + efficiency_reverse=None, + static_loss=static_loss_IA3, + capacity=( + 0.5, + 0.75, + 1.2, + ), + minimum_cost=( + 0.2, + 0.5, + 0.75, + ), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_I3A = mynet.add_directed_arc( - node_key_a=imp3_node_key, - node_key_b=node_A, - arcs=arcs_I3A - ) - - if use_arc_groups: - + node_key_a=imp3_node_key, node_key_b=node_A, arcs=arcs_I3A + ) + + if use_arc_groups: arc_groups_dict = { - 0: (('mynet',imp1_node_key,node_A,arc_key_I1A), - ('mynet',imp2_node_key,node_A,arc_key_I2A), - ('mynet',imp3_node_key,node_A,arc_key_I3A)) - } - + 0: ( + ("mynet", imp1_node_key, node_A, arc_key_I1A), + ("mynet", imp2_node_key, node_A, arc_key_I2A), + ("mynet", imp3_node_key, node_A, arc_key_I3A), + ) + } + else: - arc_groups_dict = {} - + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - - solver_options['relative_mip_gap'] = 0 - solver_options['absolute_mip_gap'] = 1e-4 - + + solver_options["relative_mip_gap"] = 0 + solver_options["absolute_mip_gap"] = 1e-4 + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -9684,27 +8983,29 @@ def example_arc_groups_individual( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, arc_groups_dict=arc_groups_dict, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) # print('**********(((((((((((((((())))))))))))))))))))))') # if use_arc_groups: # print('hohohoho') @@ -9729,461 +9030,416 @@ def example_arc_groups_individual( capex_ind = 0.75 capex_group = 1.5 imp_ind = 1.9882352941176473 - imp_group = 2.2 # {'mynet': 2.2245012886626414} - sdncf_group = -6.184560251632995 # -6.2386008960367345 + imp_group = 2.2 # {'mynet': 2.2245012886626414} + sdncf_group = -6.184560251632995 # -6.2386008960367345 sdncf_ind = -5.274445281858455 sdext_group = 0 sdext_ind = 0 losses_ind = 0 losses_group = 0 - obj_ind = sdncf_ind+sdext_ind-capex_ind - obj_group = sdncf_group+sdext_group-capex_group - + obj_ind = sdncf_ind + sdext_ind - capex_ind + obj_group = sdncf_group + sdext_group - capex_group + assert capex_ind < capex_group assert imp_ind < imp_group assert obj_ind > obj_group - - if use_arc_groups: - + + if use_arc_groups: # all arcs have to be installed - - assert True in ipp.networks['mynet'].edges[ - (imp1_node_key, node_A, arc_key_I1A)][ - Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp2_node_key, 
node_A, arc_key_I2A)][ - Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (imp3_node_key, node_A, arc_key_I3A)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp1_node_key, node_A, arc_key_I1A)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp2_node_key, node_A, arc_key_I2A)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(imp3_node_key, node_A, arc_key_I3A)][Network.KEY_ARC_TECH] + .options_selected + ) + # the same option has to be selected in all arcs - - h1 = ipp.networks['mynet'].edges[ - (imp1_node_key, node_A, arc_key_I1A)][ - Network.KEY_ARC_TECH].options_selected.index(True) - - h2 = ipp.networks['mynet'].edges[ - (imp2_node_key, node_A, arc_key_I2A)][ - Network.KEY_ARC_TECH].options_selected.index(True) - - h3 = ipp.networks['mynet'].edges[ - (imp3_node_key, node_A, arc_key_I3A)][ - Network.KEY_ARC_TECH].options_selected.index(True) - + + h1 = ( + ipp.networks["mynet"] + .edges[(imp1_node_key, node_A, arc_key_I1A)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ) + + h2 = ( + ipp.networks["mynet"] + .edges[(imp2_node_key, node_A, arc_key_I2A)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ) + + h3 = ( + ipp.networks["mynet"] + .edges[(imp3_node_key, node_A, arc_key_I3A)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ) + assert h1 == h2 - + assert h1 == h3 - + # the capex have to be higher than those of the best individual arc - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_capex), - capex_group, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_capex), capex_group, abs_tol=abs_tol + ) + # there should be no exports - - assert math.isclose( - flow_out[('mynet',0,0)], - 0, - abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # the imports should be higher than with individual arcs - + abs_tol = 1e-3 - - assert math.isclose( - flow_in[('mynet',0,0)], - imp_group, - abs_tol=abs_tol) - + + assert math.isclose(flow_in[("mynet", 0, 0)], imp_group, abs_tol=abs_tol) + # the operating results should be lower than with an individual arc - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), - sdncf_group, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_group, abs_tol=abs_tol + ) + # the externalities should be zero - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.var_sdext_q[q]), - 0, - abs_tol=abs_tol) - + + assert math.isclose(pyo.value(ipp.instance.var_sdext_q[q]), 0, abs_tol=abs_tol) + # the objective function should be -6.3639758220728595-1.5 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.obj_f), - obj_group, - abs_tol=abs_tol) - + + assert math.isclose(pyo.value(ipp.instance.obj_f), obj_group, abs_tol=abs_tol) + # the imports should be greater than or equal to the losses for all arx - + losses_model = sum( pyo.value( ipp.instance.var_w_glljqk[ - ('mynet',imp1_node_key,node_A,arc_key_I1A,q,k)] - ) - + - pyo.value( - ipp.instance.var_w_glljqk[ - ('mynet',imp2_node_key,node_A,arc_key_I2A,q,k)] - ) - + - pyo.value( + ("mynet", imp1_node_key, node_A, arc_key_I1A, q, k) + ] + ) + + pyo.value( ipp.instance.var_w_glljqk[ - ('mynet',imp3_node_key,node_A,arc_key_I3A,q,k)] - ) - for k in range(number_intervals) + ("mynet", imp2_node_key, node_A, arc_key_I2A, q, k) + ] + ) + + pyo.value( + 
ipp.instance.var_w_glljqk[ + ("mynet", imp3_node_key, node_A, arc_key_I3A, q, k) + ] ) - + for k in range(number_intervals) + ) + losses_data = sum( - static_loss_IA1[(h1,q,k)]+ - static_loss_IA2[(h2,q,k)]+ - static_loss_IA3[(h3,q,k)] + static_loss_IA1[(h1, q, k)] + + static_loss_IA2[(h2, q, k)] + + static_loss_IA3[(h3, q, k)] for k in range(number_intervals) - ) - - assert math.isclose( - losses_model, - losses_data, - abs_tol=abs_tol) - - assert flow_in[('mynet',0,0)] >= losses_model - + ) + + assert math.isclose(losses_model, losses_data, abs_tol=abs_tol) + + assert flow_in[("mynet", 0, 0)] >= losses_model + else: - # at least one arc has to be installed - + assert ( - True in ipp.networks['mynet'].edges[ - (imp1_node_key, node_A, arc_key_I1A)][ - Network.KEY_ARC_TECH].options_selected - or - True in ipp.networks['mynet'].edges[ - (imp2_node_key, node_A, arc_key_I2A)][ - Network.KEY_ARC_TECH].options_selected - or - True in ipp.networks['mynet'].edges[ - (imp3_node_key, node_A, arc_key_I3A)][ - Network.KEY_ARC_TECH].options_selected - ) - + True + in ipp.networks["mynet"] + .edges[(imp1_node_key, node_A, arc_key_I1A)][Network.KEY_ARC_TECH] + .options_selected + or True + in ipp.networks["mynet"] + .edges[(imp2_node_key, node_A, arc_key_I2A)][Network.KEY_ARC_TECH] + .options_selected + or True + in ipp.networks["mynet"] + .edges[(imp3_node_key, node_A, arc_key_I3A)][Network.KEY_ARC_TECH] + .options_selected + ) + # the capex have to be lower than with a group of arcs - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_capex), - capex_ind, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_capex), capex_ind, abs_tol=abs_tol + ) + # there should be no exports - - assert math.isclose( - flow_out[('mynet',0,0)], - 0, - abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # the imports should be lower than with a group of arcs - + abs_tol = 1e-3 - - assert math.isclose( - flow_in[('mynet',0,0)], - imp_ind, - abs_tol=abs_tol) - + + assert math.isclose(flow_in[("mynet", 0, 0)], imp_ind, abs_tol=abs_tol) + # the operating results should be lower than with an individual arc - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), - sdncf_ind, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_ind, abs_tol=abs_tol + ) + # the externalities should be zero - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_sdext_q[q]), - sdext_ind, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_sdext_q[q]), sdext_ind, abs_tol=abs_tol + ) + # the objective function should be -6.3639758220728595-1.5 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.obj_f), - obj_ind, - abs_tol=abs_tol) - + + assert math.isclose(pyo.value(ipp.instance.obj_f), obj_ind, abs_tol=abs_tol) + # the imports should be greater than or equal to the losses for all arx - + losses_model = sum( pyo.value( ipp.instance.var_w_glljqk[ - ('mynet',imp1_node_key,node_A,arc_key_I1A,q,k)] - ) - + - pyo.value( + ("mynet", imp1_node_key, node_A, arc_key_I1A, q, k) + ] + ) + + pyo.value( ipp.instance.var_w_glljqk[ - ('mynet',imp2_node_key,node_A,arc_key_I2A,q,k)] - ) - + - pyo.value( + ("mynet", imp2_node_key, node_A, arc_key_I2A, q, k) + ] + ) + + pyo.value( ipp.instance.var_w_glljqk[ - ('mynet',imp3_node_key,node_A,arc_key_I3A,q,k)] - ) - for k in range(number_intervals) + ("mynet", imp3_node_key, node_A, arc_key_I3A, q, k) + ] ) - + for k in range(number_intervals) + ) + # losses_data = sum( # 
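+        # Note: losses_model above totals the static-loss variable
+        # var_w_glljqk over the three candidate arcs and both time intervals,
+        # i.e. the losses the solver actually accounted for. The losses_data
+        # cross-check against the static_loss_IA* input tables stays commented
+        # out in this branch because the selected-option indices h1, h2 and h3
+        # are only computed in the use_arc_groups branch; referencing them
+        # here would raise a NameError.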
static_loss_IA1[(h1,0,k)]+ # static_loss_IA2[(h2,0,k)]+ # static_loss_IA3[(h3,0,k)] # for k in range(number_intervals) # ) - + # assert math.isclose( # losses_model, # losses_data, # abs_tol=abs_tol) - - assert flow_in[('mynet',0,0)] >= losses_model - -#****************************************************************************** -#****************************************************************************** + + assert flow_in[("mynet", 0, 0)] >= losses_model + + +# ****************************************************************************** +# ****************************************************************************** + def example_arc_groups_individual_undirected( - solver, - solver_options, - use_arc_groups, - static_losses_mode, - init_aux_sets): - + solver, solver_options, use_arc_groups, static_losses_mode, init_aux_sets +): # time - + number_intervals = 2 - + number_periods = 2 # 4 nodes: one import node, four regular nodes - + mynet = Network() - + q = 0 - #************************************************************************** - + # ************************************************************************** + # import nodes - + # imp_prices = ResourcePrice( - # prices=[1, - # 2], + # prices=[1, + # 2], # volumes=None) - + imp_node_key = generate_pseudo_unique_key(mynet.nodes()) - + mynet.add_import_node( - node_key=imp_node_key, + node_key=imp_node_key, prices={ - (q,p,k): ResourcePrice( - prices=1+k, - volumes=None - ) + (q, p, k): ResourcePrice(prices=1 + k, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - - #************************************************************************** - + }, + ) + + # ************************************************************************** + # A - + node_A = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_A, - base_flow={ - (q,0):0.0, - (q,1):1.0 - } - ) - + + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 0.0, (q, 1): 1.0}) + # B - + node_B = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_B, - base_flow={ - (q,0):1.0, - (q,1):-0.5 - } - ) - + + mynet.add_source_sink_node(node_key=node_B, base_flow={(q, 0): 1.0, (q, 1): -0.5}) + # C - + node_C = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_C, - base_flow={ - (q,0):0.0, - (q,1):0.5 - } - ) - + + mynet.add_source_sink_node(node_key=node_C, base_flow={(q, 0): 0.0, (q, 1): 0.5}) + # D - + node_D = generate_pseudo_unique_key(mynet.nodes()) - - mynet.add_source_sink_node( - node_key=node_D, - base_flow={ - (q,0):0.5, - (q,1):-0.25 - } - ) - #************************************************************************** - + mynet.add_source_sink_node(node_key=node_D, base_flow={(q, 0): 0.5, (q, 1): -0.25}) + + # ************************************************************************** + # add arcs - + # IA - + mynet.add_preexisting_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_A, - efficiency=None, - static_loss=None, - capacity=1.5, - capacity_is_instantaneous=False - ) - + node_key_a=imp_node_key, + node_key_b=node_A, + efficiency=None, + static_loss=None, + capacity=1.5, + capacity_is_instantaneous=False, + ) + # IC - + mynet.add_preexisting_directed_arc( - node_key_a=imp_node_key, - node_key_b=node_C, - efficiency=None, - static_loss=None, - capacity=1.5, - capacity_is_instantaneous=False - ) - + node_key_a=imp_node_key, + node_key_b=node_C, + efficiency=None, + static_loss=None, + capacity=1.5, + 
capacity_is_instantaneous=False, + ) + # AB - + efficiency_AB = { - (q,0): 1.00, - (q,1): 0.85, - } - + (q, 0): 1.00, + (q, 1): 0.85, + } + efficiency_BA = { - (q,0): 0.95, - (q,1): 0.80, - } - + (q, 0): 0.95, + (q, 1): 0.80, + } + static_loss_AB = { - (0,q,0): 0.20, - (0,q,1): 0.25, - (1,q,0): 0.25, - (1,q,1): 0.30, - (2,q,0): 0.30, - (2,q,1): 0.35 - } - + (0, q, 0): 0.20, + (0, q, 1): 0.25, + (1, q, 0): 0.25, + (1, q, 1): 0.30, + (2, q, 0): 0.30, + (2, q, 1): 0.35, + } + arcs_AB = Arcs( - name='AB', - efficiency=efficiency_AB, - efficiency_reverse=efficiency_BA, - static_loss=static_loss_AB, - capacity=(0.85, 1.5, 2.5), - minimum_cost=(1, 2, 3), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - + name="AB", + efficiency=efficiency_AB, + efficiency_reverse=efficiency_BA, + static_loss=static_loss_AB, + capacity=(0.85, 1.5, 2.5), + minimum_cost=(1, 2, 3), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_AB = mynet.add_undirected_arc( - node_key_a=node_A, - node_key_b=node_B, - arcs=arcs_AB) - + node_key_a=node_A, node_key_b=node_B, arcs=arcs_AB + ) + # CD - + efficiency_CD = { - (q,0): 1.00, - (q,1): 0.85, - } - - efficiency_DC = { - (q,0): 0.95, - (q,1): 0.80 - } - + (q, 0): 1.00, + (q, 1): 0.85, + } + + efficiency_DC = {(q, 0): 0.95, (q, 1): 0.80} + static_loss_CD = { - (0,q,0): 0.010, - (0,q,1): 0.015, - (1,q,0): 0.015, - (1,q,1): 0.020, - (2,q,0): 0.020, - (2,q,1): 0.025 - } - + (0, q, 0): 0.010, + (0, q, 1): 0.015, + (1, q, 0): 0.015, + (1, q, 1): 0.020, + (2, q, 0): 0.020, + (2, q, 1): 0.025, + } + arcs_CD = Arcs( - name='CD', - efficiency=efficiency_CD, - efficiency_reverse=efficiency_DC, - static_loss=static_loss_CD, - capacity=(0.85, 1.5, 2.5), - minimum_cost=(1, 2, 3), - specific_capacity_cost=0, - capacity_is_instantaneous=False, - validate=True) - + name="CD", + efficiency=efficiency_CD, + efficiency_reverse=efficiency_DC, + static_loss=static_loss_CD, + capacity=(0.85, 1.5, 2.5), + minimum_cost=(1, 2, 3), + specific_capacity_cost=0, + capacity_is_instantaneous=False, + validate=True, + ) + arc_key_CD = mynet.add_undirected_arc( - node_key_a=node_C, - node_key_b=node_D, - arcs=arcs_CD) - - if use_arc_groups: - + node_key_a=node_C, node_key_b=node_D, arcs=arcs_CD + ) + + if use_arc_groups: arc_groups_dict = { - 0: (('mynet',node_A,node_B,arc_key_AB), - ('mynet',node_C,node_D,arc_key_CD)) - } - + 0: ( + ("mynet", node_A, node_B, arc_key_AB), + ("mynet", node_C, node_D, arc_key_CD), + ) + } + else: - arc_groups_dict = {} - + # identify node types - + mynet.identify_node_types() - + # solver settings - - solver_options['relative_mip_gap'] = 0 - solver_options['absolute_mip_gap'] = 1e-4 - + + solver_options["relative_mip_gap"] = 0 + solver_options["absolute_mip_gap"] = 1e-4 + # no sos, regular time intervals - + ipp = build_solve_ipp( solver=solver, solver_options=solver_options, @@ -10195,29 +9451,30 @@ def example_arc_groups_individual_undirected( sense_use_real_variables_if_possible=False, sense_use_arc_interfaces=False, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, irregular_time_intervals=False, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, static_losses_mode=static_losses_mode, mandatory_arcs=[], max_number_parallel_arcs={}, arc_groups_dict=arc_groups_dict, - init_aux_sets=init_aux_sets - ) - - #************************************************************************** - + 
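+        # Note on arc_groups_dict (passed a few lines above): it maps a group
+        # id to a tuple of (network key, node a, node b, arc key) entries;
+        # grouping forces the member arcs to be installed together with the
+        # same capacity option, which is what the h1 == h2 assertion further
+        # below verifies. With use_arc_groups False, an empty dict is passed
+        # and each arc is sized independently.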
init_aux_sets=init_aux_sets, + ) + + # ************************************************************************** + # overview - - (flow_in, - flow_in_k, - flow_out, - flow_in_cost, - flow_out_revenue) = compute_cost_volume_metrics(ipp.instance, True) - - + + ( + flow_in, + flow_in_k, + flow_out, + flow_in_cost, + flow_out_revenue, + ) = compute_cost_volume_metrics(ipp.instance, True) + # print('**********(((((((((((((((())))))))))))))))))))))') # print('flow in') # print(flow_in) @@ -10229,228 +9486,198 @@ def example_arc_groups_individual_undirected( # print(pyo.value(ipp.instance.var_sdncf)) # print('var_sdext') # print(pyo.value(ipp.instance.var_sdext)) - + capex_ind = 3 capex_group = 4 - + imp_ind = 2.912 imp_group = 2.9210000000000003 - + sdncf_ind = -7.72035753459824 sdncf_group = -7.745053560176434 - + sdnext_ind = 0 sdnext_group = 0 - - obj_ind = sdnext_ind+sdncf_ind-capex_ind - obj_group = sdnext_group+sdncf_group-capex_group - + + obj_ind = sdnext_ind + sdncf_ind - capex_ind + obj_group = sdnext_group + sdncf_group - capex_group + losses_ind = sum( - static_loss_AB[(1,q,k)]+static_loss_CD[(0,q,k)] + static_loss_AB[(1, q, k)] + static_loss_CD[(0, q, k)] for k in range(number_intervals) - ) + ) losses_group = sum( - static_loss_AB[(1,q,k)]+static_loss_CD[(1,q,k)] + static_loss_AB[(1, q, k)] + static_loss_CD[(1, q, k)] for k in range(number_intervals) - ) - + ) + losses_model = sum( pyo.value( - ipp.instance.var_w_glljqk[('mynet',node_A,node_B,arc_key_AB,q,k)] - ) - + - pyo.value( - ipp.instance.var_w_glljqk[('mynet',node_C,node_D,arc_key_CD,q,k)] - ) - for k in range(number_intervals) + ipp.instance.var_w_glljqk[("mynet", node_A, node_B, arc_key_AB, q, k)] + ) + + pyo.value( + ipp.instance.var_w_glljqk[("mynet", node_C, node_D, arc_key_CD, q, k)] ) - + for k in range(number_intervals) + ) + assert capex_group > capex_ind # # assert math.isclose(losses_group, losses_ind, abs_tol=1e-3) assert losses_group > losses_ind assert imp_group > imp_ind - - if use_arc_groups: - + + if use_arc_groups: # all arcs have to be installed - - assert True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB)][ - Network.KEY_ARC_TECH].options_selected - - assert True in ipp.networks['mynet'].edges[ - (node_C, node_D, arc_key_CD)][ - Network.KEY_ARC_TECH].options_selected - + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB)][Network.KEY_ARC_TECH] + .options_selected + ) + + assert ( + True + in ipp.networks["mynet"] + .edges[(node_C, node_D, arc_key_CD)][Network.KEY_ARC_TECH] + .options_selected + ) + # the same option has to be selected in all arcs - - h1 = ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB)][ - Network.KEY_ARC_TECH].options_selected.index(True) - - h2 = ipp.networks['mynet'].edges[ - (node_C, node_D, arc_key_CD)][ - Network.KEY_ARC_TECH].options_selected.index(True) - + + h1 = ( + ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ) + + h2 = ( + ipp.networks["mynet"] + .edges[(node_C, node_D, arc_key_CD)][Network.KEY_ARC_TECH] + .options_selected.index(True) + ) + assert h1 == h2 - + # the capex have to be higher than those of the best individual arc - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_capex), - capex_group, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_capex), capex_group, abs_tol=abs_tol + ) + # there should be no exports - - assert math.isclose( - flow_out[('mynet',0,0)], - 0, - abs_tol=abs_tol) - + + assert 
math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # the imports should be higher than with individual arcs - + abs_tol = 1e-3 - - assert math.isclose( - flow_in[('mynet',0,0)], - imp_group, - abs_tol=abs_tol) - + + assert math.isclose(flow_in[("mynet", 0, 0)], imp_group, abs_tol=abs_tol) + assert imp_group > imp_ind - + # the operating results should be lower than with an individual arc - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), - sdncf_group, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_group, abs_tol=abs_tol + ) + # the externalities should be zero - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_sdext_q[q]), - sdnext_group, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_sdext_q[q]), sdnext_group, abs_tol=abs_tol + ) + # the objective function should be -6.3639758220728595-1.5 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.obj_f), - obj_group, - abs_tol=abs_tol) - + + assert math.isclose(pyo.value(ipp.instance.obj_f), obj_group, abs_tol=abs_tol) + # the imports should be greater than or equal to the losses for all arx - + losses_model = sum( pyo.value( - ipp.instance.var_w_glljqk[ - ('mynet',node_A,node_B,arc_key_AB,q,k)] - ) - + - pyo.value( - ipp.instance.var_w_glljqk[ - ('mynet',node_C,node_D,arc_key_CD,q,k)] - ) - for k in range(number_intervals) + ipp.instance.var_w_glljqk[("mynet", node_A, node_B, arc_key_AB, q, k)] ) - + + pyo.value( + ipp.instance.var_w_glljqk[("mynet", node_C, node_D, arc_key_CD, q, k)] + ) + for k in range(number_intervals) + ) + losses_data = sum( - static_loss_AB[(h1,q,k)]+ - static_loss_CD[(h2,q,k)] + static_loss_AB[(h1, q, k)] + static_loss_CD[(h2, q, k)] for k in range(number_intervals) - ) - - assert math.isclose( - losses_model, - losses_data, - abs_tol=abs_tol) - - assert math.isclose( - losses_data, - losses_group, - abs_tol=abs_tol) - + ) + + assert math.isclose(losses_model, losses_data, abs_tol=abs_tol) + + assert math.isclose(losses_data, losses_group, abs_tol=abs_tol) + else: - # at least one arc has to be installed - + assert ( - True in ipp.networks['mynet'].edges[ - (node_A, node_B, arc_key_AB)][ - Network.KEY_ARC_TECH].options_selected - or - True in ipp.networks['mynet'].edges[ - (node_C, node_D, arc_key_CD)][ - Network.KEY_ARC_TECH].options_selected - ) - + True + in ipp.networks["mynet"] + .edges[(node_A, node_B, arc_key_AB)][Network.KEY_ARC_TECH] + .options_selected + or True + in ipp.networks["mynet"] + .edges[(node_C, node_D, arc_key_CD)][Network.KEY_ARC_TECH] + .options_selected + ) + # the capex have to be lower than with a group of arcs - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_capex), - capex_ind, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_capex), capex_ind, abs_tol=abs_tol + ) + # there should be no exports - - assert math.isclose( - flow_out[('mynet',0,0)], - 0, - abs_tol=abs_tol) - + + assert math.isclose(flow_out[("mynet", 0, 0)], 0, abs_tol=abs_tol) + # the imports should be lower than with a group of arcs - + abs_tol = 1e-3 - - assert math.isclose( - flow_in[('mynet',0,0)], - imp_ind, - abs_tol=abs_tol) - + + assert math.isclose(flow_in[("mynet", 0, 0)], imp_ind, abs_tol=abs_tol) + # the operating results should be lower than with an individual arc - + abs_tol = 1e-3 - + assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), - sdncf_ind, - abs_tol=abs_tol) - + pyo.value(ipp.instance.var_sdncf_q[q]), sdncf_ind, abs_tol=abs_tol + ) + # the externalities 
should be zero - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.var_sdext_q[q]), - 0, - abs_tol=abs_tol) - + + assert math.isclose(pyo.value(ipp.instance.var_sdext_q[q]), 0, abs_tol=abs_tol) + # the objective function should be -6.3639758220728595-1.5 - + abs_tol = 1e-3 - - assert math.isclose( - pyo.value(ipp.instance.obj_f), - obj_ind, - abs_tol=abs_tol) - + + assert math.isclose(pyo.value(ipp.instance.obj_f), obj_ind, abs_tol=abs_tol) + # the imports should be greater than or equal to the losses for all arx - - assert math.isclose( - losses_model, - losses_ind, - abs_tol=abs_tol) - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + + assert math.isclose(losses_model, losses_ind, abs_tol=abs_tol) + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/examples_signal.py b/tests/examples_signal.py index 644e68b..dc481e4 100644 --- a/tests/examples_signal.py +++ b/tests/examples_signal.py @@ -10,164 +10,167 @@ import random import src.topupopt.problems.esipp.signal as signal -#****************************************************************************** -#****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** + def examples(): - - #************************************************************************** - #************************************************************************** - + # ************************************************************************** + # ************************************************************************** + # test creating fixed signals - + example_fixed_signals() - + # test creating free signals - + example_free_signals() - + # test creating bounded signals - + example_bounded_signals() - + # test setting a signal - + example_set_signal() - + # test non-negative reals - + example_nnr_signals() - + # test binary signals - + example_binary_signals() - + # trigger errors that can only happen by messing with private/interval vars - + example_peculiar_errors() - + # test amplitude constrained signals - + example_amplitude_constrained_signals() - + # test amplitude constrained non-negative real signals - + example_amplitude_constrained_nnr_signals() - - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + + def example_amplitude_constrained_nnr_signals(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create 
signal with max positive amplitude limit - + sig = signal.AmplitudeConstrainedNNRSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=None, - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=None, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert not sig.has_min_pos_amp_limit assert sig.is_signal_bounded() assert not sig.is_signal_fixed() - + # create signal with min positive amplitude limit - + sig = signal.AmplitudeConstrainedNNRSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=3, - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=3, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert not sig.has_max_pos_amp_limit + assert not sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert sig.is_signal_bounded() assert not sig.is_signal_fixed() - + # create signal with positive constraints only - + sig = signal.AmplitudeConstrainedNNRSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert sig.is_signal_bounded() assert not sig.is_signal_fixed() - - #************************************************************************** - + + # ************************************************************************** + # trigger errors - + # by providing a non-numeric nr. 
of samples without specific lower bounds - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedNNRSignal( - number_samples=(number_intervals,), - max_pos_amp_limit=10) + number_samples=(number_intervals,), max_pos_amp_limit=10 + ) except TypeError: error_was_triggered = True assert error_was_triggered - + # by providing negative lower bounds - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedNNRSignal( number_samples=number_intervals, max_pos_amp_limit=10, - lower_bounds=[-1 for i in range(number_intervals)]) + lower_bounds=[-1 for i in range(number_intervals)], + ) except ValueError: error_was_triggered = True assert error_was_triggered - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_amplitude_constrained_signals(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create signal with max positive amplitude limit - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=None, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=None, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert not sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() @@ -176,20 +179,20 @@ def example_amplitude_constrained_signals(): # use the tolerances and validate an otherwise invalid amplitude sig.set_positive_amplitude(positive_amplitude=12) sig.validate_positive_amplitude(tolerance=2) - + # create signal with min positive amplitude limit - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert not sig.has_max_pos_amp_limit + assert not sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() @@ -198,20 +201,20 @@ def example_amplitude_constrained_signals(): # use the tolerances and validate an otherwise invalid amplitude sig.set_positive_amplitude(positive_amplitude=1) sig.validate_positive_amplitude(tolerance=2) - + # create signal with max negative amplitude limit - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=None - ) - - assert sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=None, + ) + + assert 
sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert not sig.has_max_pos_amp_limit + assert not sig.has_max_pos_amp_limit assert not sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() @@ -220,20 +223,20 @@ def example_amplitude_constrained_signals(): # use the tolerances and validate an otherwise invalid amplitude sig.set_negative_amplitude(negative_amplitude=12) sig.validate_negative_amplitude(tolerance=2) - + # create signal with min negative amplitude limit - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=None, - min_neg_amp_limit=3 - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=None, + min_neg_amp_limit=3, + ) + + assert not sig.has_max_neg_amp_limit assert sig.has_min_neg_amp_limit - assert not sig.has_max_pos_amp_limit + assert not sig.has_max_pos_amp_limit assert not sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() @@ -242,562 +245,554 @@ def example_amplitude_constrained_signals(): # use the tolerances and validate an otherwise invalid amplitude sig.set_negative_amplitude(negative_amplitude=1) sig.validate_negative_amplitude(tolerance=2) - + # create signal with positive constraints only - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() - + # create signal with negative constraints only - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) - - assert sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) + + assert sig.has_max_neg_amp_limit assert sig.has_min_neg_amp_limit - assert not sig.has_max_pos_amp_limit + assert not sig.has_max_pos_amp_limit assert not sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() - + # create amplitude constrained signal with all limits but without bounds - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=11, - min_neg_amp_limit=4 - ) - - assert sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=11, + min_neg_amp_limit=4, + ) + + assert sig.has_max_neg_amp_limit assert sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() - + # create amplitude constrained signal with all limits and with bounds - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - 
min_pos_amp_limit=3, - max_neg_amp_limit=11, + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=11, min_neg_amp_limit=4, lower_bounds=[-7 for i in range(number_intervals)], - upper_bounds=[15 for i in range(number_intervals)] - ) - - assert sig.has_max_neg_amp_limit + upper_bounds=[15 for i in range(number_intervals)], + ) + + assert sig.has_max_neg_amp_limit assert sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert sig.is_signal_bounded() assert not sig.is_signal_fixed() - + # set the signal using samples that do not violate the limits nor the bounds - + sig.set_signal([5 for i in range(number_intervals)]) - + assert not sig.violates_amplitude_limits() - + assert not sig.violates_bounds() - + # set the signal using samples that violate the positive limits and bounds - + sig.set_signal([25 for i in range(number_intervals)]) - + assert sig.violates_amplitude_limits() - + assert sig.violates_bounds() - + # set the signal using samples that violate the positive limits and bounds - + sig.set_signal([-25 for i in range(number_intervals)]) - + assert sig.violates_amplitude_limits() - + assert sig.violates_bounds() - - # test external samples that do not violate the limits - + + # test external samples that do not violate the limits + assert not sig.violates_amplitude_limits( samples=[0 for i in range(number_intervals)] - ) - + ) + # test external samples that violate the positive limits - - assert sig.violates_amplitude_limits( - samples=[15 for i in range(number_intervals)] - ) - - # test external samples that violate the negative limits - - assert sig.violates_amplitude_limits( - samples=[-15 for i in range(number_intervals)] - ) - + + assert sig.violates_amplitude_limits(samples=[15 for i in range(number_intervals)]) + + # test external samples that violate the negative limits + + assert sig.violates_amplitude_limits(samples=[-15 for i in range(number_intervals)]) + # create amplitude constrained signal without limits or bounds - + sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert not sig.has_max_pos_amp_limit + assert not sig.has_max_pos_amp_limit assert not sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() assert not sig.is_signal_fixed() - assert not sig.violates_amplitude_limits() # because it has none - - #************************************************************************** - + assert not sig.violates_amplitude_limits() # because it has none + + # ************************************************************************** + # trigger errors - + # by providing negative 'positive' amplitude limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=-10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=-10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) except ValueError: error_was_triggered = True assert error_was_triggered - - - + error_was_triggered 
= False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=-3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=-3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing negative 'negative' amplitude limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=-11, - min_neg_amp_limit=4 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=-11, + min_neg_amp_limit=4, + ) except ValueError: error_was_triggered = True assert error_was_triggered - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=11, - min_neg_amp_limit=-4 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=11, + min_neg_amp_limit=-4, + ) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing non-numeric or not None amplitude limits (e.g. tuple) - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=(10,), - min_pos_amp_limit=None, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=(10,), + min_pos_amp_limit=None, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) except TypeError: error_was_triggered = True assert error_was_triggered - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=(3,), - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=(3,), + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) except TypeError: error_was_triggered = True assert error_was_triggered - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=(10,), - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=(10,), + min_neg_amp_limit=None, + ) except TypeError: error_was_triggered = True assert error_was_triggered - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=None, - min_neg_amp_limit=(3,) - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=None, + min_neg_amp_limit=(3,), + ) except TypeError: error_was_triggered = True assert error_was_triggered - + # by providing bounds incompatible with positive limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=9, - min_pos_amp_limit=None, - max_neg_amp_limit=None, + number_samples=number_intervals, + max_pos_amp_limit=9, + min_pos_amp_limit=None, + max_neg_amp_limit=None, min_neg_amp_limit=None, upper_bounds=None, - lower_bounds=[10 
for i in range(number_intervals)] - ) + lower_bounds=[10 for i in range(number_intervals)], + ) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing bounds incompatible with negative limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=6, + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=6, min_neg_amp_limit=None, upper_bounds=[-10 for i in range(number_intervals)], - lower_bounds=None - ) + lower_bounds=None, + ) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing incompatible maximum and minimum positive limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=5, - min_pos_amp_limit=10, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=5, + min_pos_amp_limit=10, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing incompatible maximum and minimum negative limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=6, - min_neg_amp_limit=11 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=6, + min_neg_amp_limit=11, + ) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing non-numeric or not None amplitude limits (e.g. tuple) - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) sig.set_positive_amplitude(positive_amplitude=(5,)) except TypeError: error_was_triggered = True assert error_was_triggered - + # by providing non-numeric or not None amplitude limits (e.g. 
tuple) - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) sig.set_negative_amplitude(negative_amplitude=(5,)) except TypeError: error_was_triggered = True assert error_was_triggered - + # by checking if bounds have been violated without there being samples - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) - - assert not sig.has_max_neg_amp_limit + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) + + assert not sig.has_max_neg_amp_limit assert not sig.has_min_neg_amp_limit - assert sig.has_max_pos_amp_limit + assert sig.has_max_pos_amp_limit assert sig.has_min_pos_amp_limit assert not sig.is_signal_bounded() - assert not sig.is_signal_fixed() # signal is not set - assert not sig.violates_amplitude_limits() # since the sig is not set + assert not sig.is_signal_fixed() # signal is not set + assert not sig.violates_amplitude_limits() # since the sig is not set except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a positive amplitude when there are no positive # amplitude limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) sig.validate_negative_amplitude() except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a negative amplitude when there are no negative # amplitude limits - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) sig.validate_positive_amplitude() except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a positive amplitude that exceeds its tolerated # maximum, using the internal positive amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) sig.set_positive_amplitude(12) sig.validate_positive_amplitude() except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a positive amplitude that exceeds its tolerated # maximum, using an externally supplied amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - 
min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) sig.validate_positive_amplitude(12) except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a positive amplitude that is below its tolerated # minimum, using the internal positive amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) sig.set_positive_amplitude(2) sig.validate_positive_amplitude() except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a positive amplitude that is below its tolerated # minimum, using an externally supplied amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=10, - min_pos_amp_limit=3, - max_neg_amp_limit=None, - min_neg_amp_limit=None - ) + number_samples=number_intervals, + max_pos_amp_limit=10, + min_pos_amp_limit=3, + max_neg_amp_limit=None, + min_neg_amp_limit=None, + ) sig.validate_positive_amplitude(2) except ValueError: error_was_triggered = True assert error_was_triggered - - - - + # by seeking to validate a negative amplitude that exceeds its tolerated # maximum, using the internal negative amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) sig.set_negative_amplitude(12) sig.validate_negative_amplitude() except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a negative amplitude that exceeds its tolerated # maximum, using an externally supplied amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) sig.validate_negative_amplitude(12) except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a negative amplitude that is below its tolerated # minimum, using the internal negative amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) sig.set_negative_amplitude(2) sig.validate_negative_amplitude() except ValueError: error_was_triggered = True assert error_was_triggered - + # by seeking to validate a negative amplitude that is below its tolerated # minimum, using an externally supplied amplitude - + error_was_triggered = False try: sig = signal.AmplitudeConstrainedSignal( - number_samples=number_intervals, - 
max_pos_amp_limit=None, - min_pos_amp_limit=None, - max_neg_amp_limit=10, - min_neg_amp_limit=3 - ) + number_samples=number_intervals, + max_pos_amp_limit=None, + min_pos_amp_limit=None, + max_neg_amp_limit=10, + min_neg_amp_limit=3, + ) sig.validate_negative_amplitude(2) except ValueError: error_was_triggered = True assert error_was_triggered - -#****************************************************************************** -#****************************************************************************** - + + +# ****************************************************************************** +# ****************************************************************************** + + def example_peculiar_errors(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # by providing samples as something other than a list, e.g. tuples - + error_was_triggered = False try: _ = signal.Signal( @@ -805,36 +800,36 @@ def example_peculiar_errors(): samples=(random.random() for i in range(number_intervals)), lower_bounds=None, upper_bounds=None, - ) + ) except TypeError: error_was_triggered = True assert error_was_triggered - + # by providing an incorrect number of samples - + error_was_triggered = False try: _ = signal.Signal( number_samples=number_intervals, - samples=[random.random() for i in range(number_intervals+1)], + samples=[random.random() for i in range(number_intervals + 1)], lower_bounds=None, upper_bounds=None, - ) + ) except ValueError: error_was_triggered = True assert error_was_triggered - - #************************************************************************** - #************************************************************************** - - # the tests below require messing with the internals - + + # ************************************************************************** + # ************************************************************************** + + # the tests below require messing with the internals + # by providing an incorrect number of lower bounds - - lower_bounds=[5 for i in range(number_intervals)] - - upper_bounds=[7 for i in range(number_intervals)] - + + lower_bounds = [5 for i in range(number_intervals)] + + upper_bounds = [7 for i in range(number_intervals)] + error_was_triggered = False try: sig = signal.Signal( @@ -842,15 +837,15 @@ def example_peculiar_errors(): samples=None, lower_bounds=lower_bounds, upper_bounds=upper_bounds, - ) - sig.lower_bounds = [random.random() for i in range(number_intervals+1)] + ) + sig.lower_bounds = [random.random() for i in range(number_intervals + 1)] sig.has_lower_bounds() except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing an incorrect number of upper bounds - + error_was_triggered = False try: sig = signal.Signal( @@ -858,15 +853,15 @@ def example_peculiar_errors(): samples=None, lower_bounds=lower_bounds, upper_bounds=upper_bounds, - ) - sig.upper_bounds = [random.random() for i in range(number_intervals-1)] + ) + sig.upper_bounds = [random.random() for i in range(number_intervals - 1)] sig.has_upper_bounds() except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing an incorrect number of samples - + error_was_triggered = False try: sig = signal.Signal( @@ -874,606 +869,592 @@ def example_peculiar_errors(): samples=[random.random() for i in range(number_intervals)], lower_bounds=None, 
upper_bounds=None, - ) - sig.samples = [random.random() for i in range(number_intervals-1)] + ) + sig.samples = [random.random() for i in range(number_intervals - 1)] sig.is_signal_fixed() except ValueError: error_was_triggered = True assert error_was_triggered - + # by deleting the lower bounds after creating the object - + error_was_triggered = False try: - sig = signal.NonNegativeRealSignal( - number_samples=number_intervals - ) + sig = signal.NonNegativeRealSignal(number_samples=number_intervals) sig.lower_bounds = None sig.is_lower_bounded = False if not sig.are_bounds_nnr(): raise ValueError() except ValueError: error_was_triggered = True - assert error_was_triggered - + assert error_was_triggered + # by providing negative upper bounds (requires even lower lower bounds) - + error_was_triggered = False try: - sig = signal.NonNegativeRealSignal( - number_samples=number_intervals - ) + sig = signal.NonNegativeRealSignal(number_samples=number_intervals) sig.is_upper_bounded = True sig.upper_bounds = [-1 for i in range(number_intervals)] if not sig.are_bounds_nnr(): raise ValueError() except ValueError: error_was_triggered = True - assert error_was_triggered - -#****************************************************************************** -#****************************************************************************** - + assert error_was_triggered + + +# ****************************************************************************** +# ****************************************************************************** + + def example_binary_signals(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create a binary signal and insert non-binary integer numbers in it - - sig = signal.BinarySignal( - number_samples=number_intervals) + + sig = signal.BinarySignal(number_samples=number_intervals) assert sig.is_signal_fixed() == False - assert sig.is_signal_bounded() == True # it has upper and lower bounds + assert sig.is_signal_bounded() == True # it has upper and lower bounds + + assert sig.violates_bounds() == False # because it is not fixed + + assert sig.is_nnr() == False # because it is not fixed + + assert sig.are_bounds_nnr() == True # because it is within the range [0,1] + + sig.set_signal(samples=[3 for _ in range(number_intervals)]) - assert sig.violates_bounds() == False # because it is not fixed - - assert sig.is_nnr() == False # because it is not fixed - - assert sig.are_bounds_nnr() == True # because it is within the range [0,1] - - sig.set_signal( - samples=[3 for _ in range(number_intervals)] - ) - assert sig.is_signal_binary_only(integrality_tolerance=0.0) == False - + assert sig.is_signal_integer_only(integrality_tolerance=0.0) == True - + assert sig.is_signal_binary_only(integrality_tolerance=None) == False - + # create a binary signal and insert a noisy binary numbers in it - - sig = signal.BinarySignal( - number_samples=number_intervals) - + + sig = signal.BinarySignal(number_samples=number_intervals) + amplitude = 0.1 - - deviation = [amplitude*(random.random() - 0.5) - for _ in range(number_intervals)] - - samples = [random.randint(0, 1)+deviation[i] - for i in range(number_intervals)] - + + deviation = [amplitude * (random.random() - 0.5) for _ in range(number_intervals)] + + samples = [random.randint(0, 1) + deviation[i] for i in range(number_intervals)] + 
sig.set_signal(samples=samples) - + assert sig.is_signal_fixed() == True - assert sig.is_signal_bounded() == True # it has upper and lower bounds - - assert sig.violates_bounds(tolerance=amplitude) == False # it should not - - assert sig.is_nnr(tolerance=amplitude) == True # it should be - - assert sig.are_bounds_nnr() == True # because it is within the range [0,1] - - assert sig.is_signal_binary_only( - integrality_tolerance=amplitude) == True # since the tol. is the ampli. - - assert sig.is_signal_integer_only( - integrality_tolerance=amplitude) == True # since the tol. is the ampli. - - assert sig.is_signal_binary_only( - integrality_tolerance=max( - [abs(max(deviation)),abs(min(deviation))] - )*(1-0.1) - ) == False # because the tolerance was set below the maximum deviation - - assert sig.is_signal_integer_only( - integrality_tolerance=max( - [abs(max(deviation)),abs(min(deviation))] - )*(1-0.1) - ) == False # because the tolerance was set below the maximum deviation - + assert sig.is_signal_bounded() == True # it has upper and lower bounds + + assert sig.violates_bounds(tolerance=amplitude) == False # it should not + + assert sig.is_nnr(tolerance=amplitude) == True # it should be + + assert sig.are_bounds_nnr() == True # because it is within the range [0,1] + + assert ( + sig.is_signal_binary_only(integrality_tolerance=amplitude) == True + ) # since the tol. is the ampli. + + assert ( + sig.is_signal_integer_only(integrality_tolerance=amplitude) == True + ) # since the tol. is the ampli. + + assert ( + sig.is_signal_binary_only( + integrality_tolerance=max([abs(max(deviation)), abs(min(deviation))]) + * (1 - 0.1) + ) + == False + ) # because the tolerance was set below the maximum deviation + + assert ( + sig.is_signal_integer_only( + integrality_tolerance=max([abs(max(deviation)), abs(min(deviation))]) + * (1 - 0.1) + ) + == False + ) # because the tolerance was set below the maximum deviation + assert sig.is_signal_binary_only(integrality_tolerance=None) == True - + # create a binary signal - - sig = signal.BinarySignal( - number_samples=number_intervals) - - assert sig.is_signal_binary_only() == False # because it is not fixed yet - - assert sig.is_signal_integer_only() == False # because it is not fixed yet + + sig = signal.BinarySignal(number_samples=number_intervals) + + assert sig.is_signal_binary_only() == False # because it is not fixed yet + + assert sig.is_signal_integer_only() == False # because it is not fixed yet assert sig.is_signal_fixed() == False - assert sig.is_signal_bounded() == True # it has upper and lower bounds + assert sig.is_signal_bounded() == True # it has upper and lower bounds + + assert sig.violates_bounds() == False # because it is not fixed + + assert sig.is_nnr() == False # because it is not fixed + + assert sig.are_bounds_nnr() == True # because it is within the range [0,1] + + sig.set_signal(samples=[random.randint(0, 1) for _ in range(number_intervals)]) - assert sig.violates_bounds() == False # because it is not fixed - - assert sig.is_nnr() == False # because it is not fixed - - assert sig.are_bounds_nnr() == True # because it is within the range [0,1] - - sig.set_signal( - samples=[random.randint(0, 1) for _ in range(number_intervals)] - ) - assert sig.is_signal_binary_only(integrality_tolerance=0.0) == True - + assert sig.is_signal_integer_only(integrality_tolerance=0.0) == True - - #************************************************************************** - + + # ************************************************************************** + # 
trigger errors

-    
+
     # by specifying an integrality tolerance greater than or equal to 0.5
-    
+
     error_was_triggered = False
     try:
         sig.is_signal_binary_only(integrality_tolerance=0.5)
     except ValueError:
         error_was_triggered = True
-    assert error_was_triggered    
-    
+    assert error_was_triggered
+
     # by specifying an integrality tolerance greater than or equal to 0.5
-    
+
     error_was_triggered = False
     try:
         sig.is_signal_integer_only(integrality_tolerance=0.5)
     except ValueError:
         error_was_triggered = True
-    assert error_was_triggered    
-    
+    assert error_was_triggered
+
     # by specifying an integrality tolerance as a tuple
-    
+
     error_was_triggered = False
     try:
         sig.is_signal_binary_only(integrality_tolerance=(0.5,))
     except TypeError:
         error_was_triggered = True
-    assert error_was_triggered    
-    
+    assert error_was_triggered
+
     # by specifying an integrality tolerance as a tuple
-    
+
     error_was_triggered = False
     try:
         sig.is_signal_integer_only(integrality_tolerance=(0.5,))
     except TypeError:
         error_was_triggered = True
-    assert error_was_triggered    
-    
+    assert error_was_triggered
+
     # by specifying the number of samples as a float
-    
+
     error_was_triggered = False
     try:
-        sig = signal.BinarySignal(
-            number_samples=float(number_intervals)
-            )
+        sig = signal.BinarySignal(number_samples=float(number_intervals))
     except TypeError:
         error_was_triggered = True
-    assert error_was_triggered    
-    
-#******************************************************************************
-#******************************************************************************
+    assert error_was_triggered
+
+
+# ******************************************************************************
+# ******************************************************************************
+

 def example_nnr_signals():
-    
     # number of time intervals
-    
+
     number_intervals = 3
-    
-    #**************************************************************************
-    
+
+    # **************************************************************************
+
     # error-free examples
-    
+
     # create an NNR signal
-    
-    sig = signal.NonNegativeRealSignal(
-        number_intervals)
+
+    sig = signal.NonNegativeRealSignal(number_intervals)
     assert sig.is_signal_fixed() == False
-    assert sig.is_signal_bounded() == True # it has lower bounds by default
+    assert sig.is_signal_bounded() == True  # it has lower bounds by default
+
+    assert sig.violates_bounds() == False  # because it is not fixed
+
+    assert sig.is_nnr() == False  # because it is not fixed
+
+    assert sig.are_bounds_nnr() == True  # default case
 
-    assert sig.violates_bounds() == False # because it is not fixed
-    
-    assert sig.is_nnr() == False # because it is not fixed
-    
-    assert sig.are_bounds_nnr() == True # default case
-    
     # create an NNR signal with more specific lower bounds
-    
+
     sig = signal.NonNegativeRealSignal(
-        number_intervals,
-        lower_bounds=[1 for i in range(number_intervals)])
+        number_intervals, lower_bounds=[1 for i in range(number_intervals)]
+    )
     assert sig.is_signal_fixed() == False
     assert sig.is_signal_bounded() == True
-    assert sig.violates_bounds() == False # because it is not fixed
-    
-    assert sig.is_nnr() == False # because it is not fixed
-    
+    assert sig.violates_bounds() == False  # because it is not fixed
+
+    assert sig.is_nnr() == False  # because it is not fixed
+
     assert sig.are_bounds_nnr() == True
-    
+
     # create an NNR signal with more specific upper bounds
-    
+
     sig = signal.NonNegativeRealSignal(
-        number_intervals,
-        upper_bounds=[1 for i in range(number_intervals)])
+        number_intervals, upper_bounds=[1 for i in
range(number_intervals)] + ) assert sig.is_signal_fixed() == False assert sig.is_signal_bounded() == True - assert sig.violates_bounds() == False # because it is not fixed - - assert sig.is_nnr() == False # because it is not fixed - + assert sig.violates_bounds() == False # because it is not fixed + + assert sig.is_nnr() == False # because it is not fixed + assert sig.are_bounds_nnr() == True - + # create a fixed NNR signal - + sig = signal.FixedNonNegativeRealSignal( samples=[random.random() for i in range(number_intervals)] - ) + ) assert sig.is_signal_fixed() == True assert sig.is_signal_bounded() == True - assert sig.violates_bounds() == False # no, since samples are within [0,1] - - assert sig.is_nnr() == True # yes, same as above - - assert sig.are_bounds_nnr() == True # yes, same as above - + assert sig.violates_bounds() == False # no, since samples are within [0,1] + + assert sig.is_nnr() == True # yes, same as above + + assert sig.are_bounds_nnr() == True # yes, same as above + # create a fixed NNR signal with binary numbers - + sig = signal.FixedNonNegativeRealSignal( - samples=[random.randint(0,1) for i in range(number_intervals)] - ) + samples=[random.randint(0, 1) for i in range(number_intervals)] + ) assert sig.is_signal_fixed() == True assert sig.is_signal_bounded() == True - assert sig.violates_bounds() == False # no, since samples are within [0,1] - - assert sig.is_nnr() == True # yes, same as above - - assert sig.are_bounds_nnr() == True # yes, same as above - + assert sig.violates_bounds() == False # no, since samples are within [0,1] + + assert sig.is_nnr() == True # yes, same as above + + assert sig.are_bounds_nnr() == True # yes, same as above + assert sig.is_signal_integer_only() == True - - #************************************************************************** - + + # ************************************************************************** + # trigger errors - + # by providing a float as the number of intervals - + error_was_triggered = False try: - sig = signal.NonNegativeRealSignal( - number_samples=float(number_intervals) - ) + sig = signal.NonNegativeRealSignal(number_samples=float(number_intervals)) except TypeError: error_was_triggered = True - assert error_was_triggered - + assert error_was_triggered + # by providing negative lower bounds - + error_was_triggered = False try: sig = signal.NonNegativeRealSignal( number_samples=number_intervals, - lower_bounds=[-1 for i in range(number_intervals)] - ) + lower_bounds=[-1 for i in range(number_intervals)], + ) except ValueError: error_was_triggered = True - assert error_was_triggered - + assert error_was_triggered + # by providing samples that are not nnr - + samples = [random.random() for i in range(number_intervals)] - + samples[-1] = -1 - + error_was_triggered = False try: - sig = signal.FixedNonNegativeRealSignal( - samples=samples - ) + sig = signal.FixedNonNegativeRealSignal(samples=samples) except ValueError: error_was_triggered = True - assert error_was_triggered - + assert error_was_triggered + # by providing samples as tuples - + samples = (random.random() for i in range(number_intervals)) - + error_was_triggered = False try: - sig = signal.FixedNonNegativeRealSignal( - samples=samples - ) + sig = signal.FixedNonNegativeRealSignal(samples=samples) except TypeError: error_was_triggered = True - assert error_was_triggered + assert error_was_triggered + + +# ****************************************************************************** +# 
****************************************************************************** + -#****************************************************************************** -#****************************************************************************** - def example_set_signal(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create a free signal and set it afterwards - - sig = signal.FreeUnboundedSignal( - number_samples=number_intervals - ) - + + sig = signal.FreeUnboundedSignal(number_samples=number_intervals) + samples = [random.random() for i in range(number_intervals)] - + assert sig.is_signal_fixed() == False - + sig.set_signal(samples) - + assert sig.is_signal_fixed() == True - + # create a fixed signal and set it to something else afterwards - + sig = signal.FixedSignal( samples=[0.5 for i in range(number_intervals)], lower_bounds=[0 for i in range(number_intervals)], - upper_bounds=[1 for i in range(number_intervals)] - ) - + upper_bounds=[1 for i in range(number_intervals)], + ) + assert sig.is_signal_fixed() == True - + assert sig.violates_bounds() == False - + new_samples = [2 for i in range(number_intervals)] - + sig.set_signal(new_samples) - + assert sig.is_signal_fixed() == True - + assert sig.violates_bounds() == True - - #************************************************************************** - + + # ************************************************************************** + # trigger errors - + # by providing an integer instead of a list - + error_was_triggered = False try: sig.set_signal(samples=3) except TypeError: error_was_triggered = True - assert error_was_triggered - + assert error_was_triggered + # by providing an incorrectly sized list - + error_was_triggered = False try: - sig.set_signal(samples=[2 for i in range(number_intervals+1)]) + sig.set_signal(samples=[2 for i in range(number_intervals + 1)]) except ValueError: error_was_triggered = True - assert error_was_triggered - - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** + assert error_was_triggered + + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_bounded_signals(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create an upper bounded signal via the main class - + sig = signal.Signal( number_samples=number_intervals, samples=None, lower_bounds=None, - upper_bounds=[10 for i in range(number_intervals)] - ) - - assert sig.is_signal_fixed() == False # because it has no samples - - assert sig.is_signal_bounded() == True # because it does - - assert sig.has_upper_bounds() == True # because it does - + upper_bounds=[10 for i in range(number_intervals)], + ) + + assert sig.is_signal_fixed() == False # because it has no samples + + assert sig.is_signal_bounded() == True # because it does + + assert sig.has_upper_bounds() == True 
# because it does
+
     assert sig.has_lower_bounds() == False  # because it does not
-    
-    assert sig.violates_bounds() == False # because it is not fixed
-    
+
+    assert sig.violates_bounds() == False  # because it is not fixed
+
     # create a lower bounded signal via the main class
-    
+
     sig = signal.Signal(
         number_samples=number_intervals,
         samples=None,
         lower_bounds=[10 for i in range(number_intervals)],
-        upper_bounds=None
-        )
-    
-    assert sig.is_signal_fixed() == False # because it has no samples
-    
-    assert sig.is_signal_bounded() == True # because it is
-    
-    assert sig.has_upper_bounds() == False # because it does not
-    
+        upper_bounds=None,
+    )
+
+    assert sig.is_signal_fixed() == False  # because it has no samples
+
+    assert sig.is_signal_bounded() == True  # because it is
+
+    assert sig.has_upper_bounds() == False  # because it does not
+
     assert sig.has_lower_bounds() == True  # because it does
-    
-    assert sig.violates_bounds() == False # because it is not fixed
-    
+
+    assert sig.violates_bounds() == False  # because it is not fixed
+
     # create a signal with upper and lower bounds via the main class
-    
+
     sig = signal.Signal(
         number_samples=number_intervals,
         samples=None,
         lower_bounds=[3 for i in range(number_intervals)],
         upper_bounds=[10 for i in range(number_intervals)],
-        )
-    
-    assert sig.is_signal_fixed() == False # because it has no samples
-    
-    assert sig.is_signal_bounded() == True # because it is
-    
-    assert sig.has_upper_bounds() == True # because it does
-    
+    )
+
+    assert sig.is_signal_fixed() == False  # because it has no samples
+
+    assert sig.is_signal_bounded() == True  # because it is
+
+    assert sig.has_upper_bounds() == True  # because it does
+
     assert sig.has_lower_bounds() == True  # because it does
-    
-    assert sig.violates_bounds() == False # because it is not fixed
-    
+
+    assert sig.violates_bounds() == False  # because it is not fixed
+
     # create a fixed signal whose upper bounds are violated every time
-    
+
     sig = signal.FixedSignal(
         samples=[11 for i in range(number_intervals)],
         lower_bounds=[4 for i in range(number_intervals)],
         upper_bounds=[10 for i in range(number_intervals)],
-        )
-    
-    assert sig.is_signal_fixed() == True # because it has no samples
-    
-    assert sig.is_signal_bounded() == True # because it is
-    
-    assert sig.violates_bounds() == True # because 11 > 4
-    
+    )
+
+    assert sig.is_signal_fixed() == True  # because it has samples
+
+    assert sig.is_signal_bounded() == True  # because it is
+
+    assert sig.violates_bounds() == True  # because 11 > 10
+
     # create a fixed signal whose lower bounds are violated every time
-    
+
     sig = signal.FixedSignal(
         samples=[3 for i in range(number_intervals)],
         lower_bounds=[4 for i in range(number_intervals)],
         upper_bounds=[10 for i in range(number_intervals)],
-        )
-    
-    assert sig.is_signal_fixed() == True # because it has no samples
-    
-    assert sig.is_signal_bounded() == True # because it does
-    
-    assert sig.violates_bounds() == True # because 3 < 4
-    
+    )
+
+    assert sig.is_signal_fixed() == True  # because it has samples
+
+    assert sig.is_signal_bounded() == True  # because it does
+
+    assert sig.violates_bounds() == True  # because 3 < 4
+
     # create a fixed signal whose upper bounds are violated only once
-    
+
     samples = [5 for i in range(number_intervals)]
-    
+
     samples[-1] = 11
-    
+
     sig = signal.FixedSignal(
         samples=samples,
         lower_bounds=[4 for i in range(number_intervals)],
         upper_bounds=[10 for i in range(number_intervals)],
-        )
-    
-    assert sig.is_signal_fixed() == True # because it has no samples
-    
-    assert sig.is_signal_bounded() == True # because it is
-    
-    assert sig.violates_bounds() == True # because 11 > 4
-    
+    )
+
+    assert sig.is_signal_fixed() == True  # because it has samples
+
+    assert sig.is_signal_bounded() == True  # because it is
+
+    assert sig.violates_bounds() == True  # because 11 > 10
+
     # create a fixed signal whose lower bounds are violated only once
-    
+
     samples = [5 for i in range(number_intervals)]
-    
+
     samples[-1] = 3
-    
+
     sig = signal.FixedSignal(
         samples=samples,
         lower_bounds=[4 for i in range(number_intervals)],
         upper_bounds=[10 for i in range(number_intervals)],
-        )
-    
-    assert sig.is_signal_fixed() == True # because it has no samples
-    
-    assert sig.is_signal_bounded() == True # because it does
-    
-    assert sig.violates_bounds() == True # because 3 < 4
-    
-    #**************************************************************************
-    
-    # trigger errors
-    
+    )
+
+    assert sig.is_signal_fixed() == True  # because it has samples
+
+    assert sig.is_signal_bounded() == True  # because it does
+
+    assert sig.violates_bounds() == True  # because 3 < 4
+
+    # **************************************************************************
+
+    # trigger errors
+
     # by providing upper bounds with an inconsistent number of samples
-    
+
     error_was_triggered = False
     try:
         sig = signal.Signal(
             number_samples=number_intervals,
             samples=None,
             lower_bounds=None,
-            upper_bounds=[10 for i in range(number_intervals-1)], # one too few
-            )
+            upper_bounds=[10 for i in range(number_intervals - 1)],  # one too few
+        )
     except ValueError:
         error_was_triggered = True
-    assert error_was_triggered    
-    
+    assert error_was_triggered
+
     # by providing lower bounds with an inconsistent number of samples
-    
+
     error_was_triggered = False
     try:
         sig = signal.Signal(
             number_samples=number_intervals,
             samples=None,
-            lower_bounds=[3 for i in range(number_intervals+1)], # one extra
+            lower_bounds=[3 for i in range(number_intervals + 1)],  # one extra
             upper_bounds=None,
-            )
+        )
     except ValueError:
         error_was_triggered = True
     assert error_was_triggered
-    
+
     # by providing upper bounds not as a list but as a numeric type
-    
+
     error_was_triggered = False
     try:
         sig = signal.Signal(
             number_samples=number_intervals,
             samples=None,
-            lower_bounds=[3 for i in range(number_intervals)], # one extra
+            lower_bounds=[3 for i in range(number_intervals)],
             upper_bounds=6,
-            )
+        )
     except TypeError:
         error_was_triggered = True
     assert error_was_triggered
-    
+
     # by providing lower bounds not as a list but as a numeric type
-    
+
     error_was_triggered = False
     try:
         sig = signal.Signal(
@@ -1481,19 +1462,19 @@ def example_bounded_signals():
             samples=None,
             lower_bounds=2,
             upper_bounds=[5 for i in range(number_intervals)],
-            )
+        )
     except TypeError:
         error_was_triggered = True
     assert error_was_triggered
-    
+
     # by providing upper bounds lower than the lower bounds
-    
-    lower_bounds=[5 for i in range(number_intervals)]
-    
-    upper_bounds=[7 for i in range(number_intervals)]
-    
+
+    lower_bounds = [5 for i in range(number_intervals)]
+
+    upper_bounds = [7 for i in range(number_intervals)]
+
     upper_bounds[-1] = 3
-    
+
     error_was_triggered = False
     try:
         sig = signal.Signal(
@@ -1501,19 +1482,19 @@ def example_bounded_signals():
             samples=None,
             lower_bounds=lower_bounds,
             upper_bounds=upper_bounds,
-            )
+        )
     except ValueError:
         error_was_triggered = True
     assert error_was_triggered
-    
+
     # by providing lower bounds higher than the upper bounds
-    
-    lower_bounds=[5 for i in range(number_intervals)]
-    
-    upper_bounds=[7 for i in
range(number_intervals)] + lower_bounds[-1] = 9 - + error_was_triggered = False try: sig = signal.Signal( @@ -1521,160 +1502,153 @@ def example_bounded_signals(): samples=None, lower_bounds=lower_bounds, upper_bounds=upper_bounds, - ) + ) except ValueError: error_was_triggered = True assert error_was_triggered - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def example_free_signals(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create a free signal without bounds via the main class - + sig = signal.Signal( number_samples=number_intervals, samples=None, lower_bounds=None, - upper_bounds=None - ) - - assert sig.is_signal_fixed() == False # because it has no samples - - assert sig.is_signal_bounded() == False # because it has none - - assert sig.violates_bounds() == False # because it has none - + upper_bounds=None, + ) + + assert sig.is_signal_fixed() == False # because it has no samples + + assert sig.is_signal_bounded() == False # because it has none + + assert sig.violates_bounds() == False # because it has none + # create a free signal via a specific class - - sig = signal.FreeSignal( - number_samples=number_intervals - ) - - assert sig.is_signal_fixed() == False # because it has no samples - - assert sig.is_signal_bounded() == False # because it has none - - assert sig.violates_bounds() == False # because it has none - + + sig = signal.FreeSignal(number_samples=number_intervals) + + assert sig.is_signal_fixed() == False # because it has no samples + + assert sig.is_signal_bounded() == False # because it has none + + assert sig.violates_bounds() == False # because it has none + # create a free signal without bounds via a specific class - - sig = signal.FreeUnboundedSignal( - number_samples=number_intervals - ) - - assert sig.is_signal_fixed() == False # because it has no samples - - assert sig.is_signal_bounded() == False # because it has none - - assert sig.violates_bounds() == False # because it has none - - #************************************************************************** - - # trigger errors - + + sig = signal.FreeUnboundedSignal(number_samples=number_intervals) + + assert sig.is_signal_fixed() == False # because it has no samples + + assert sig.is_signal_bounded() == False # because it has none + + assert sig.violates_bounds() == False # because it has none + + # ************************************************************************** + + # trigger errors + # by providing a float as the number of intervals - + error_was_triggered = False try: sig = signal.Signal( number_samples=float(number_intervals), samples=None, lower_bounds=None, - upper_bounds=None - ) + upper_bounds=None, + ) except TypeError: error_was_triggered = True assert error_was_triggered - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def 
example_fixed_signals(): - # number of time intervals - + number_intervals = 3 - - #************************************************************************** - + + # ************************************************************************** + # error-free examples - + # create a fixed input made up of reals - - sig = signal.FixedSignal( - samples=[random.random() for k in range(number_intervals)] - ) - - assert sig.is_signal_fixed() == True # because it is predetermined - - assert sig.is_signal_bounded() == False # because it has none - - assert sig.violates_bounds() == False # because it has none - + + sig = signal.FixedSignal(samples=[random.random() for k in range(number_intervals)]) + + assert sig.is_signal_fixed() == True # because it is predetermined + + assert sig.is_signal_bounded() == False # because it has none + + assert sig.violates_bounds() == False # because it has none + # create a fixed signal using the main class - + sig = signal.Signal( number_samples=number_intervals, samples=[random.random() for k in range(number_intervals)], lower_bounds=None, - upper_bounds=None - ) - - assert sig.is_signal_fixed() == True # because it is predetermined - - assert sig.is_signal_bounded() == False # because it has none - - assert sig.violates_bounds() == False # because it has none - - #************************************************************************** - - # trigger errors - + upper_bounds=None, + ) + + assert sig.is_signal_fixed() == True # because it is predetermined + + assert sig.is_signal_bounded() == False # because it has none + + assert sig.violates_bounds() == False # because it has none + + # ************************************************************************** + + # trigger errors + # by providing a None when creating a FixedSignal - + error_was_triggered = False try: - sig = signal.FixedSignal( - samples=None - ) + sig = signal.FixedSignal(samples=None) except TypeError: error_was_triggered = True assert error_was_triggered - + # by providing an empty list - + error_was_triggered = False try: - sig = signal.FixedSignal( - samples=[] - ) + sig = signal.FixedSignal(samples=[]) except ValueError: error_was_triggered = True assert error_was_triggered - + # by providing the number of samples as a float - + error_was_triggered = False try: sig = signal.Signal( number_samples=float(number_intervals), samples=[random.random() for k in range(number_intervals)], lower_bounds=None, - upper_bounds=None - ) + upper_bounds=None, + ) except TypeError: error_was_triggered = True assert error_was_triggered - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/test_all.py b/tests/test_all.py index 4cae3ee..8fce4ff 100644 --- a/tests/test_all.py +++ b/tests/test_all.py @@ -3,35 +3,35 @@ import random from topupheat.pipes.single import StandardisedPipeDatabase -from topupheat.common.fluids import FluidDatabase#, Fluid +from topupheat.common.fluids import FluidDatabase # , Fluid from examples_esipp_network import examples as examples_esipp_network from examples_esipp_problem import examples as examples_esipp_problem from examples_esipp import examples as examples_esipp from examples_signal import examples as examples_signal 
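The examples modules above assert expected failures with a hand-rolled error_was_triggered flag around each call. Since the suite is run under pytest (see the comment in test_suite below), the same checks could be written more compactly with pytest.raises. A minimal sketch, assuming pytest is installed and using the signal module import from the examples (the import path is assumed from the src layout; this is illustrative only, not part of this patch):

import pytest
from topupopt.problems.esipp import signal  # assumed import path

def test_fixed_signal_rejects_none():
    # equivalent to the try/except + error_was_triggered pattern above:
    # FixedSignal must refuse samples=None with a TypeError
    with pytest.raises(TypeError):
        signal.FixedSignal(samples=None)

def test_fixed_signal_rejects_empty_list():
    # an empty sample list must be rejected with a ValueError
    with pytest.raises(ValueError):
        signal.FixedSignal(samples=[])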
-#****************************************************************************** -#****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** # test with: !python -m pytest -s --cov --cov-report term-missing + def test_suite(): - test_examples_dynsys = True # test_examples_dynsys = False - + test_examples_esipp_network = True # test_examples_esipp_network = False - + test_examples_esipp_problem = True # test_examples_esipp_problem = False - + test_examples_esipp = True # test_examples_esipp = False - + test_examples_signal = True # test_examples_signal = False - + # test_examples_converter = True # test_examples_dynsys = True # test_examples_esipp_network = True @@ -39,158 +39,143 @@ def test_suite(): # test_examples_esipp = True # test_examples_gis = True # test_examples_signal = True - - #************************************************************************** - - solver = 'scip' - #scip_exec_path = '/usr/bin/scip' - #solver_options = {'executable': scip_exec_path} + + # ************************************************************************** + + solver = "scip" + # scip_exec_path = '/usr/bin/scip' + # solver_options = {'executable': scip_exec_path} solver_options = {} - + # solver = 'cplex' # # cplex_exec_path = '/home/pmlpm/Software/CPLEX/cplex/bin/x86-64_linux/cplex' # cplex_exec_path = '/home/pmlpm/CPLEX/cplex/bin/x86-64_linux/cplex' # #solver_options = {} # solver_options = {'executable':cplex_exec_path} - + list_solvers = [ - 'fake_solver', - 'cbc', - 'glpk', - 'scip', + "fake_solver", + "cbc", + "glpk", + "scip", #'cplex' - ] - + ] + list_solver_options = [ - None, # fake - None, # cbc - {'tee': False}, # glpk - None, # scip + None, # fake + None, # cbc + {"tee": False}, # glpk + None, # scip # cplex - #{'executable': cplex_exec_path}, - ] - - #************************************************************************** - - seed_number = random.randint(1,int(1e5)) - + # {'executable': cplex_exec_path}, + ] + + # ************************************************************************** + + seed_number = random.randint(1, int(1e5)) + # seed_number = 13501 - - print('Seed number: ' + str(seed_number)) - - #************************************************************************** - + + print("Seed number: " + str(seed_number)) + + # ************************************************************************** + # load pipe data - + singlepipedata_files = [ - 'tests/data/isoplus_singlepipes_s1.csv', - ] - + "tests/data/isoplus_singlepipes_s1.csv", + ] + singlepipedb = StandardisedPipeDatabase(source=singlepipedata_files) # twin pipe data files - + twinpipedata_files = [ - 'tests/data/isoplus_twinpipes_s1.csv', - ] - + "tests/data/isoplus_twinpipes_s1.csv", + ] + twinpipedb = StandardisedPipeDatabase(source=twinpipedata_files) - + # ************************************************************************** - + # load fluid data - + # get water properties' database - - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' - water_db = FluidDatabase(fluid='fluid', - phase='l', - source=waterdata_file) - + waterdata_file = "tests/data/incropera2006_saturated_water.csv" + + water_db = FluidDatabase(fluid="fluid", phase="l", source=waterdata_file) + # # get oil properties' database - + # oildata_file = '/some_folder/incropera2006_engine_oil.csv' # oil_db = FluidDatabase(fluid='oil', - # phase='l', + 
# phase='l',
     #                        source=oildata_file)
-    
+
     # # get air properties' database
-    
+
     # airdata_file = '/some_folder/incropera2006_air_1atm.csv'
-    
+
     # air_db = FluidDatabase(fluid='air',
     #                        phase='g',
     #                        source=airdata_file)
-    
+
     # load osm/osmnx data
-    
-    #**************************************************************************
-    #**************************************************************************
-    
+
+    # **************************************************************************
+    # **************************************************************************
+
     # esipp-network
-    
+
     if test_examples_esipp_network:
-        
-        print('\'esipp-network\': testing about to start...')
-        
+        print("'esipp-network': testing about to start...")
+
         examples_esipp_network()
-        
-        print('\'esipp-network\': testing complete.')
-        
-    #**************************************************************************
-    
+
+        print("'esipp-network': testing complete.")
+
+    # **************************************************************************
+
     # esipp-problem
-    
+
     if test_examples_esipp_problem:
-        
-        print('\'esipp-problem\': testing about to start...')
-        
-        examples_esipp_problem(solver,
-                               solver_options,
-                               init_aux_sets=False)
-        
-        examples_esipp_problem(solver,
-                               solver_options,
-                               init_aux_sets=True)
-        
-        print('\'esipp-problem\': testing complete.')
-        
-    #**************************************************************************
-    
+        print("'esipp-problem': testing about to start...")
+
+        examples_esipp_problem(solver, solver_options, init_aux_sets=False)
+
+        examples_esipp_problem(solver, solver_options, init_aux_sets=True)
+
+        print("'esipp-problem': testing complete.")
+
+    # **************************************************************************
+
     # esipp
-    
+
     if test_examples_esipp:
-        
-        print('\'esipp\': testing about to start...')
-        
-        examples_esipp(solver,
-                       solver_options,
-                       seed_number,
-                       init_aux_sets=False)
-        
-        examples_esipp(solver,
-                       solver_options,
-                       seed_number,
-                       init_aux_sets=True)
-        
-        print('\'esipp-problem\': testing complete.')
-        
-    #**************************************************************************
-    
+        print("'esipp': testing about to start...")
+
+        examples_esipp(solver, solver_options, seed_number, init_aux_sets=False)
+
+        examples_esipp(solver, solver_options, seed_number, init_aux_sets=True)
+
+        print("'esipp': testing complete.")
+
+    # **************************************************************************
+
     # signal
-    
+
     if test_examples_signal:
-        
-        print('\'signal\': testing about to start...')
-        
+        print("'signal': testing about to start...")
+
         examples_signal()
-        
-        print('\'signal\': testing: testing complete.')
-        
-    #**************************************************************************
-    #**************************************************************************
-    
-#******************************************************************************
-#****************************************************************************** \ No newline at end of file
+
+        print("'signal': testing complete.")
+
+    # **************************************************************************
+    # **************************************************************************
+
+
+# ******************************************************************************
+# ******************************************************************************
diff --git a/tests/test_data_finance.py b/tests/test_data_finance.py
index e1b3af3..051f3c2 100644
--- a/tests/test_data_finance.py
+++ b/tests/test_data_finance.py
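The finance tests that follow check Investment.net_present_value, discount_factor and npv against hand-computed values. The arithmetic they rely on is plain compound discounting: the factor for period t is the product of 1/(1 + r) over the per-period rates up to t, and the net present value is the factor-weighted sum of the net cash flows, one entry per period starting at t = 0. A minimal standalone sketch of that arithmetic (illustrative only; my_discount_factor and my_npv are hypothetical helpers, not the library implementation):

import math

def my_discount_factor(rates):
    # compound discount factor, given the per-period discount rates up to period t
    factor = 1.0
    for r in rates:
        factor /= 1.0 + r
    return factor

def my_npv(rates, net_cash_flows):
    # net present value of a cash flow stream indexed from period t = 0
    return sum(
        ncf * my_discount_factor(rates[:t]) for t, ncf in enumerate(net_cash_flows)
    )

# reproduces the variable-rate factors expected in test_discount_factors below
rates = [0.035, 0.05, 0.075, 0.06]
expected = [1.00000, 0.96618, 0.92017, 0.85598, 0.80753]
computed = [my_discount_factor(rates[:t]) for t in range(len(rates) + 1)]
assert all(math.isclose(c, e, abs_tol=0.001) for c, e in zip(computed, expected))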
@@ -10,185 +10,130 @@ from src.topupopt.data.finance.utils import ArcInvestments # local, external import math -#****************************************************************************** -#****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** + class TestArcInvestments: - def test_object_creation(self): - discount_rate = 0.035 analysis_period = 20 - discount_rates = tuple( - discount_rate for i in range(analysis_period) - ) - cash_flows_npv = [ - 3.673079208612225, - 1.5610827143687658, - 17, - 117, - 1842 - ] - abs_cash_flows_npv = [ - 1e-1, - 1e-1, - 1e-1, - 0.5, - 1 - ] + discount_rates = tuple(discount_rate for i in range(analysis_period)) + cash_flows_npv = [3.673079208612225, 1.5610827143687658, 17, 117, 1842] + abs_cash_flows_npv = [1e-1, 1e-1, 1e-1, 0.5, 1] investments = [] - + # limited longevity operation, does not go beyond planning horizon cash_flow = 1 cash_flow_start = 1 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=cash_flow, - start_period=cash_flow_start, - longevity=4 - ) + cash_flow=cash_flow, start_period=cash_flow_start, longevity=4 + ) investments.append(myinv) - + # limited longevity operation, goes beyond planning horizon cash_flow = 1 cash_flow_start = 18 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=cash_flow, - start_period=cash_flow_start, - longevity=4 - ) + cash_flow=cash_flow, start_period=cash_flow_start, longevity=4 + ) investments.append(myinv) - + # D&V omkostninger kundeanlæg: Sengeløse Skole cash_flow = 1.2 cash_flow_start = 1 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=cash_flow, - start_period=cash_flow_start, - longevity=None - ) + cash_flow=cash_flow, start_period=cash_flow_start, longevity=None + ) investments.append(myinv) - + # D&V omkostninger kundeanlæg: Større forbrugere - cash_flow = 6/10 + cash_flow = 6 / 10 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=10*cash_flow, - start_period=1, - longevity=None - ) + cash_flow=10 * cash_flow, start_period=1, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=3*cash_flow, - start_period=2, - longevity=None - ) + cash_flow=3 * cash_flow, start_period=2, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=1*cash_flow, - start_period=3, - longevity=None - ) + cash_flow=1 * cash_flow, start_period=3, longevity=None + ) investments.append(myinv) - + # D&V omkostninger kundeanlæg: Mindre forbrugere cash_flow = 0.320 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=197*cash_flow, - start_period=1, - longevity=None - ) + cash_flow=197 * cash_flow, start_period=1, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(233-197)*cash_flow, - start_period=2, - longevity=None - ) + cash_flow=(233 - 197) * cash_flow, start_period=2, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(269-233)*cash_flow, - start_period=3, - longevity=None - ) + cash_flow=(269 - 233) * cash_flow, start_period=3, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(305-269)*cash_flow, - start_period=4, - longevity=None - ) + cash_flow=(305 - 269) * cash_flow, start_period=4, longevity=None + ) myinv.add_operational_cash_flows( - 
cash_flow=(341-305)*cash_flow, - start_period=5, - longevity=None - ) + cash_flow=(341 - 305) * cash_flow, start_period=5, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(377-341)*cash_flow, - start_period=6, - longevity=None - ) + cash_flow=(377 - 341) * cash_flow, start_period=6, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(413-377)*cash_flow, - start_period=7, - longevity=None - ) + cash_flow=(413 - 377) * cash_flow, start_period=7, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(449-413)*cash_flow, - start_period=8, - longevity=None - ) + cash_flow=(449 - 413) * cash_flow, start_period=8, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(488-449)*cash_flow, - start_period=9, - longevity=None - ) + cash_flow=(488 - 449) * cash_flow, start_period=9, longevity=None + ) investments.append(myinv) # check - for inv, true_npv, _abs in zip( - investments, - cash_flows_npv, - abs_cash_flows_npv - ): + for inv, true_npv, _abs in zip(investments, cash_flows_npv, abs_cash_flows_npv): assert math.isclose(inv.net_present_value(), true_npv, abs_tol=_abs) # create object number_options = 5 static_loss = { - (h, q, k): 1+q*0.25+h*0.15+k*0.05 + (h, q, k): 1 + q * 0.25 + h * 0.15 + k * 0.05 for h in range(number_options) for q in range(2) for k in range(3) - } + } arc_invs = ArcInvestments( investments=investments, - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - #minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + # minimum_cost=tuple(1+o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=False, static_loss=static_loss, - validate=True - ) + validate=True, + ) # check the costs for _npv, true_npv, _abs in zip( - arc_invs.minimum_cost, - cash_flows_npv, - abs_cash_flows_npv - ): + arc_invs.minimum_cost, cash_flows_npv, abs_cash_flows_npv + ): assert math.isclose(_npv, true_npv, abs_tol=_abs) # change something in the investments for inv in arc_invs.investments: inv.add_investment( - investment=1, - investment_period=0, - investment_longevity=10 - ) + investment=1, investment_period=0, investment_longevity=10 + ) # update the minimum costs arc_invs.update_minimum_cost() # make sure the comparison fails for _npv, true_npv, _abs in zip( - arc_invs.minimum_cost, - cash_flows_npv, - abs_cash_flows_npv - ): + arc_invs.minimum_cost, cash_flows_npv, abs_cash_flows_npv + ): error_raised = False try: assert math.isclose(_npv, true_npv, abs_tol=_abs) @@ -197,1417 +142,1285 @@ class TestArcInvestments: assert error_raised # new true values new_true_npv = [ - 4.673079208612225, - 2.561082714368766, - 18.054883962342764, - 117.50524073646935, - 1843.1804103901486 - ] + 4.673079208612225, + 2.561082714368766, + 18.054883962342764, + 117.50524073646935, + 1843.1804103901486, + ] # print([inv.net_present_value() for inv in arc_invs]) for _npv, true_npv, _abs in zip( - arc_invs.minimum_cost, - new_true_npv, - abs_cash_flows_npv - ): - assert math.isclose(_npv, true_npv, abs_tol=_abs) + arc_invs.minimum_cost, new_true_npv, abs_cash_flows_npv + ): + assert math.isclose(_npv, true_npv, abs_tol=_abs) + # ***************************************************************************** # ***************************************************************************** + class TestDataFinance: - # TODO: make sure that all methods work with 
variable discount rates - + # assert that the discount factors match - + def test_operational_cash_flows(self): - discount_rate = 0.035 - + analysis_period = 20 - - discount_rates = tuple( - discount_rate for i in range(analysis_period) - ) - + + discount_rates = tuple(discount_rate for i in range(analysis_period)) + # limited longevity operation, does not go beyond planning horizon - + cash_flow = 1 cash_flow_start = 1 - cash_flows_npv = 3.673079208612225 + cash_flows_npv = 3.673079208612225 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=cash_flow, - start_period=cash_flow_start, - longevity=4 - ) + cash_flow=cash_flow, start_period=cash_flow_start, longevity=4 + ) mynpv = myinv.net_present_value() assert math.isclose(mynpv, cash_flows_npv, abs_tol=1e-1) - + # limited longevity operation, goes beyond planning horizon - + cash_flow = 1 cash_flow_start = 18 cash_flows_npv = 1.5610827143687658 myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=cash_flow, - start_period=cash_flow_start, - longevity=4 - ) + cash_flow=cash_flow, start_period=cash_flow_start, longevity=4 + ) mynpv = myinv.net_present_value() assert math.isclose(mynpv, cash_flows_npv, abs_tol=1e-1) - + # D&V omkostninger kundeanlæg: Sengeløse Skole - + cash_flow = 1.2 - + cash_flow_start = 1 - + cash_flows_npv = 17 - + myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=cash_flow, - start_period=cash_flow_start, - longevity=None - ) + cash_flow=cash_flow, start_period=cash_flow_start, longevity=None + ) mynpv = myinv.net_present_value() - + assert math.isclose(mynpv, cash_flows_npv, abs_tol=1e-1) - + # D&V omkostninger kundeanlæg: Større forbrugere - + cash_flows_npv = 117 - cash_flow = 6/10 - + cash_flow = 6 / 10 + myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=10*cash_flow, - start_period=1, - longevity=None - ) + cash_flow=10 * cash_flow, start_period=1, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=3*cash_flow, - start_period=2, - longevity=None - ) + cash_flow=3 * cash_flow, start_period=2, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=1*cash_flow, - start_period=3, - longevity=None - ) + cash_flow=1 * cash_flow, start_period=3, longevity=None + ) mynpv = myinv.net_present_value() - + assert math.isclose(mynpv, cash_flows_npv, abs_tol=0.5) - + # D&V omkostninger kundeanlæg: Mindre forbrugere - + cash_flows_npv = 1842 cash_flow = 0.320 - + myinv = Investment(discount_rates=discount_rates) myinv.add_operational_cash_flows( - cash_flow=197*cash_flow, - start_period=1, - longevity=None - ) + cash_flow=197 * cash_flow, start_period=1, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(233-197)*cash_flow, - start_period=2, - longevity=None - ) + cash_flow=(233 - 197) * cash_flow, start_period=2, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(269-233)*cash_flow, - start_period=3, - longevity=None - ) + cash_flow=(269 - 233) * cash_flow, start_period=3, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(305-269)*cash_flow, - start_period=4, - longevity=None - ) + cash_flow=(305 - 269) * cash_flow, start_period=4, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(341-305)*cash_flow, - start_period=5, - longevity=None - ) + cash_flow=(341 - 305) * cash_flow, start_period=5, longevity=None + ) myinv.add_operational_cash_flows( - 
cash_flow=(377-341)*cash_flow, - start_period=6, - longevity=None - ) + cash_flow=(377 - 341) * cash_flow, start_period=6, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(413-377)*cash_flow, - start_period=7, - longevity=None - ) + cash_flow=(413 - 377) * cash_flow, start_period=7, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(449-413)*cash_flow, - start_period=8, - longevity=None - ) + cash_flow=(449 - 413) * cash_flow, start_period=8, longevity=None + ) myinv.add_operational_cash_flows( - cash_flow=(488-449)*cash_flow, - start_period=9, - longevity=None - ) + cash_flow=(488 - 449) * cash_flow, start_period=9, longevity=None + ) mynpv = myinv.net_present_value() - + assert math.isclose(mynpv, cash_flows_npv, abs_tol=1) - + # ************************************************************************* # ************************************************************************* - + # assert that the discount factors match - + def test_discount_factors(self): - - years = [2021, - 2022, - 2023, - 2024, - 2025, - 2026, - 2027, - 2028, - 2029, - 2030, - 2031, - 2036, - 2037, - 2038, - 2039, - 2040, - 2041] - - factors = [1.000, - 0.966, - 0.934, - 0.902, - 0.871, - 0.842, - 0.814, - 0.786, - 0.759, - 0.734, - 0.709, - 0.597, - 0.577, - 0.557, - 0.538, - 0.520, - 0.503] - + years = [ + 2021, + 2022, + 2023, + 2024, + 2025, + 2026, + 2027, + 2028, + 2029, + 2030, + 2031, + 2036, + 2037, + 2038, + 2039, + 2040, + 2041, + ] + + factors = [ + 1.000, + 0.966, + 0.934, + 0.902, + 0.871, + 0.842, + 0.814, + 0.786, + 0.759, + 0.734, + 0.709, + 0.597, + 0.577, + 0.557, + 0.538, + 0.520, + 0.503, + ] + test_factors = [ - discount_factor( - [0.035 for i in range(year-years[0])] - ) - for year in years - ] - + discount_factor([0.035 for i in range(year - years[0])]) for year in years + ] + for i, factor in enumerate(factors): - assert math.isclose(factor, test_factors[i], abs_tol=0.001) - + # ********************************************************************* - + # variable discount factors - + discount_rates = [0.035, 0.05, 0.075, 0.06] discount_factors = [1.00000, 0.96618, 0.92017, 0.85598, 0.80753] - + test_discount_factors = [ - discount_factor( - discount_rates[0:t] - ) - for t in range(len(discount_rates)+1) - ] - + discount_factor(discount_rates[0:t]) for t in range(len(discount_rates) + 1) + ] + assert len(discount_factors) == len(test_discount_factors) - + for df_true, df in zip(discount_factors, test_discount_factors): - assert math.isclose(df_true, df, abs_tol=0.001) - + # ************************************************************************* # ************************************************************************* - + def test_salvage_value_sengelose_linear_depreciation(self): - # ********************************************************************* # ********************************************************************* - + # example # 1: Investering kundeanlæg, Sengeløse Skole - + commissioning_delay_after_investment = 0 - + investment_period = 1 - - investment = 180 # 1E3 DKK - - investment_longevity = 25 # if simultaneous_commissioning_investment else 25 # years - - analysis_period_span = 20 # years - + + investment = 180 # 1E3 DKK + + investment_longevity = ( + 25 # if simultaneous_commissioning_investment else 25 # years + ) + + analysis_period_span = 20 # years + discount_rate = 0.035 - + # discount rates' tuple: size does not change with first_period_is_present_time - - discount_rates = tuple( - discount_rate for i in range(analysis_period_span) - 
-
+
    def test_salvage_value_sengelose_linear_depreciation(self):
-
        # *********************************************************************
        # *********************************************************************
-
+
        # example # 1: Investering kundeanlæg, Sengeløse Skole
-
+
        commissioning_delay_after_investment = 0
-
+
        investment_period = 1
-
-        investment = 180 # 1E3 DKK
-
-        investment_longevity = 25 # if simultaneous_commissioning_investment else 25 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 180  # 1E3 DKK
+
+        investment_longevity = (
+            25  # if simultaneous_commissioning_investment else 25 # years
+        )
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
+
        # discount rates' tuple: size does not change with first_period_is_present_time
-
-        discount_rates = tuple(
-            discount_rate for i in range(analysis_period_span)
-            )
-
+
+        discount_rates = tuple(discount_rate for i in range(analysis_period_span))
+
        # *********************************************************************
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            commissioning_delay_after_investment=commissioning_delay_after_investment
-            )
-
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+        )
+
        assert math.isclose(residual_value, 36, abs_tol=1)
-
-        net_cash_flows = list(
-            0
-            for i in range(analysis_period_span+1)
-            )
+
+        net_cash_flows = list(0 for i in range(analysis_period_span + 1))
        net_cash_flows[investment_period] = investment
-        net_cash_flows[analysis_period_span] = (
-            -residual_value
-            )
-
+        net_cash_flows[analysis_period_span] = -residual_value
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
        assert math.isclose(npv_inv_horizon, 155.82067163872074, abs_tol=1e-3)
-
+
        myinv = Investment(discount_rates=discount_rates)
-        myinv.add_investment(investment=investment,
-                             investment_period=investment_period,
-                             investment_longevity=investment_longevity,
-                             commissioning_delay_after_investment=commissioning_delay_after_investment,
-                             salvage_value_method='asd')
+        myinv.add_investment(
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+            salvage_value_method="asd",
+        )
        mynpv = myinv.net_present_value()
        assert math.isclose(mynpv, 155.82067163872074, abs_tol=1e-3)
-
+
        # *********************************************************************
        # *********************************************************************
-
+
        # example # 2: Forsyningsledning
-
+
        investment_period = 1
-
-        investment = 13000 # 1E3 DKK
-
-        investment_longevity = 60 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 13000  # 1E3 DKK
+
+        investment_longevity = 60  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
-        discount_rates = tuple(
-            discount_rate for i in range(analysis_period_span)
-            )
-
+
+        discount_rates = tuple(discount_rate for i in range(analysis_period_span))
+
        # *********************************************************************
-
+
        # using mean annual asset devaluation method
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            commissioning_delay_after_investment=commissioning_delay_after_investment
-            )
-
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+        )
+
        assert math.isclose(residual_value, 8667, abs_tol=1)
-
-        net_cash_flows = list(
-            0
-            for i in range(analysis_period_span+1)
-            )
+
+        net_cash_flows = list(0 for i in range(analysis_period_span + 1))
        net_cash_flows[investment_period] = investment
-        net_cash_flows[analysis_period_span] = (
-            -residual_value
-            )
-
+        net_cash_flows[analysis_period_span] = -residual_value
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
        assert math.isclose(npv_inv_horizon, 8204.815475022142, abs_tol=1e-3)
-
+
        myinv = Investment(discount_rates=discount_rates)
-        myinv.add_investment(investment=investment,
-                             investment_period=investment_period,
-                             investment_longevity=investment_longevity,
-                             commissioning_delay_after_investment=commissioning_delay_after_investment,
-                             salvage_value_method='asd')
+        myinv.add_investment(
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+            salvage_value_method="asd",
+        )
        mynpv = myinv.net_present_value()
        assert math.isclose(mynpv, 8204.815475022142, abs_tol=1e-3)
-
+
        # *********************************************************************
        # *********************************************************************
-
+
        # example 3: Boosterpumpeanlæg
-
+
        investment_period = 7
-
-        investment = 1500 # 1E3 DKK
-
-        investment_longevity = 25 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1500  # 1E3 DKK
+
+        investment_longevity = 25  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
-        discount_rates = tuple(
-            discount_rate for i in range(analysis_period_span)
-            )
-
+
+        discount_rates = tuple(discount_rate for i in range(analysis_period_span))
+
        # *********************************************************************
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            commissioning_delay_after_investment=commissioning_delay_after_investment
-            )
-
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+        )
+
        assert math.isclose(residual_value, 660, abs_tol=1e-3)
-
-        net_cash_flows = list(
-            0
-            for i in range(analysis_period_span+1)
-            )
+
+        net_cash_flows = list(0 for i in range(analysis_period_span + 1))
        net_cash_flows[investment_period] = investment
-        net_cash_flows[analysis_period_span] = (
-            -residual_value
-            )
-
+        net_cash_flows[analysis_period_span] = -residual_value
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
        assert math.isclose(npv_inv_horizon, 847, abs_tol=0.3)
-
+
        myinv = Investment(discount_rates=discount_rates)
-        myinv.add_investment(investment=investment,
-                             investment_period=investment_period,
-                             investment_longevity=investment_longevity,
-                             commissioning_delay_after_investment=commissioning_delay_after_investment,
-                             salvage_value_method='asd')
+        myinv.add_investment(
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+            salvage_value_method="asd",
+        )
        mynpv = myinv.net_present_value()
        assert math.isclose(mynpv, 847, abs_tol=0.3)
-
+
        # *********************************************************************
        # *********************************************************************
-
+
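All the residual values asserted in this test follow straight-line depreciation: the asset loses investment/longevity per year in service, and whatever life remains when the horizon closes is credited back, e.g. 13000 * (60 - 20) / 60 = 8667 in example 2. A sketch of that rule, inferred from these assertions alone (the default commissioning delay of one period is likewise an inference, made explicit here):

def salvage_value_linear_depreciation(
    investment,
    investment_period,
    investment_longevity,
    analysis_period_span,
    commissioning_delay_after_investment=1,  # assumed default, per the tests
):
    if investment_period > analysis_period_span:
        raise ValueError("the investment must take place within the analysis period")
    # periods the asset is actually in service before the horizon closes
    years_used = (
        analysis_period_span
        - investment_period
        - commissioning_delay_after_investment
        + 1
    )
    remaining_life = max(investment_longevity - years_used, 0)
    return investment * remaining_life / investment_longevity

With the delay set to 0, this yields 36, 8667, 660 and 0 for examples 1 through 4, matching the asserts.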
        # example 4: Investering, Sengeløse Skole, (varmepumper)
-
+
        investment_period = 1
-
-        investment = 1925 # 1E3 DKK
-
-        investment_longevity = 20 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1925  # 1E3 DKK
+
+        investment_longevity = 20  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
-        discount_rates = tuple(
-            discount_rate for i in range(analysis_period_span)
-            )
-
+
+        discount_rates = tuple(discount_rate for i in range(analysis_period_span))
+
        # *********************************************************************
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            commissioning_delay_after_investment=commissioning_delay_after_investment
-            )
-
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+        )
+
        assert math.isclose(residual_value, 0, abs_tol=1e-3)
-
-        net_cash_flows = list(
-            0
-            for i in range(analysis_period_span+1)
-            )
+
+        net_cash_flows = list(0 for i in range(analysis_period_span + 1))
        net_cash_flows[investment_period] = investment
-        net_cash_flows[analysis_period_span] = (
-            -residual_value
-            )
-
+        net_cash_flows[analysis_period_span] = -residual_value
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
        assert math.isclose(npv_inv_horizon, 1860, abs_tol=0.3)
-
+
        myinv = Investment(discount_rates=discount_rates)
-        myinv.add_investment(investment=investment,
-                             investment_period=investment_period,
-                             investment_longevity=investment_longevity,
-                             commissioning_delay_after_investment=commissioning_delay_after_investment,
-                             salvage_value_method='asd')
+        myinv.add_investment(
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
+            commissioning_delay_after_investment=commissioning_delay_after_investment,
+            salvage_value_method="asd",
+        )
        mynpv = myinv.net_present_value()
        assert math.isclose(mynpv, 1860, abs_tol=0.3)
-
+
        # *********************************************************************
        # *********************************************************************
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_scrap_value_annuity(self):
-
-        # Source:
+        # Source:
        # Vejledning i samfundsøkonomiske analyser på energiområdet, juli 2021
        # Energistyrelsen, page 19
-
+
        investment_period = 0
-
-        investment = 1E6 # 1E3 DKK
-
-        investment_longevity = 30 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1e6  # 1E3 DKK
+
+        investment_longevity = 30  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
+
        discount_rates = tuple(
-            discount_rate
-            for i in range(investment_longevity+investment_period)
-            )
-
+            discount_rate for i in range(investment_longevity + investment_period)
+        )
+
        # *********************************************************************
        # *********************************************************************
-
+
        # calculate the net present value with the salvage value deducted
-
+
        # annuity method
-
+
        annuity = (
-            investment*
-            discount_rate/
-            (1-(1+discount_rate)**(-investment_longevity))
-            )
-
+            investment
+            * discount_rate
+            / (1 - (1 + discount_rate) ** (-investment_longevity))
+        )
+
        net_cash_flows = list(
-            annuity
-            for i in range(investment_longevity+investment_period+1)
-            )
-
-        for year_index in range(investment_period+1):
+            annuity for i in range(investment_longevity + investment_period + 1)
+        )
+
+        for year_index in range(investment_period + 1):
            net_cash_flows[year_index] = 0
-
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates[
-                0:analysis_period_span
-                ],
-            net_cash_flows=net_cash_flows[
-                0:analysis_period_span+1
-                ]
-            )
-
+            discount_rates=discount_rates[0:analysis_period_span],
+            net_cash_flows=net_cash_flows[0 : analysis_period_span + 1],
+        )
+
        assert math.isclose(npv_inv_horizon, 772747.2928688908, abs_tol=1e-3)
-
+
        # *********************************************************************
-
+
        # net present value for the whole investment
-
+
        npv_asset_long = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
-        assert math.isclose(npv_asset_long, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
+        assert math.isclose(npv_asset_long, 1e6, abs_tol=1e-3)
+
        # calculate discounted salvage value directly
-
+
        npv_salvage = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert math.isclose(npv_salvage, npv_asset_long-npv_inv_horizon, abs_tol=1e-3)
-
+        )
+
+        assert math.isclose(npv_salvage, npv_asset_long - npv_inv_horizon, abs_tol=1e-3)
+
        # salvage value, as seen from the last period
-
+
        und_salvage_value = salvage_value_annuity(
            investment=investment,
-            discount_rate=discount_rate,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            analysis_period_span=analysis_period_span
-            )
-
+            discount_rate=discount_rate,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            analysis_period_span=analysis_period_span,
+        )
+
        assert math.isclose(und_salvage_value, 452184.9058419504, abs_tol=1e-3)
-
+
        # *********************************************************************
-
+
        # use only the part of discount_rates that overlaps with the planning period
-
+
        myinv = Investment(discount_rates=discount_rates[0:analysis_period_span])
-
-        myinv.add_investment(investment=investment,
-                             investment_period=investment_period,
-                             investment_longevity=investment_longevity)
+
+        myinv.add_investment(
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
+        )
        mynpv = myinv.net_present_value()
-
+
        assert math.isclose(mynpv, npv_inv_horizon, abs_tol=1e-3)
-
-        # *********************************************************************
+
+        # *********************************************************************
        # *********************************************************************
-
+
        # trigger ValueError
-
+
        error_triggered = False
-        investment_period = analysis_period_span+1
+        investment_period = analysis_period_span + 1
        try:
            npv_salvage = present_salvage_value_annuity(
-                investment=investment,
-                investment_longevity=investment_longevity,
-                investment_period=investment_period,
-                discount_rate=discount_rate,
+                investment=investment,
+                investment_longevity=investment_longevity,
+                investment_period=investment_period,
+                discount_rate=discount_rate,
                analysis_period_span=analysis_period_span,
-                )
+            )
        except ValueError:
            error_triggered = True
        assert error_triggered
-
+
        # *********************************************************************
-
+
        investment = 1
-
+
        investment_period = 0
-
+
        investment_longevity = 4
-
+
        analysis_period_span = 3
-
+
        discount_rate = 0.035
-
+
        # *********************************************************************
-
+
        # analysis period equals longevity: no salvage value
-
+
        npv_salvage, annuity = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
-            analysis_period_span=analysis_period_span+1,
-            return_annuity=True
-            )
-
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
+            analysis_period_span=analysis_period_span + 1,
+            return_annuity=True,
+        )
+
        assert npv_salvage == 0.0
        assert annuity > 0
-
+
        # *********************************************************************
-
+
        # increased longevity
-
+
        npv_salvage, annuity2 = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity+1,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity + 1,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            return_annuity=True
-            )
-
+            return_annuity=True,
+        )
+
        assert math.isclose(npv_salvage, 0.37948959437673335, abs_tol=1e-3)
        assert annuity > annuity2
-
+
        # *********************************************************************
        # *********************************************************************
-
+
    # *************************************************************************
    # *************************************************************************
-
+
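The figures in test_scrap_value_annuity are mutually consistent: the annuity repaying 1e6 over 30 years at 3.5 % is about 54 371.3 per year, the ten annuities still outstanding at the 20-year horizon are worth about 452 184.9 at that point, and discounting that by 1.035**-20 gives the roughly 227 252.7 gap between npv_asset_long and npv_inv_horizon. A self-contained arithmetic check, assuming only the standard annuity formulas rather than any topupopt internals:

import math

investment, r = 1e6, 0.035
longevity, horizon = 30, 20
annuity = investment * r / (1 - (1 + r) ** -longevity)  # about 54371.3 per year
# value, at the end of the horizon, of the annuities still outstanding
salvage_at_horizon = annuity * (1 - (1 + r) ** -(longevity - horizon)) / r
assert math.isclose(salvage_at_horizon, 452184.9058419504, abs_tol=1e-3)
# discounted to the present, it is the part of the 1e6 that lies beyond the horizon
assert math.isclose(
    salvage_at_horizon * (1 + r) ** -horizon, 1e6 - 772747.2928688908, abs_tol=1e-3
)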
    def test_scrap_value_annuity_longer_longevity(self):
-
-        # Source:
+        # Source:
        # Vejledning i samfundsøkonomiske analyser på energiområdet, juli 2021
        # Energistyrelsen, page 19
-
+
        investment_period = 0
-
-        investment = 1E6 # 1E3 DKK
-
-        investment_longevity = 35 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1e6  # 1E3 DKK
+
+        investment_longevity = 35  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
+
        discount_rates = tuple(
-            discount_rate
-            for i in range(investment_longevity+investment_period)
-            )
-
+            discount_rate for i in range(investment_longevity + investment_period)
+        )
+
        # *********************************************************************
        # *********************************************************************
-
+
        # calculate the net present value with the salvage value deducted
-
+
        # annuity method
-
+
        annuity = (
-            investment*
-            discount_rate/
-            (1-(1+discount_rate)**(-investment_longevity))
-            )
-
+            investment
+            * discount_rate
+            / (1 - (1 + discount_rate) ** (-investment_longevity))
+        )
+
        net_cash_flows = list(
-            annuity
-            for i in range(investment_longevity+investment_period+1)
-            )
-
-        for year_index in range(investment_period+1):
+            annuity for i in range(investment_longevity + investment_period + 1)
+        )
+
+        for year_index in range(investment_period + 1):
            net_cash_flows[year_index] = 0
-
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates[
-                0:analysis_period_span
-                ],
-            net_cash_flows=net_cash_flows[
-                0:analysis_period_span+1
-                ]
-            )
-
+            discount_rates=discount_rates[0:analysis_period_span],
+            net_cash_flows=net_cash_flows[0 : analysis_period_span + 1],
+        )
+
        assert math.isclose(npv_inv_horizon, 710596.68, abs_tol=1e-2)
-
+
        # *********************************************************************
-
+
        # net present value for the whole investment
-
+
        npv_asset_long = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
-        assert math.isclose(npv_asset_long, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
+        assert math.isclose(npv_asset_long, 1e6, abs_tol=1e-3)
+
        # calculate discounted salvage value directly
-
+
        npv_salvage = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert math.isclose(
-            npv_salvage, npv_asset_long-npv_inv_horizon, abs_tol=1e-3
-            )
-
+        )
+
+        assert math.isclose(npv_salvage, npv_asset_long - npv_inv_horizon, abs_tol=1e-3)
+
        # salvage value, as seen from the last period
-
+
        und_salvage_value = salvage_value_annuity(
            investment=investment,
-            discount_rate=discount_rate,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            analysis_period_span=analysis_period_span
-            )
-
+            discount_rate=discount_rate,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            analysis_period_span=analysis_period_span,
+        )
+
        assert math.isclose(und_salvage_value, 575851.51, abs_tol=1e-3)
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_scrap_value_annuity_starting_later(self):
-
-        # Source:
+        # Source:
        # Vejledning i samfundsøkonomiske analyser på energiområdet, juli 2021
        # Energistyrelsen, page 19
-
+
        investment_period = 10
-
-        investment = 1E6 # 1E3 DKK
-
-        investment_longevity = 30 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1e6  # 1E3 DKK
+
+        investment_longevity = 30  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
+
        discount_rates = tuple(
-            discount_rate
-            for i in range(investment_longevity+investment_period)
-            )
-
+            discount_rate for i in range(investment_longevity + investment_period)
+        )
+
        # *********************************************************************
        # *********************************************************************
-
+
        # calculate the net present value with the salvage value deducted
-
+
        # annuity method
-
+
        annuity = (
-            investment*
-            discount_rate/
-            (1-(1+discount_rate)**(-investment_longevity))
-            )
-
+            investment
+            * discount_rate
+            / (1 - (1 + discount_rate) ** (-investment_longevity))
+        )
+
        net_cash_flows = list(
-            annuity
-            for i in range(investment_longevity+investment_period+1)
-            )
-
-        for year_index in range(investment_period+1):
+            annuity for i in range(investment_longevity + investment_period + 1)
+        )
+
+        for year_index in range(investment_period + 1):
            net_cash_flows[year_index] = 0
-
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates[
-                0:analysis_period_span
-                ],
-            net_cash_flows=net_cash_flows[
-                0:analysis_period_span+1
-                ]
-            )
-
+            discount_rates=discount_rates[0:analysis_period_span],
+            net_cash_flows=net_cash_flows[0 : analysis_period_span + 1],
+        )
+
        assert math.isclose(npv_inv_horizon, 320562.3870269, abs_tol=1e-2)
-
+
        # *********************************************************************
-
+
        # net present value for the whole investment
-
+
        npv_asset_long = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
        assert math.isclose(npv_asset_long, 708918.8137098, abs_tol=1e-3)
-
+
        # calculate discounted salvage value directly
-
+
        npv_salvage = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert math.isclose(
-            npv_salvage, npv_asset_long-npv_inv_horizon, abs_tol=1e-3
-            )
+        )
+
+        assert math.isclose(npv_salvage, npv_asset_long - npv_inv_horizon, abs_tol=1e-3)
        assert math.isclose(npv_salvage, 388356.4266828, abs_tol=1e-3)
-
+
        # salvage value, as seen from the last period
-
+
        und_salvage_value = salvage_value_annuity(
            investment=investment,
-            discount_rate=discount_rate,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            analysis_period_span=analysis_period_span
-            )
-
+            discount_rate=discount_rate,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            analysis_period_span=analysis_period_span,
+        )
+
        assert math.isclose(und_salvage_value, 772747.2928689, abs_tol=1e-3)
-
+
    # *************************************************************************
    # *************************************************************************
-
+
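The starting-later case is the same arithmetic shifted by ten periods: npv_asset_long = 1e6 * 1.035**-10, roughly 708 918.81; und_salvage_value of about 772 747.29 is the worth of the twenty annuities still outstanding at the horizon (numerically the same figure as npv_inv_horizon in test_scrap_value_annuity, where twenty annuities are valued from time zero); and npv_salvage of about 388 356.43 is exactly that amount discounted twenty more years, 772 747.29 * 1.035**-20.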
    def test_scrap_value_annuity_longer_planning_period(self):
-
-        # Source:
+        # Source:
        # Vejledning i samfundsøkonomiske analyser på energiområdet, juli 2021
        # Energistyrelsen, page 19
-
+
        investment_period = 0
-
-        investment = 1E6 # 1E3 DKK
-
-        investment_longevity = 30 # years
-
-        analysis_period_span = 23 # years
-
+
+        investment = 1e6  # 1E3 DKK
+
+        investment_longevity = 30  # years
+
+        analysis_period_span = 23  # years
+
        discount_rate = 0.035
-
+
        discount_rates = tuple(
-            discount_rate
-            for i in range(investment_longevity+investment_period)
-            )
-
+            discount_rate for i in range(investment_longevity + investment_period)
+        )
+
        # *********************************************************************
-
+
        # calculate the net present value with the salvage value deducted
-
+
        # annuity method
-
+
        annuity = (
-            investment*
-            discount_rate/
-            (1-(1+discount_rate)**(-investment_longevity))
-            )
-
+            investment
+            * discount_rate
+            / (1 - (1 + discount_rate) ** (-investment_longevity))
+        )
+
        net_cash_flows = list(
-            annuity
-            for i in range(investment_longevity+investment_period+1)
-            )
-
-        for year_index in range(investment_period+1):
+            annuity for i in range(investment_longevity + investment_period + 1)
+        )
+
+        for year_index in range(investment_period + 1):
            net_cash_flows[year_index] = 0
-
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates[
-                0:analysis_period_span
-                ],
-            net_cash_flows=net_cash_flows[
-                0:analysis_period_span+1
-                ]
-            )
-
+            discount_rates=discount_rates[0:analysis_period_span],
+            net_cash_flows=net_cash_flows[0 : analysis_period_span + 1],
+        )
+
        assert math.isclose(npv_inv_horizon, 849302.517460684, abs_tol=1e-3)
-
+
        # *********************************************************************
-
+
        # net present value for the whole investment
-
+
        npv_asset_long = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
-        assert math.isclose(npv_asset_long, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
+        assert math.isclose(npv_asset_long, 1e6, abs_tol=1e-3)
+
        # calculate discounted salvage value directly
-
+
        npv_salvage = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert math.isclose(
-            npv_salvage, npv_asset_long-npv_inv_horizon, abs_tol=1e-3
-            )
-
+        )
+
+        assert math.isclose(npv_salvage, npv_asset_long - npv_inv_horizon, abs_tol=1e-3)
+
        # salvage value, as seen from the last period
-
+
        und_salvage_value = salvage_value_annuity(
            investment=investment,
-            discount_rate=discount_rate,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            analysis_period_span=analysis_period_span
-            )
-
+            discount_rate=discount_rate,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            analysis_period_span=analysis_period_span,
+        )
+
        assert math.isclose(und_salvage_value, 332455.89838989300, abs_tol=1e-3)
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_scrap_value_annuity_matching_periods(self):
-
-        # Source:
+        # Source:
        # Vejledning i samfundsøkonomiske analyser på energiområdet, juli 2021
        # Energistyrelsen, page 19
-
+
        investment_period = 0
-
-        investment = 1E6 # 1E3 DKK
-
-        investment_longevity = 20 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1e6  # 1E3 DKK
+
+        investment_longevity = 20  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
+
        discount_rates = tuple(
-            discount_rate
-            for i in range(investment_longevity+investment_period)
-            )
-
+            discount_rate for i in range(investment_longevity + investment_period)
+        )
+
        # *********************************************************************
-
+
        # calculate the net present value with the salvage value deducted
-
+
        # annuity method
-
+
        annuity = (
-            investment*
-            discount_rate/
-            (1-(1+discount_rate)**(-investment_longevity))
-            )
-
+            investment
+            * discount_rate
+            / (1 - (1 + discount_rate) ** (-investment_longevity))
+        )
+
        net_cash_flows = list(
-            annuity
-            for i in range(investment_longevity+investment_period+1)
-            )
-
-        for year_index in range(investment_period+1):
+            annuity for i in range(investment_longevity + investment_period + 1)
+        )
+
+        for year_index in range(investment_period + 1):
            net_cash_flows[year_index] = 0
-
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates[
-                0:analysis_period_span
-                ],
-            net_cash_flows=net_cash_flows[
-                0:analysis_period_span+1
-                ]
-            )
-
-        assert math.isclose(npv_inv_horizon, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates[0:analysis_period_span],
+            net_cash_flows=net_cash_flows[0 : analysis_period_span + 1],
+        )
+
+        assert math.isclose(npv_inv_horizon, 1e6, abs_tol=1e-3)
+
        # *********************************************************************
-
+
        # net present value for the whole investment
-
+
        npv_asset_long = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
-        assert math.isclose(npv_asset_long, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
+        assert math.isclose(npv_asset_long, 1e6, abs_tol=1e-3)
+
        # calculate discounted salvage value directly
-
+
        npv_salvage = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert math.isclose(
-            npv_salvage, npv_asset_long-npv_inv_horizon, abs_tol=1e-3
-            )
-
+        )
+
+        assert math.isclose(npv_salvage, npv_asset_long - npv_inv_horizon, abs_tol=1e-3)
+
        # salvage value, as seen from the last period
-
+
        und_salvage_value = salvage_value_annuity(
            investment=investment,
-            discount_rate=discount_rate,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            analysis_period_span=analysis_period_span
-            )
-
+            discount_rate=discount_rate,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            analysis_period_span=analysis_period_span,
+        )
+
        assert math.isclose(und_salvage_value, 0, abs_tol=1e-3)
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_scrap_value_annuity_ending_before_horizon(self):
-
-        # Source:
+        # Source:
        # Vejledning i samfundsøkonomiske analyser på energiområdet, juli 2021
        # Energistyrelsen, page 19
-
+
        investment_period = 0
-
-        investment = 1E6 # 1E3 DKK
-
-        investment_longevity = 15 # years
-
-        analysis_period_span = 20 # years
-
+
+        investment = 1e6  # 1E3 DKK
+
+        investment_longevity = 15  # years
+
+        analysis_period_span = 20  # years
+
        discount_rate = 0.035
-
+
        discount_rates = tuple(
-            discount_rate
-            for i in range(investment_longevity+investment_period)
-            )
-
+            discount_rate for i in range(investment_longevity + investment_period)
+        )
+
        # *********************************************************************
-
+
        # calculate the net present value with the salvage value deducted
-
+
        # annuity method
-
+
        annuity = (
-            investment*
-            discount_rate/
-            (1-(1+discount_rate)**(-investment_longevity))
-            )
-
+            investment
+            * discount_rate
+            / (1 - (1 + discount_rate) ** (-investment_longevity))
+        )
+
        net_cash_flows = list(
-            annuity
-            for i in range(investment_longevity+investment_period+1)
-            )
-
-        for year_index in range(investment_period+1):
+            annuity for i in range(investment_longevity + investment_period + 1)
+        )
+
+        for year_index in range(investment_period + 1):
            net_cash_flows[year_index] = 0
-
+
        npv_inv_horizon = npv(
-            discount_rates=discount_rates[
-                0:analysis_period_span
-                ],
-            net_cash_flows=net_cash_flows[
-                0:analysis_period_span+1
-                ]
-            )
-
-        assert math.isclose(npv_inv_horizon, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates[0:analysis_period_span],
+            net_cash_flows=net_cash_flows[0 : analysis_period_span + 1],
+        )
+
+        assert math.isclose(npv_inv_horizon, 1e6, abs_tol=1e-3)
+
        # *********************************************************************
-
+
        # net present value for the whole investment
-
+
        npv_asset_long = npv(
-            discount_rates=discount_rates,
-            net_cash_flows=net_cash_flows
-            )
-
-        assert math.isclose(npv_asset_long, 1E6, abs_tol=1e-3)
-
+            discount_rates=discount_rates, net_cash_flows=net_cash_flows
+        )
+
+        assert math.isclose(npv_asset_long, 1e6, abs_tol=1e-3)
+
        # calculate discounted salvage value directly
-
+
        npv_salvage = present_salvage_value_annuity(
-            investment=investment,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            discount_rate=discount_rate,
+            investment=investment,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            discount_rate=discount_rate,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert math.isclose(
-            npv_salvage, npv_asset_long-npv_inv_horizon, abs_tol=1e-3
-            )
-
+        )
+
+        assert math.isclose(npv_salvage, npv_asset_long - npv_inv_horizon, abs_tol=1e-3)
+
        # salvage value, as seen from the last period
-
+
        und_salvage_value = salvage_value_annuity(
            investment=investment,
-            discount_rate=discount_rate,
-            investment_longevity=investment_longevity,
-            investment_period=investment_period,
-            analysis_period_span=analysis_period_span
-            )
-
+            discount_rate=discount_rate,
+            investment_longevity=investment_longevity,
+            investment_period=investment_period,
+            analysis_period_span=analysis_period_span,
+        )
+
        assert math.isclose(und_salvage_value, 0, abs_tol=1e-3)
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_scrap_value_commissioning_delay_linear_depreciation(self):
-
-        #**************************************************************************
-
+        # **************************************************************************
+
        investment = 10
-
+
        investment_period = 0
-
+
        investment_longevity = 4
-
+
        analysis_period_span = 3
-
+
        # *********************************************************************
-
+
        # the investment still produces benefits after the evaluation phase
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            )
-
+        )
+
        assert residual_value == 2.5
-
+
        # the investment is delayed
-
+
        investment_period = 1
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            )
-
+        )
+
        assert residual_value == 5.0
-
+
        # the investment is delayed even more
-
+
        investment_period = 2
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            )
-
+        )
+
        assert residual_value == 7.5
-
+
        # the evaluation phase is longer
-
+
        investment_period = 0
-
+
        analysis_period_span = 4
-
+
        residual_value = salvage_value_linear_depreciation(
-            investment=investment,
-            investment_period=investment_period,
-            investment_longevity=investment_longevity,
+            investment=investment,
+            investment_period=investment_period,
+            investment_longevity=investment_longevity,
            analysis_period_span=analysis_period_span,
-            )
-
-        assert residual_value == 0.0
-
+        )
+
+        assert residual_value == 0.0
+
        # trigger ValueError: the investment takes place after the eval. phase
-
-        investment_period = analysis_period_span+1
-
+
+        investment_period = analysis_period_span + 1
+
        error_triggered = False
        try:
            residual_value = salvage_value_linear_depreciation(
-                investment=investment,
-                investment_period=investment_period,
-                investment_longevity=investment_longevity,
-                analysis_period_span=analysis_period_span
-                )
+                investment=investment,
+                investment_period=investment_period,
+                investment_longevity=investment_longevity,
+                analysis_period_span=analysis_period_span,
+            )
        except ValueError:
            error_triggered = True
        assert error_triggered
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_npv(self):
-
        # *********************************************************************
-
+
        # data
-
-        R_t = [0,
-               21579,
-               4002,
-               3302,
-               2952,
-               2952,
-               2952,
-               2952,
-               2952,
-               3198,
-               0,
-               0,
-               0,
-               16154,
-               2952,
-               6452,
-               -21930]
-
-        n_periods = len(R_t)-1
-
-        R_t2 = [ncf_t*1.5 for ncf_t in R_t]
-
+
+        R_t = [
+            0,
+            21579,
+            4002,
+            3302,
+            2952,
+            2952,
+            2952,
+            2952,
+            2952,
+            3198,
+            0,
+            0,
+            0,
+            16154,
+            2952,
+            6452,
+            -21930,
+        ]
+
+        n_periods = len(R_t) - 1
+
+        R_t2 = [ncf_t * 1.5 for ncf_t in R_t]
+
        i_t = tuple([0.035 for k in range(n_periods)])
-
-        i_t2 = tuple([ii_t*1.5 for ii_t in i_t])
-
+
+        i_t2 = tuple([ii_t * 1.5 for ii_t in i_t])
+
        # *********************************************************************
-
+
        # compute the NPV via the object
-
+
        my_inv = Investment(i_t, R_t)
-
-        assert math.isclose(my_inv.net_present_value(),
-                            45287.96018387402,
-                            abs_tol=0.001)
-
+
+        assert math.isclose(
+            my_inv.net_present_value(), 45287.96018387402, abs_tol=0.001
+        )
+
        # compute the NPV via the object using i_t2 and R_t
-
+
        my_inv.discount_rates = i_t2
        my_npv = my_inv.net_present_value()
-
-        assert math.isclose(my_npv,
-                            42923.405014,
-                            abs_tol=0.001)
-
+
+        assert math.isclose(my_npv, 42923.405014, abs_tol=0.001)
+
        # compute the NPV via the object using i_t2 and R_t2
-
+
        my_inv.net_cash_flows = R_t2
        my_npv = my_inv.net_present_value()
-
-        assert math.isclose(my_npv,
-                            64385.107522,
-                            abs_tol=0.001)
-
+
+        assert math.isclose(my_npv, 64385.107522, abs_tol=0.001)
+
        # compute the NPV via the _npv method using i_t and R_t2
-
+
        my_inv.discount_rates = i_t
        my_inv.net_cash_flows = R_t2
        my_npv = my_inv.net_present_value()
-
-        assert math.isclose(my_npv,
-                            67931.940276,
-                            abs_tol=0.001)
-
+
+        assert math.isclose(my_npv, 67931.940276, abs_tol=0.001)
+
        # compute the NPV via the npv method using i_t and R_t
-
-        my_npv, my_df = npv(discount_rates=i_t,
-                            net_cash_flows=R_t,
-                            return_discount_factors=True)
-
-        assert math.isclose(my_npv,
-                            45287.960184,
-                            abs_tol=0.001)
-
+
+        my_npv, my_df = npv(
+            discount_rates=i_t, net_cash_flows=R_t, return_discount_factors=True
+        )
+
+        assert math.isclose(my_npv, 45287.960184, abs_tol=0.001)
+
        # create new object without specifying the net cash flows
-
+
        my_inv = Investment(discount_rates=i_t)
-
+
        for ncf_t in my_inv.net_cash_flows:
-
            assert ncf_t >= 0
-
+
        my_inv.net_cash_flows = R_t
        my_npv = my_inv.net_present_value()
-
-        assert math.isclose(my_npv,
-                            45287.960184,
-                            abs_tol=0.001)
-
+
+        assert math.isclose(my_npv, 45287.960184, abs_tol=0.001)
+
        # create new object by specifying the discount rate and the analysis period
-
-        my_inv = Investment(None,
-                            net_cash_flows=R_t,
-                            discount_rate=i_t[0],
-                            analysis_period_span=len(i_t))
-
+
+        my_inv = Investment(
+            None,
+            net_cash_flows=R_t,
+            discount_rate=i_t[0],
+            analysis_period_span=len(i_t),
+        )
+
        my_npv = my_inv.net_present_value()
-
-        assert math.isclose(my_npv,
-                            45287.960184,
-                            abs_tol=0.001)
-
+
+        assert math.isclose(my_npv, 45287.960184, abs_tol=0.001)
+
        # *********************************************************************
-
+
        # force errors
-
+
        # *********************************************************************
-
+
        # TypeError('The discount rates must be provided as a tuple.')
-
+
        error_triggered = False
        try:
            my_inv = Investment(list(i_t), R_t)
        except TypeError:
            error_triggered = True
        assert error_triggered
-
+
        # *********************************************************************
-
-        # ValueError('The duration of the period under analysis must be positive.')
-
+
+        # ValueError('The duration of the period under analysis must be positive.')
+
        error_triggered = False
        try:
            my_inv = Investment(tuple())
        except ValueError:
            error_triggered = True
-        assert error_triggered
-
-        # *********************************************************************
-
+        assert error_triggered
+
+        # *********************************************************************
+
        # TypeError('The discount rate must be provided as a float.')
-
+
        error_triggered = False
        try:
            my_inv = Investment(None, None, 5, 10)
        except TypeError:
            error_triggered = True
-        assert error_triggered
-
-        # *********************************************************************
-
+        assert error_triggered
+
+        # *********************************************************************
+
        # ValueError('The discount rate must be in the open interval between 0 and 1.)
-
+
        error_triggered = False
        try:
            my_inv = Investment(None, None, 1.35, 10)
        except ValueError:
            error_triggered = True
-        assert error_triggered
-
-        # *********************************************************************
-
+        assert error_triggered
+
+        # *********************************************************************
+
        # TypeError('The duration of the period under consideration must be provided as an integer.')
-
+
        error_triggered = False
        try:
            my_inv = Investment(None, None, 0.35, 10.0)
        except TypeError:
            error_triggered = True
-        assert error_triggered
-
-        # *********************************************************************
-
-        # ValueError('The duration of the period under analysis must be positive.)
-
+        assert error_triggered
+
+        # *********************************************************************
+
+        # ValueError('The duration of the period under analysis must be positive.)
+
        error_triggered = False
        try:
            my_inv = Investment(None, None, 0.35, 0)
        except ValueError:
            error_triggered = True
-        assert error_triggered
-
+        assert error_triggered
+
        # *********************************************************************
-
+
        # TypeError('The net cash flows must be provided as a list.')
-
+
        error_triggered = False
        try:
            my_inv = Investment(i_t, tuple(R_t))
        except TypeError:
            error_triggered = True
        assert error_triggered
-
+
        # *********************************************************************
-
+
        # trigger the error for differently-sized inputs using npv() and not _npv()
-
+
        number_errors = 0
-
+
        try:
-            my_npv = npv(i_t[0:-1], R_t)
-
+            my_npv = npv(i_t[0:-1], R_t)
+
        except ValueError:
-            number_errors += 1
-
+            number_errors += 1
+
        assert number_errors == 1
-
+
        # *********************************************************************
-
+
        # trigger the error for differently-sized inputs using the __init__ method
-
+
        number_errors = 0
-
+
        try:
-            my_inv = Investment(i_t[0:-1], R_t)
-
+            my_inv = Investment(i_t[0:-1], R_t)
+
        except ValueError:
-            number_errors += 1
-
+            number_errors += 1
+
        assert number_errors == 1
-
+
        # *********************************************************************

-#******************************************************************************
-#******************************************************************************
+
+# ******************************************************************************
+# ******************************************************************************
diff --git a/tests/test_data_utils.py b/tests/test_data_utils.py
index 4ba332a..2ddfbf0 100644
--- a/tests/test_data_utils.py
+++ b/tests/test_data_utils.py
@@ -1,7 +1,7 @@
 # imports

 # standard
-
+
 import random
 import math
@@ -12,20 +12,19 @@
 from statistics import mean

 from src.topupopt.data.misc import utils

-#******************************************************************************
-#******************************************************************************
+# ******************************************************************************
+# ******************************************************************************
+

 class TestDataUtils:
-
    # *************************************************************************
    # *************************************************************************

    def test_profile_synching2(self):
-
        integration_result = 10446
        ratio_min_avg = 0.2
-        min_to_max_ratio = ratio_min_avg/(2-ratio_min_avg)
-
+        min_to_max_ratio = ratio_min_avg / (2 - ratio_min_avg)
+
        avg_state = [
            2.66,
            2.34,
@@ -38,382 +37,344 @@ class TestDataUtils:
            14.1,
            10.48,
            6.74,
-            3.16]
-
+            3.16,
+        ]
+
        time_interval_durations = [
-            31, # jan
-            28, # fev
-            31, # mar
-            30, # apr
-            31, # may
-            30, # june
-            31, # july
-            31, # august
-            30, # september
-            31, # october
-            30, # november
-            31 # december
-            ]
-
+            31,  # jan
+            28,  # fev
+            31,  # mar
+            30,  # apr
+            31,  # may
+            30,  # june
+            31,  # july
+            31,  # august
+            30,  # september
+            31,  # october
+            30,  # november
+            31,  # december
+        ]
+
        # *********************************************************************
        # *********************************************************************
-
+
        # state correlates with output
-
+
        new_profile = utils.create_profile_using_time_weighted_state(
-            integration_result=integration_result,
-            avg_state=avg_state,
-            time_interval_durations=time_interval_durations,
+            integration_result=integration_result,
+            avg_state=avg_state,
+            time_interval_durations=time_interval_durations,
            min_to_max_ratio=min_to_max_ratio,
-            state_correlates_with_output=False
-            )
-
+            state_correlates_with_output=False,
+        )
+
        expected_result = [
-            1500.0513102636057,
-            1436.2189684321309,
-            1500.0513102636044,
-            1206.6051909345115,
-            896.2493366213225,
-            525.7218723351705,
-            283.3475134825171,
-            185.83058429361876,
-            270.8127439165165,
-            538.2566419011698,
-            861.4985340132666,
-            1241.355993542566
-            ]
-
+            1500.0513102636057,
+            1436.2189684321309,
+            1500.0513102636044,
+            1206.6051909345115,
+            896.2493366213225,
+            525.7218723351705,
+            283.3475134825171,
+            185.83058429361876,
+            270.8127439165165,
+            538.2566419011698,
+            861.4985340132666,
+            1241.355993542566,
+        ]
+
        abs_tol = 1e-3
-
-        assert math.isclose(
-            sum(new_profile),
-            integration_result,
-            abs_tol=abs_tol
-            )
-
+
+        assert math.isclose(sum(new_profile), integration_result, abs_tol=abs_tol)
+
        for sample, expected_sample in zip(new_profile, expected_result):
-
-            assert math.isclose(
-                sample,
-                expected_sample,
-                abs_tol=abs_tol
-                )
-
+            assert math.isclose(sample, expected_sample, abs_tol=abs_tol)
+
        # *********************************************************************
        # *********************************************************************
-
+
        # state does not correlate with output
-
+
        # state correlates with output
-
+
        new_profile = utils.create_profile_using_time_weighted_state(
-            integration_result=integration_result,
-            avg_state=avg_state,
-            time_interval_durations=time_interval_durations,
+            integration_result=integration_result,
+            avg_state=avg_state,
+            time_interval_durations=time_interval_durations,
            min_to_max_ratio=min_to_max_ratio,
-            state_correlates_with_output=True
-            )
-
+            state_correlates_with_output=True,
+        )
+
        expected_result = [
-            274.3377308322865,
-            166.45500417060902,
-            274.33773083228533,
-            510.54549399699533,
-            878.1397044745678,
-            1191.4288125963367,
-            1491.041527613374,
-            1588.5584568022714,
-            1446.3379410149894,
-            1236.132399194721,
-            855.6521509182398,
-            533.0330475533233
-            ]
-
+            274.3377308322865,
+            166.45500417060902,
+            274.33773083228533,
+            510.54549399699533,
+            878.1397044745678,
+            1191.4288125963367,
+            1491.041527613374,
+            1588.5584568022714,
+            1446.3379410149894,
+            1236.132399194721,
+            855.6521509182398,
+            533.0330475533233,
+        ]
+
        abs_tol = 1e-3
-
-        assert math.isclose(
-            sum(new_profile),
-            integration_result,
-            abs_tol=abs_tol
-            )
-
+
+        assert math.isclose(sum(new_profile), integration_result, abs_tol=abs_tol)
+
        for sample, expected_sample in zip(new_profile, expected_result):
-
-            assert math.isclose(
-                sample,
-                expected_sample,
-                abs_tol=abs_tol
-                )
-
+            assert math.isclose(sample, expected_sample, abs_tol=abs_tol)
+
        # *********************************************************************
        # *********************************************************************
-
+
        # find out the peaks of the sinusoidal profile
-
+
        pmax, pmin = utils.max_min_sinusoidal_profile(
            integration_result=integration_result,
            period=sum(time_interval_durations),
            time_interval_duration=mean(time_interval_durations),
-            min_to_max_ratio=min_to_max_ratio
-            )
-
+            min_to_max_ratio=min_to_max_ratio,
+        )
+
        expected_pmax, expected_pmin = 1558.972133279683, 182.02786672031687
-
-        assert math.isclose(
-            pmax,
-            expected_pmax,
-            abs_tol=1e-3
-            )
-
-        assert math.isclose(
-            pmin,
-            expected_pmin,
-            abs_tol=1e-3
-            )
-
+
+        assert math.isclose(pmax, expected_pmax, abs_tol=1e-3)
+
+        assert math.isclose(pmin, expected_pmin, abs_tol=1e-3)
+
        # *********************************************************************
        # *********************************************************************
-
+
        # raise exception
-
+
        error_triggered = False
        time_interval_durations.pop(0)
        try:
            new_profile = utils.create_profile_using_time_weighted_state(
-                integration_result=integration_result,
-                avg_state=avg_state,
-                time_interval_durations=time_interval_durations,
+                integration_result=integration_result,
+                avg_state=avg_state,
+                time_interval_durations=time_interval_durations,
                min_to_max_ratio=min_to_max_ratio,
-                state_correlates_with_output=True
-                )
+                state_correlates_with_output=True,
+            )
        except ValueError:
            error_triggered = True
        assert error_triggered
-
+
        # *********************************************************************
        # *********************************************************************
-
+
    # *************************************************************************
    # *************************************************************************
-
+
    def test_profile_synching(self):
-
        # synch, normal, ex1
-
-        profile = [1,2,3,4]
-        reference_profile = [2,3,4,1]
+
+        profile = [1, 2, 3, 4]
+        reference_profile = [2, 3, 4, 1]
        synched_profile = utils.synch_profile(profile, reference_profile)
-        true_synched_profile = [2,3,4,1]
+        true_synched_profile = [2, 3, 4, 1]
        assert repr(synched_profile) == repr(true_synched_profile)
-
+
        # synch, normal, ex2
-
-        profile = [-2,-1,1,2,0]
-        reference_profile = [2,3,4,1,5]
+
+        profile = [-2, -1, 1, 2, 0]
+        reference_profile = [2, 3, 4, 1, 5]
        synched_profile = utils.synch_profile(profile, reference_profile)
-        true_synched_profile = [-1,0,1,-2,2]
+        true_synched_profile = [-1, 0, 1, -2, 2]
        assert repr(synched_profile) == repr(true_synched_profile)
-
+
        # synch, alternative, ex1
-
-        profile = [1,2,3,4]
-        reference_profile = [2,3,4,1]
+
+        profile = [1, 2, 3, 4]
+        reference_profile = [2, 3, 4, 1]
        synched_profile = utils.synch_profile(profile, reference_profile, synch=False)
-        true_synched_profile = [3,2,1,4]
+        true_synched_profile = [3, 2, 1, 4]
        assert repr(synched_profile) == repr(true_synched_profile)
-
-
+
    # *************************************************************************
    # *************************************************************************
-
+
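The three assertions above pin down the synch rule: the k-th smallest value of profile is placed at the index of the k-th smallest value of reference_profile, and in reverse order when synch=False. A sketch consistent with these cases (hypothetical, not the actual utils implementation):

def synch_profile(profile, reference_profile, synch=True):
    # reference indices, ordered from its smallest to its largest sample
    order = sorted(range(len(reference_profile)), key=lambda i: reference_profile[i])
    # profile values, ascending when synching and descending otherwise
    values = sorted(profile, reverse=not synch)
    out = [0] * len(profile)
    for rank, index in enumerate(order):
        out[index] = values[rank]
    return out

assert synch_profile([1, 2, 3, 4], [2, 3, 4, 1]) == [2, 3, 4, 1]
assert synch_profile([1, 2, 3, 4], [2, 3, 4, 1], synch=False) == [3, 2, 1, 4]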
    def test_profile_generation(self):
-
        # *********************************************************************
-
+
        # fixed time interval durations
-
+
        number_tests = 10
-
+
        for test_index in range(number_tests):
-
-            integration_period = 365*24*3600
-
-            number_intervals = random.randint(1,8760)
-
-            phase_shift_radians = 2*math.pi*random.random()
-
+            integration_period = 365 * 24 * 3600
+
+            number_intervals = random.randint(1, 8760)
+
+            phase_shift_radians = 2 * math.pi * random.random()
+
            time_interval_durations = [
-                round(integration_period/number_intervals)
+                round(integration_period / number_intervals)
                for i in range(number_intervals)
-                ]
-
+            ]
+
            integration_result = 100
-
+
            min_to_max_ratio = 0.2
-
+
            profile = utils.discrete_sinusoid_matching_integral(
-                integration_result,
-                time_interval_durations,
-                min_to_max_ratio,
-                phase_shift_radians=phase_shift_radians)
-
-            assert math.isclose(
-                sum(profile), integration_result,
-                abs_tol=0.01
-                )
-
+                integration_result,
+                time_interval_durations,
+                min_to_max_ratio,
+                phase_shift_radians=phase_shift_radians,
+            )
+
+            assert math.isclose(sum(profile), integration_result, abs_tol=0.01)
+
        # *********************************************************************
-
+
        # import matplotlib.pyplot as plt
-
+
        # # Data for plotting
        # x = [i for i in range(number_intervals)]
        # y = profile
-
+
        # fig, ax = plt.subplots()
        # ax.plot(x, y)
-
+
        # ax.set(xlabel='time (s)', ylabel='voltage (mV)',
        #        title='About as simple as it gets, folks')
        # ax.grid()
-
+
        # #fig.savefig("test.png")
        # plt.show()
-
+
        # *********************************************************************
-
+
        # variable time step durations
-
+
        number_tests = 10
-
+
        for test_index in range(number_tests):
-
-            number_intervals = random.randint(10,8760)
-
+            number_intervals = random.randint(10, 8760)
+
            time_interval_durations = [
-                random.random()*3.6e3
-                for i in range(number_intervals)
-                ]
-
+                random.random() * 3.6e3 for i in range(number_intervals)
+            ]
+
            integration_period = sum(time_interval_durations)
-
-            phase_shift_radians = 2*math.pi*random.random()
-
+
+            phase_shift_radians = 2 * math.pi * random.random()
+
            integration_result = 100
-
+
            min_to_max_ratio = 0.2
-
+
            profile = utils.discrete_sinusoid_matching_integral(
-                integration_result,
-                time_interval_durations,
-                min_to_max_ratio,
-                phase_shift_radians=phase_shift_radians)
-
-            assert math.isclose(
-                sum(profile), integration_result,
-                abs_tol=0.01
-                )
-
+                integration_result,
+                time_interval_durations,
+                min_to_max_ratio,
+                phase_shift_radians=phase_shift_radians,
+            )
+
+            assert math.isclose(sum(profile), integration_result, abs_tol=0.01)
+
        # *********************************************************************
-
+
        # # import matplotlib.pyplot as plt
-
+
        # t = [sum(time_interval_durations[0:i])
        #      for i in range(len(time_interval_durations)+1)]
-
+
        # # Data for plotting
        # x = [(t[i+1]+t[i])*0.5
        #      for i in range(number_intervals)]  # time interval's center point
        # y = profile
-
+
        # fig, ax = plt.subplots()
        # ax.plot(x, y)
-
+
        # ax.set(xlabel='time (s)', ylabel='voltage (mV)',
        #        title='About as simple as it gets, folks')
        # ax.grid()
-
+
        # #fig.savefig("test.png")
        # plt.show()
-
+
        # *********************************************************************
-
+
        # use the default phase shift
-
-        integration_period = 365*24*3600
-
-        number_intervals = random.randint(1,8760)
-
+
+        integration_period = 365 * 24 * 3600
+
+        number_intervals = random.randint(1, 8760)
+
        time_interval_durations = [
-            round(integration_period/number_intervals)
+            round(integration_period / number_intervals)
            for i in range(number_intervals)
-            ]
-
+        ]
+
        integration_result = 100
-
+
        min_to_max_ratio = 0.2
-
+
        profile = utils.discrete_sinusoid_matching_integral(
-            integration_result,
-            time_interval_durations,
-            min_to_max_ratio)
-
-        assert math.isclose(
-            sum(profile),
-            integration_result,
-            abs_tol=0.01
-            )
-
+            integration_result, time_interval_durations, min_to_max_ratio
+        )
+
+        assert math.isclose(sum(profile), integration_result, abs_tol=0.01)
+
    # *************************************************************************
    # *************************************************************************
-
+
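A profile with the properties this test demands is straightforward to build: sample a sinusoid around the per-interval mean, with the relative amplitude a chosen so that min/max = (1 - a)/(1 + a), and rescale so the samples sum to the target integral exactly. A rough stand-in for discrete_sinusoid_matching_integral (the library's discretisation may well differ):

import math

def sinusoid_profile(total, durations, min_to_max_ratio, phase=0.0):
    # relative amplitude such that min/max equals min_to_max_ratio
    a = (1 - min_to_max_ratio) / (1 + min_to_max_ratio)
    period = sum(durations)
    samples, t = [], 0.0
    for dt in durations:
        mid = t + dt / 2  # sample the sinusoid at each interval's midpoint
        samples.append(dt * (1 + a * math.sin(2 * math.pi * mid / period + phase)))
        t += dt
    scale = total / sum(samples)  # exact integral match by construction
    return [s * scale for s in samples]

profile = sinusoid_profile(100, [3600.0] * 24, 0.2)
assert math.isclose(sum(profile), 100, abs_tol=1e-9)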
    def test_key_generation(self):
-
        # generate_pseudo_unique_key
-
+
        key_list = (str(random.random()) for i in range(10))
-
+
        new_key = utils.generate_pseudo_unique_key(key_list=key_list)
-
+
        assert new_key not in key_list
-
+
        # use an empty key list
-
+
        new_key = utils.generate_pseudo_unique_key(key_list=[])
-
+
        assert new_key not in key_list
-
+
        # use zero iterations to force an error
-
+
        error_triggered = False
        try:
-            new_key = utils.generate_pseudo_unique_key(key_list=key_list,
-                                                       max_iterations=0)
+            new_key = utils.generate_pseudo_unique_key(
+                key_list=key_list, max_iterations=0
+            )
        except Exception:
            error_triggered = True
        assert error_triggered
-
+
        # use a seed number to trigger more iterations
-
+
        import uuid
+
        rand = random.Random()
        rand.seed(360)
        uuid.uuid4 = lambda: uuid.UUID(int=rand.getrandbits(128), version=4)
-
-        key_list = ['3e225573-4e78-48c8-bb08-efbeeb795c22',
-                    'f6d30428-15d1-41e9-a952-0742eaaa5a31',
-                    '8c29b906-2518-41c5-ada8-07b83508b5b8',
-                    'f9a72a39-1422-4a02-af97-906ce79c32a3',
-                    'b6941a48-10cc-465d-bf53-178bd2939bd1']
-
+
+        key_list = [
+            "3e225573-4e78-48c8-bb08-efbeeb795c22",
+            "f6d30428-15d1-41e9-a952-0742eaaa5a31",
+            "8c29b906-2518-41c5-ada8-07b83508b5b8",
+            "f9a72a39-1422-4a02-af97-906ce79c32a3",
+            "b6941a48-10cc-465d-bf53-178bd2939bd1",
+        ]
+
        new_key = utils.generate_pseudo_unique_key(key_list=key_list)
-
+
        assert new_key not in key_list
-
-    #**************************************************************************
-    #**************************************************************************
-#******************************************************************************
-#******************************************************************************
\ No newline at end of file
+    # **************************************************************************
+    # **************************************************************************
+
+
+# ******************************************************************************
+# ******************************************************************************
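test_key_generation only requires that the helper keep drawing candidate keys until one misses key_list, and that it give up after max_iterations attempts; that is why the test monkeypatches uuid.uuid4 with a seeded generator to force collisions. A plausible sketch (hypothetical; the real helper lives in src/topupopt/data/misc/utils.py):

import uuid

def generate_pseudo_unique_key(key_list, max_iterations=10):
    # draw random UUID keys until one does not collide with key_list
    for _ in range(max_iterations):
        candidate = str(uuid.uuid4())
        if candidate not in key_list:
            return candidate
    raise Exception("unable to generate a pseudo-unique key")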
) + # PipeTrenchOptions myarcs = ExistingPipeTrench( - option_selected=0, - trench=mytrench, - name='hellotrench', - length=20 - ) - + option_selected=0, trench=mytrench, name="hellotrench", length=20 + ) + # add static loss scenario myarcs.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + # test arcs - + n = myarcs.number_options() assert myarcs.has_been_selected() assert len(myarcs.capacity) == n @@ -124,112 +112,97 @@ class TestDistrictHeatingNetwork: for (h, q, k), sl in myarcs.static_loss.items(): assert isinstance(sl, Real) assert sl >= 0 - + # redefine the capacity capacity = tuple(myarcs.capacity) - myarcs.set_capacity( - max_specific_pressure_loss=max_specific_pressure_loss+100 - ) + myarcs.set_capacity(max_specific_pressure_loss=max_specific_pressure_loss + 100) assert len(capacity) == len(myarcs.capacity) for _c1, _c2 in zip(capacity, myarcs.capacity): assert _c1 != _c2 - + # redefine the minimum costs min_cost = tuple(myarcs.minimum_cost) - myarcs.set_minimum_cost(minimum_cost=[_mc+1 for _mc in min_cost]) + myarcs.set_minimum_cost(minimum_cost=[_mc + 1 for _mc in min_cost]) assert len(min_cost) == len(myarcs.minimum_cost) for _mc1, _mc2 in zip(min_cost, myarcs.minimum_cost): - assert _mc1+1 == _mc2 - + assert _mc1 + 1 == _mc2 + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_creating_single_arcs(self): - # fluid data - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + + singlepipedata_files = ["tests/data/isoplus_singlepipes_s1.csv"] pipedb = 
StandardisedPipeDatabase(source=singlepipedata_files) pipe = StandardisedPipe( pipe_tuple=pipedb.pipe_tuples[0], - #e_eff=pipe_e_eff, - #sp=pipe_specific_price, - db=pipedb) - + # e_eff=pipe_e_eff, + # sp=pipe_specific_price, + db=pipedb, + ) + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m - + max_specific_pressure_loss = 100 # Pa/m + mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=pipe_depth, - pipe_center_distance=pipe_distance, - fluid_db=fluid_db, - phase=phase, - pressure=pressure, - supply_temperature=supply_temperature, - return_temperature=return_temperature, - max_specific_pressure_loss=max_specific_pressure_loss, - supply_pipe=pipe - ) - + pipe_center_depth=pipe_depth, + pipe_center_distance=pipe_distance, + fluid_db=fluid_db, + phase=phase, + pressure=pressure, + supply_temperature=supply_temperature, + return_temperature=return_temperature, + max_specific_pressure_loss=max_specific_pressure_loss, + supply_pipe=pipe, + ) + # PipeTrenchOptions - myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=50 - ) - + myarcs = PipeTrenchOptions(trench=mytrench, name="hellotrench", length=50) + # add static loss scenario myarcs.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + # test arcs - + n = myarcs.number_options() assert not myarcs.has_been_selected() assert len(myarcs.capacity) == n @@ -242,141 +215,122 @@ class TestDistrictHeatingNetwork: for (h, q, k), sl in myarcs.static_loss.items(): assert isinstance(sl, Real) assert sl >= 0 - + # redefine the capacity capacity 
= tuple(myarcs.capacity) - myarcs.set_capacity( - max_specific_pressure_loss=max_specific_pressure_loss+100 - ) + myarcs.set_capacity(max_specific_pressure_loss=max_specific_pressure_loss + 100) assert len(capacity) == len(myarcs.capacity) for _c1, _c2 in zip(capacity, myarcs.capacity): assert _c1 != _c2 - + # redefine the minimum costs min_cost = tuple(myarcs.minimum_cost) - myarcs.set_minimum_cost(minimum_cost=[_mc+1 for _mc in min_cost]) + myarcs.set_minimum_cost(minimum_cost=[_mc + 1 for _mc in min_cost]) assert len(min_cost) == len(myarcs.minimum_cost) for _mc1, _mc2 in zip(min_cost, myarcs.minimum_cost): - assert _mc1+1 == _mc2 - + assert _mc1 + 1 == _mc2 + # ********************************************************************* - + # create arcs object with multiple static loss values as a first case - + # PipeTrenchOptions - myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=50 - ) - + myarcs = PipeTrenchOptions(trench=mytrench, name="hellotrench", length=50) + number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps)], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + # ************************************************************************* # ************************************************************************* - + def test_creating_single_arcs_investment(self): - # fluid data - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + + singlepipedata_files = ["tests/data/isoplus_singlepipes_s1.csv"] pipedb = StandardisedPipeDatabase(source=singlepipedata_files) pipe = StandardisedPipe( pipe_tuple=pipedb.pipe_tuples[0], - #e_eff=pipe_e_eff, - #sp=pipe_specific_price, - db=pipedb) - + # e_eff=pipe_e_eff, + # sp=pipe_specific_price, + db=pipedb, + ) + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m - + max_specific_pressure_loss = 100 # Pa/m + mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=pipe_depth, - pipe_center_distance=pipe_distance, - fluid_db=fluid_db, - phase=phase, - pressure=pressure, - supply_temperature=supply_temperature, - return_temperature=return_temperature, - max_specific_pressure_loss=max_specific_pressure_loss, - 
supply_pipe=pipe - ) - + pipe_center_depth=pipe_depth, + pipe_center_distance=pipe_distance, + fluid_db=fluid_db, + phase=phase, + pressure=pressure, + supply_temperature=supply_temperature, + return_temperature=return_temperature, + max_specific_pressure_loss=max_specific_pressure_loss, + supply_pipe=pipe, + ) + # investments number_periods = 20 discount_rate = 0.035 discount_rates = tuple([discount_rate for p in range(number_periods)]) inv = Investment(discount_rates=discount_rates) - + # PipeTrenchOptions myarcs = PipeTrenchInvestments( trench=mytrench, - name='hellotrench', + name="hellotrench", length=50, - investments=(inv,), - ) - + investments=(inv,), + ) + # add static loss scenario myarcs.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + # test arcs - + n = myarcs.number_options() assert not myarcs.has_been_selected() assert len(myarcs.capacity) == n @@ -389,144 +343,115 @@ class TestDistrictHeatingNetwork: for (h, q, k), sl in myarcs.static_loss.items(): assert isinstance(sl, Real) assert sl >= 0 - + # redefine the capacity capacity = tuple(myarcs.capacity) - myarcs.set_capacity( - max_specific_pressure_loss=max_specific_pressure_loss+100 - ) + myarcs.set_capacity(max_specific_pressure_loss=max_specific_pressure_loss + 100) assert len(capacity) == len(myarcs.capacity) for _c1, _c2 in zip(capacity, myarcs.capacity): assert _c1 != _c2 - + # redefine the minimum costs min_cost = tuple(myarcs.minimum_cost) - myarcs.set_minimum_cost(minimum_cost=[_mc+1 for _mc in min_cost]) + myarcs.set_minimum_cost(minimum_cost=[_mc + 1 for _mc in min_cost]) assert len(min_cost) == len(myarcs.minimum_cost) for _mc1, _mc2 in zip(min_cost, myarcs.minimum_cost): - assert _mc1+1 == _mc2 - + assert _mc1 + 1 == _mc2 + # ********************************************************************* - + # create arcs object with multiple static loss values as a first case - + # PipeTrenchOptions - myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=50 - ) - + myarcs = 
PipeTrenchOptions(trench=mytrench, name="hellotrench", length=50) + number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps)], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + # ************************************************************************* # ************************************************************************* - + def test_creating_multiple_arcs(self): - # fluid data - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + + singlepipedata_files = ["tests/data/isoplus_singlepipes_s1.csv"] pipedb = StandardisedPipeDatabase(source=singlepipedata_files) pipe = StandardisedPipe( pipe_tuple=pipedb.pipe_tuples[0], - #e_eff=pipe_e_eff, - #sp=pipe_specific_price, - db=pipedb) - + # e_eff=pipe_e_eff, + # sp=pipe_specific_price, + db=pipedb, + ) + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m + max_specific_pressure_loss = 100 # Pa/m number_options = 2 - + mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=[pipe_depth for i in range(number_options)], - pipe_center_distance=[ - pipe_distance for i in range(number_options) - ], - fluid_db=fluid_db, - phase=phase, - pressure=[pressure for i in range(number_options)], - supply_temperature=[ - supply_temperature for i in range(number_options) - ], - return_temperature=[ - return_temperature for i in range(number_options) - ], + pipe_center_depth=[pipe_depth for i in range(number_options)], + pipe_center_distance=[pipe_distance for i in range(number_options)], + fluid_db=fluid_db, + phase=phase, + pressure=[pressure for i in range(number_options)], + supply_temperature=[supply_temperature for i in range(number_options)], + return_temperature=[return_temperature for i in range(number_options)], max_specific_pressure_loss=[ max_specific_pressure_loss for i in range(number_options) - ], - supply_pipe=[pipe for i in range(number_options)] - ) - + ], + supply_pipe=[pipe for i in range(number_options)], + ) + # PipeTrenchOptions - myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=50 - ) - + myarcs = PipeTrenchOptions(trench=mytrench, name="hellotrench", length=50) + # add static loss scenario 
myarcs.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) # add static loss scenario number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + # test arcs - + assert number_options == myarcs.number_options() assert len(myarcs.capacity) == number_options assert len(myarcs.minimum_cost) == number_options @@ -538,62 +463,54 @@ class TestDistrictHeatingNetwork: for (h, q, k), sl in myarcs.static_loss.items(): assert isinstance(sl, Real) assert sl >= 0 - + # redefine the capacity capacity = tuple(myarcs.capacity) myarcs.set_capacity( max_specific_pressure_loss=[ - max_specific_pressure_loss+100 for i in range(number_options) - ] - ) + max_specific_pressure_loss + 100 for i in range(number_options) + ] + ) assert len(capacity) == len(myarcs.capacity) for _c1, _c2 in zip(capacity, myarcs.capacity): assert _c1 != _c2 - + # redefine the minimum costs min_cost = tuple(myarcs.minimum_cost) - myarcs.set_minimum_cost(minimum_cost=[_mc+1 for _mc in min_cost]) + myarcs.set_minimum_cost(minimum_cost=[_mc + 1 for _mc in min_cost]) assert len(min_cost) == len(myarcs.minimum_cost) for _mc1, _mc2 in zip(min_cost, myarcs.minimum_cost): - assert _mc1+1 == _mc2 - + assert _mc1 + 1 == _mc2 + # try redefining the capacity with a single input (non-list, non-tuple) error_raised = False try: myarcs.set_capacity( - max_specific_pressure_loss=max_specific_pressure_loss+100 - ) + max_specific_pressure_loss=max_specific_pressure_loss + 100 + ) except TypeError: # vector mode and only one max specific pressure loss value was provided error_raised = True assert error_raised - + # ********************************************************************* - + # PipeTrenchOptions - myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=50 - ) - + myarcs = PipeTrenchOptions(trench=mytrench, name="hellotrench", length=50) + number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - 
time_interval_duration=[3600 for i in range(number_steps)],
-            temperature_surroundings=[
-                outdoor_temperature for i in range(number_steps)
-                ]
-            )
-    
+            scenario_key="scenario2",
+            ground_thermal_conductivity=[soil_k for i in range(number_steps)],
+            ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)],
+            time_interval_duration=[3600 for i in range(number_steps)],
+            temperature_surroundings=[outdoor_temperature for i in range(number_steps)],
+        )
+
     # *************************************************************************
     # *************************************************************************
-    
+
+
 # *****************************************************************************
 # *****************************************************************************
 
@@ -602,15 +519,15 @@ class TestDistrictHeatingNetwork:
 # def example_pipe_trench_objects(fluid_db,
 #                                 single_pipe_db,
 #                                 twin_pipe_db):
-    
+
 # #**************************************************************************
 # #**************************************************************************
-    
+
 # # water pipes
-    
-# list_single_pipe_tuples = [pipe_tuple 
+
+# list_single_pipe_tuples = [pipe_tuple
 #                            for pipe_tuple in single_pipe_db.pipe_tuples]
-    
+
 # list_twin_pipe_tuples = [pipe_tuple
 #                          for pipe_tuple in twin_pipe_db.pipe_tuples]
 
@@ -619,30 +536,30 @@ class TestDistrictHeatingNetwork:
 #                          db=single_pipe_db)
 #     for i, pipe_tuple in enumerate(list_single_pipe_tuples)
 #     ]
-    
+
 # list_twin_pipes = [
 #     StandardisedTwinPipe(pipe_tuple=pipe_tuple,
 #                          db=twin_pipe_db)
 #     for i, pipe_tuple in enumerate(list_twin_pipe_tuples)
 #     ]
-    
+
 # #**************************************************************************
-    
+
 # # what does it do?
 # # >> Creates a district heating trench object with multiple options
-    
+
 # # seed number
-    
+
 # seed_number = 249
-    
+
 # rand.seed(seed_number)
-    
+
 # # number of intervals
-    
+
 # number_intervals = 3
-    
+
 # time_interval_duration = [rand.random() for k in range(number_intervals)]
-    
+
 # # network
 
 # dhn_supply_temperature = 100+273.15 # K
@@ -650,7 +567,7 @@ class TestDistrictHeatingNetwork:
 # dhn_return_temperature = 60+273.15 # K
 
 # dhn_max_specific_pressure_loss = 100 # Pa
-    
+
 # # trench
 
 # trench_pipe_depth = 3
@@ -661,7 +578,7 @@ class TestDistrictHeatingNetwork:
 # trench_ground_surface_temperature = [
 #     7.8+273.15 for i in range(number_intervals)] # K
-    
+
 # trench_ground_air_heat_transfer_coefficient = 14.6 # W/m2
 
 # # pipe details
 
 # pipe_length = 1000
 
 # pipe_relative_roughness = 1e-3
-    
+
 # #**************************************************************************
 # #**************************************************************************
-    
+
 # # single pipe trenches
-    
+
 # trench_tech = trenches.SupplyReturnPipeTrenchWithIdenticalPipes(
 #     pipes=list_single_pipes,
-#     fluid_database=fluid_db,
-#     ground_thermal_conductivity=trench_ground_thermal_conductivity,
-# 
ground_air_heat_transfer_coefficient=trench_ground_air_heat_transfer_coefficient, +# pipe_center_depth=trench_pipe_depth, +# pipe_center_distance=trench_pipe_distance, +# supply_temperature=dhn_supply_temperature, +# return_temperature=dhn_return_temperature, +# max_specific_pressure_loss=dhn_max_specific_pressure_loss, +# time_interval_duration=time_interval_duration, +# surroundings_temperature=trench_ground_surface_temperature) + # # single pipe, no external cost, no offset - + # pipe_trench_obj = PipeTrench(name='hello', # trenches={0: trench_tech}, # length=pipe_length, @@ -698,11 +615,11 @@ class TestDistrictHeatingNetwork: # minimum_cost=None, # minimum_cost_offset=None, # validate=True) - + # original_min_cost = tuple(pipe_trench_obj.minimum_cost) - + # # single pipe, no external cost, offset - + # pipe_trench_obj = PipeTrench(name='hello', # trenches={0: trench_tech}, # length=pipe_length, @@ -714,16 +631,16 @@ class TestDistrictHeatingNetwork: # for pipe in list_single_pipes # ), # validate=True) - -# for orig_cost, new_cost in zip(original_min_cost, + +# for orig_cost, new_cost in zip(original_min_cost, # pipe_trench_obj.minimum_cost): - + # assert orig_cost <= new_cost - + # # single pipe, external cost, no offset - + # external_cost = tuple(0.2+min_cost for min_cost in original_min_cost) - + # pipe_trench_obj = PipeTrench(name='hello', # trenches={0: trench_tech}, # length=pipe_length, @@ -732,11 +649,11 @@ class TestDistrictHeatingNetwork: # minimum_cost=external_cost, # minimum_cost_offset=None, # validate=True) - + # assert external_cost == pipe_trench_obj.minimum_cost - + # # single pipe, external cost, offset - + # error_triggered = False # try: # pipe_trench_obj = PipeTrench(name='hello', @@ -750,9 +667,9 @@ class TestDistrictHeatingNetwork: # except TypeError: # error_triggered = True # assert error_triggered - + # # use list as minimum cost offset - + # error_triggered = False # try: # pipe_trench_obj = PipeTrench(name='hello', @@ -769,27 +686,27 @@ class TestDistrictHeatingNetwork: # except TypeError: # error_triggered = True # assert error_triggered - + # #************************************************************************** # #************************************************************************** - + # # twin pipe trenches - + # trench_tech = trenches.SupplyReturnPipeTrenchWithIdenticalPipes( # pipes=list_twin_pipes, -# fluid_database=fluid_db, -# ground_thermal_conductivity=trench_ground_thermal_conductivity, -# ground_air_heat_transfer_coefficient=trench_ground_air_heat_transfer_coefficient, -# pipe_center_depth=trench_pipe_depth, -# pipe_center_distance=trench_pipe_distance, -# supply_temperature=dhn_supply_temperature, -# return_temperature=dhn_return_temperature, -# max_specific_pressure_loss=dhn_max_specific_pressure_loss, -# time_interval_duration=time_interval_duration, -# surroundings_temperature=trench_ground_surface_temperature) - +# fluid_database=fluid_db, +# ground_thermal_conductivity=trench_ground_thermal_conductivity, +# ground_air_heat_transfer_coefficient=trench_ground_air_heat_transfer_coefficient, +# pipe_center_depth=trench_pipe_depth, +# pipe_center_distance=trench_pipe_distance, +# supply_temperature=dhn_supply_temperature, +# return_temperature=dhn_return_temperature, +# max_specific_pressure_loss=dhn_max_specific_pressure_loss, +# time_interval_duration=time_interval_duration, +# surroundings_temperature=trench_ground_surface_temperature) + # # single pipe, no external cost, no offset - + # pipe_trench_obj = 
PipeTrench(name='hello', # trenches={0: trench_tech}, # length=pipe_length, @@ -798,11 +715,11 @@ class TestDistrictHeatingNetwork: # minimum_cost=None, # minimum_cost_offset=None, # validate=True) - + # original_min_cost = tuple(pipe_trench_obj.minimum_cost) - + # # single pipe, no external cost, offset - + # pipe_trench_obj = PipeTrench(name='hello', # trenches={0: trench_tech}, # length=pipe_length, @@ -814,16 +731,16 @@ class TestDistrictHeatingNetwork: # for pipe in list_twin_pipes # ), # validate=True) - -# for orig_cost, new_cost in zip(original_min_cost, + +# for orig_cost, new_cost in zip(original_min_cost, # pipe_trench_obj.minimum_cost): - + # assert orig_cost <= new_cost - + # # single pipe, external cost, no offset - + # external_cost = tuple(0.2+min_cost for min_cost in original_min_cost) - + # pipe_trench_obj = PipeTrench(name='hello', # trenches={0: trench_tech}, # length=pipe_length, @@ -832,11 +749,11 @@ class TestDistrictHeatingNetwork: # minimum_cost=external_cost, # minimum_cost_offset=None, # validate=True) - + # assert external_cost == pipe_trench_obj.minimum_cost - + # # single pipe, external cost, offset - + # error_triggered = False # try: # pipe_trench_obj = PipeTrench(name='hello', @@ -850,9 +767,9 @@ class TestDistrictHeatingNetwork: # except TypeError: # error_triggered = True # assert error_triggered - + # # use list as minimum cost offset - + # error_triggered = False # try: # pipe_trench_obj = PipeTrench(name='hello', @@ -872,6 +789,6 @@ class TestDistrictHeatingNetwork: # #************************************************************************** # #************************************************************************** - + +# #****************************************************************************** # #****************************************************************************** -# #****************************************************************************** \ No newline at end of file diff --git a/tests/test_dhn_utils.py b/tests/test_dhn_utils.py index 147428b..0b75ff0 100644 --- a/tests/test_dhn_utils.py +++ b/tests/test_dhn_utils.py @@ -3,225 +3,189 @@ # standard from math import inf import random + # external import osmnx as ox + # internal from src.topupopt.problems.esipp.network import Network, ArcsWithoutLosses + # import src.topupopt.data.dhn.network as tuo_dhn import src.topupopt.data.dhn.utils as utils from src.topupopt.data.dhn.network import PipeTrenchOptions from topupheat.pipes.single import StandardisedPipe, StandardisedPipeDatabase import topupheat.pipes.trenches as trenches -from topupheat.common.fluids import FluidDatabase#, Fluid +from topupheat.common.fluids import FluidDatabase # , Fluid # ***************************************************************************** # ***************************************************************************** -class TestDistrictHeatingNetworkUtils: +class TestDistrictHeatingNetworkUtils: # ************************************************************************* # ************************************************************************* - + def test_cost_pipes_single_arc(self): - # fluid data - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + 
+ singlepipedata_files = ["tests/data/isoplus_singlepipes_s1.csv"] pipedb = StandardisedPipeDatabase(source=singlepipedata_files) pipe = StandardisedPipe( pipe_tuple=pipedb.pipe_tuples[0], - #e_eff=pipe_e_eff, - #sp=pipe_specific_price, - db=pipedb) - + # e_eff=pipe_e_eff, + # sp=pipe_specific_price, + db=pipedb, + ) + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m - + max_specific_pressure_loss = 100 # Pa/m + mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=pipe_depth, - pipe_center_distance=pipe_distance, - fluid_db=fluid_db, - phase=phase, - pressure=pressure, - supply_temperature=supply_temperature, - return_temperature=return_temperature, - max_specific_pressure_loss=max_specific_pressure_loss, - supply_pipe=pipe - ) - + pipe_center_depth=pipe_depth, + pipe_center_distance=pipe_distance, + fluid_db=fluid_db, + phase=phase, + pressure=pressure, + supply_temperature=supply_temperature, + return_temperature=return_temperature, + max_specific_pressure_loss=max_specific_pressure_loss, + supply_pipe=pipe, + ) + # create arcs object with multiple static loss values as a first case - + trench_length = 50 - + # PipeTrenchOptions myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=trench_length - ) - + trench=mytrench, name="hellotrench", length=trench_length + ) + number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps)], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + mypipecosts = utils.cost_pipes(mytrench, trench_length) assert mypipecosts == (50.0,) - + # unrecognised input: using a string for the trench length error_raised = False try: - mypipecosts = utils.cost_pipes(mytrench, '50') + mypipecosts = utils.cost_pipes(mytrench, "50") except ValueError: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_cost_pipes_multiple_arcs(self): - # fluid data - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + + singlepipedata_files = ["tests/data/isoplus_singlepipes_s1.csv"] pipedb = 
StandardisedPipeDatabase(source=singlepipedata_files) - pipe = StandardisedPipe( - pipe_tuple=pipedb.pipe_tuples[0], - db=pipedb) - + pipe = StandardisedPipe(pipe_tuple=pipedb.pipe_tuples[0], db=pipedb) + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m + max_specific_pressure_loss = 100 # Pa/m number_options = 2 - + mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=[pipe_depth for i in range(number_options)], - pipe_center_distance=[ - pipe_distance for i in range(number_options) - ], - fluid_db=fluid_db, - phase=phase, - pressure=[pressure for i in range(number_options)], - supply_temperature=[ - supply_temperature for i in range(number_options) - ], - return_temperature=[ - return_temperature for i in range(number_options) - ], + pipe_center_depth=[pipe_depth for i in range(number_options)], + pipe_center_distance=[pipe_distance for i in range(number_options)], + fluid_db=fluid_db, + phase=phase, + pressure=[pressure for i in range(number_options)], + supply_temperature=[supply_temperature for i in range(number_options)], + return_temperature=[return_temperature for i in range(number_options)], max_specific_pressure_loss=[ max_specific_pressure_loss for i in range(number_options) - ], - supply_pipe=[pipe for i in range(number_options)] - ) - + ], + supply_pipe=[pipe for i in range(number_options)], + ) + # PipeTrenchOptions - myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=50 - ) - + myarcs = PipeTrenchOptions(trench=mytrench, name="hellotrench", length=50) + # add static loss scenario myarcs.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) # add static loss scenario number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in 
range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) trench_length = 50 - + # PipeTrenchOptions myarcs = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=trench_length - ) - + trench=mytrench, name="hellotrench", length=trench_length + ) + number_steps = 3 myarcs.set_static_losses( - scenario_key='scenario2', - ground_thermal_conductivity=[soil_k for i in range(number_steps) - ], - ground_air_heat_transfer_coefficient=[ - h_gs for i in range(number_steps) - ], - time_interval_duration=[3600 for i in range(number_steps)], - temperature_surroundings=[ - outdoor_temperature for i in range(number_steps) - ] - ) - + scenario_key="scenario2", + ground_thermal_conductivity=[soil_k for i in range(number_steps)], + ground_air_heat_transfer_coefficient=[h_gs for i in range(number_steps)], + time_interval_duration=[3600 for i in range(number_steps)], + temperature_surroundings=[outdoor_temperature for i in range(number_steps)], + ) + mypipecosts = utils.cost_pipes(mytrench, trench_length) assert mypipecosts == (100.0, 100.0) mypipecosts = utils.cost_pipes(mytrench, (trench_length, trench_length)) assert mypipecosts == (100.0, 100.0) - + # unrecognised input: using a list for the trench lengths error_raised = False try: @@ -229,24 +193,23 @@ class TestDistrictHeatingNetworkUtils: except ValueError: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_plotting_heating_demand(self): - # g = 'dh' # q = 0 # p = 0 # any # months = [ - # 'Jan', - # 'Fev', - # 'Mar', - # 'Apr', - # 'May', - # 'Jun', - # 'Jul', + # 'Jan', + # 'Fev', + # 'Mar', + # 'Apr', + # 'May', + # 'Jun', + # 'Jul', # 'Aug', # 'Sep', # 'Oct', @@ -263,7 +226,7 @@ class TestDistrictHeatingNetworkUtils: # flow_in_k[(g,q,p,k)] # for k in ipp.instance.set_K_q[q] # ] - + monthly_end_use_demand = [ 1466.7343572178731, 1558.9721332796835, @@ -276,9 +239,9 @@ class TestDistrictHeatingNetworkUtils: 274.26564278212857, 526.2639333601578, 870.4999999999999, - 1214.7360666398413 - ] - + 1214.7360666398413, + ] + monthly_total_demand = [ 1628.7218570926373, 1721.9500038070653, @@ -291,9 +254,9 @@ class TestDistrictHeatingNetworkUtils: 400.8473918244898, 664.0492504106618, 1019.8602740534218, - 1375.1761123698027 - ] - + 1375.1761123698027, + ] + # monthly_losses = [ # 161.98749987476413, # 162.9778705273818, @@ -307,354 +270,276 @@ class TestDistrictHeatingNetworkUtils: # 137.78531705050398, # 149.3602740534219, # 160.4400457299614] - + monthly_losses = [ - total_demand-end_use_demand + total_demand - end_use_demand for end_use_demand, total_demand in zip( - monthly_end_use_demand, - monthly_total_demand - ) - ] - + monthly_end_use_demand, monthly_total_demand + ) + ] + months = [ - 'Jan', - 'Fev', - 'Mar', - 'Apr', - 'May', - 'Jun', - 'Jul', - 'Aug', - 'Sep', - 'Oct', - 'Nov', - 'Dec' - ] - + "Jan", + "Fev", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + utils.plot_heating_demand( - losses=monthly_losses, - end_use_demand=monthly_end_use_demand, - labels=months) - + losses=monthly_losses, end_use_demand=monthly_end_use_demand, labels=months + ) + # ************************************************************************* # ************************************************************************* - + def test_summarising_output(self): - # fluid data - waterdata_file = 
'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + + singlepipedata_files = ["tests/data/isoplus_singlepipes_s1.csv"] pipedb = StandardisedPipeDatabase(source=singlepipedata_files) - + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m - number_options = 3 + max_specific_pressure_loss = 100 # Pa/m + number_options = 3 mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=[ - pipe_depth - for i in range(number_options) - ], - pipe_center_distance=[ - pipe_distance - for i in range(number_options) - ], - fluid_db=fluid_db, - phase=phase, - pressure=[ - pressure - for i in range(number_options) - ], - supply_temperature=[ - supply_temperature - for i in range(number_options) - ], - return_temperature=[ - return_temperature - for i in range(number_options) - ], - max_specific_pressure_loss=[ - max_specific_pressure_loss - for i in range(number_options) - ], - supply_pipe=[ - StandardisedPipe( - pipe_tuple=pipedb.pipe_tuples[i], - db=pipedb) + pipe_center_depth=[pipe_depth for i in range(number_options)], + pipe_center_distance=[pipe_distance for i in range(number_options)], + fluid_db=fluid_db, + phase=phase, + pressure=[pressure for i in range(number_options)], + supply_temperature=[supply_temperature for i in range(number_options)], + return_temperature=[return_temperature for i in range(number_options)], + max_specific_pressure_loss=[ + max_specific_pressure_loss for i in range(number_options) + ], + supply_pipe=[ + StandardisedPipe(pipe_tuple=pipedb.pipe_tuples[i], db=pipedb) for i in range(number_options) - ] - ) - + ], + ) + # ********************************************************************* - + # arc 1 trench_length1 = 50 myarcs1 = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=trench_length1 - ) + trench=mytrench, name="hellotrench", length=trench_length1 + ) # add static loss scenario myarcs1.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs1.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature 
+ 1, + ) # set the option myarcs1.options_selected[2] = True - + # ********************************************************************* - + # arc 2 trench_length2 = 25 myarcs2 = PipeTrenchOptions( - trench=mytrench, - name='hellotrench', - length=trench_length2 - ) + trench=mytrench, name="hellotrench", length=trench_length2 + ) # add static loss scenario myarcs2.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs2.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) # set the option myarcs2.options_selected[0] = True - - + # ********************************************************************* - + # create network - mynet = Network() - mynet.add_directed_arc( - node_key_a=0, - node_key_b=1, - arcs=myarcs1) + mynet = Network() + mynet.add_directed_arc(node_key_a=0, node_key_b=1, arcs=myarcs1) + mynet.add_directed_arc(node_key_a=1, node_key_b=2, arcs=myarcs2) mynet.add_directed_arc( - node_key_a=1, - node_key_b=2, - arcs=myarcs2 - ) - mynet.add_directed_arc( - 0, + 0, 2, arcs=ArcsWithoutLosses( - name='hello', - capacity=[1,2,3], - minimum_cost=[4, 10, 16], - specific_capacity_cost=3, - capacity_is_instantaneous=False - ) - ) - + name="hello", + capacity=[1, 2, 3], + minimum_cost=[4, 10, 16], + specific_capacity_cost=3, + capacity_is_instantaneous=False, + ), + ) + # ********************************************************************* - + out = utils.summarise_network_by_pipe_technology(mynet, False) - - assert 'DN20' in out and out['DN20'] == 25 - assert 'DN32' in out and out['DN32'] == 50 - + + assert "DN20" in out and out["DN20"] == 25 + assert "DN32" in out and out["DN32"] == 50 + utils.summarise_network_by_pipe_technology(mynet, True) - + # ************************************************************************* # ************************************************************************* - + def test_summarising_output_osmnx(self): - # get the network _protonet = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) - + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) + # create a network object network = Network(incoming_graph_data=_protonet) - + # fluid data - waterdata_file = 'tests/data/incropera2006_saturated_water.csv' + waterdata_file = "tests/data/incropera2006_saturated_water.csv" phase = FluidDatabase.fluid_LIQUID - fluid_db = FluidDatabase( - fluid='fluid', - phase=phase, - source=waterdata_file - ) - - singlepipedata_files = ['tests/data/isoplus_singlepipes_s1.csv'] + fluid_db = FluidDatabase(fluid="fluid", phase=phase, source=waterdata_file) + + singlepipedata_files = 
["tests/data/isoplus_singlepipes_s1.csv"] pipedb = StandardisedPipeDatabase(source=singlepipedata_files) - - + # network details - supply_temperature = 85+273.15 - return_temperature = 45+273.15 + supply_temperature = 85 + 273.15 + return_temperature = 45 + 273.15 pressure = 1e5 # trench - pipe_distance = 0.52 # m - pipe_depth = 0.66 # m + pipe_distance = 0.52 # m + pipe_depth = 0.66 # m # environmental - outdoor_temperature = 6+273.15 # K - h_gs = inf # 14.6 # W/m2K - soil_k = 1.5 # W/mK + outdoor_temperature = 6 + 273.15 # K + h_gs = inf # 14.6 # W/m2K + soil_k = 1.5 # W/mK # more information - max_specific_pressure_loss = 100 # Pa/m - + max_specific_pressure_loss = 100 # Pa/m + number_options = 4 - + mytrench = trenches.SupplyReturnPipeTrench( - pipe_center_depth=[ - pipe_depth - for i in range(number_options) - ], - pipe_center_distance=[ - pipe_distance - for i in range(number_options) - ], - fluid_db=fluid_db, - phase=phase, - pressure=[ - pressure - for i in range(number_options) - ], - supply_temperature=[ - supply_temperature - for i in range(number_options) - ], - return_temperature=[ - return_temperature - for i in range(number_options) - ], - max_specific_pressure_loss=[ - max_specific_pressure_loss - for i in range(number_options) - ], - supply_pipe=[ - StandardisedPipe( - pipe_tuple=pipedb.pipe_tuples[i], - db=pipedb) + pipe_center_depth=[pipe_depth for i in range(number_options)], + pipe_center_distance=[pipe_distance for i in range(number_options)], + fluid_db=fluid_db, + phase=phase, + pressure=[pressure for i in range(number_options)], + supply_temperature=[supply_temperature for i in range(number_options)], + return_temperature=[return_temperature for i in range(number_options)], + max_specific_pressure_loss=[ + max_specific_pressure_loss for i in range(number_options) + ], + supply_pipe=[ + StandardisedPipe(pipe_tuple=pipedb.pipe_tuples[i], db=pipedb) for i in range(number_options) - ] - ) - + ], + ) + # ********************************************************************* - + for edge_key in network.edges(keys=True): # set up arc myarcs = PipeTrenchOptions( trench=mytrench, - name='hellotrench', - length=network.edges[edge_key]['length'] - ) + name="hellotrench", + length=network.edges[edge_key]["length"], + ) # add static loss scenario myarcs.set_static_losses( - scenario_key='scenario0', - ground_thermal_conductivity=soil_k, - ground_air_heat_transfer_coefficient=h_gs, - time_interval_duration=3600, - temperature_surroundings=outdoor_temperature - ) + scenario_key="scenario0", + ground_thermal_conductivity=soil_k, + ground_air_heat_transfer_coefficient=h_gs, + time_interval_duration=3600, + temperature_surroundings=outdoor_temperature, + ) # add another static loss scenario myarcs.set_static_losses( - scenario_key='scenario1', - ground_thermal_conductivity=soil_k+1, - ground_air_heat_transfer_coefficient=h_gs+1, - time_interval_duration=3600+100, - temperature_surroundings=outdoor_temperature+1 - ) + scenario_key="scenario1", + ground_thermal_conductivity=soil_k + 1, + ground_air_heat_transfer_coefficient=h_gs + 1, + time_interval_duration=3600 + 100, + temperature_surroundings=outdoor_temperature + 1, + ) # set the option myarcs.options_selected[ - random.randint(0, myarcs.number_options()-1) - ] = True + random.randint(0, myarcs.number_options() - 1) + ] = True # modify the arc - network.modify_network_arc( - *edge_key, - {Network.KEY_ARC_TECH: myarcs} - ) + network.modify_network_arc(*edge_key, {Network.KEY_ARC_TECH: myarcs}) # deselect one of the trenches - 
trench_index = random.randint(0, network.number_of_edges()-1) + trench_index = random.randint(0, network.number_of_edges() - 1) edge_key = tuple(network.edges(keys=True))[trench_index] network.edges[edge_key][Network.KEY_ARC_TECH].options_selected[ - network.edges[edge_key][ - Network.KEY_ARC_TECH].options_selected.index(True) - ] = False - + network.edges[edge_key][Network.KEY_ARC_TECH].options_selected.index(True) + ] = False + # add non-trench Arcs object network.add_directed_arc( - 0, - 2, - arcs=ArcsWithoutLosses( - name='hello', - capacity=[1,2,3], - minimum_cost=[4, 10, 16], - specific_capacity_cost=3, - capacity_is_instantaneous=False - ) - ) - # update the nodes - network.add_node( 0, - x=55, - y=12 - ) - network.add_node( 2, - x=55.01, - y=12.01 - ) - + arcs=ArcsWithoutLosses( + name="hello", + capacity=[1, 2, 3], + minimum_cost=[4, 10, 16], + specific_capacity_cost=3, + capacity_is_instantaneous=False, + ), + ) + # update the nodes + network.add_node(0, x=55, y=12) + network.add_node(2, x=55.01, y=12.01) + # ********************************************************************* - + utils.summarise_network_by_pipe_technology(network, False) - - utils.plot_network_layout( - network=network, - include_basemap=False) - - utils.plot_network_layout( - network=network, - include_basemap=True) - + + utils.plot_network_layout(network=network, include_basemap=False) + + utils.plot_network_layout(network=network, include_basemap=True) + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/test_esipp_converter.py b/tests/test_esipp_converter.py index 2e1f9ac..d049d0e 100644 --- a/tests/test_esipp_converter.py +++ b/tests/test_esipp_converter.py @@ -16,269 +16,276 @@ import src.topupopt.problems.esipp.converter as cvn import src.topupopt.problems.esipp.signal as sgn -#****************************************************************************** -#****************************************************************************** - -class TestConverter(): - - #************************************************************************** - #************************************************************************** - +# ****************************************************************************** +# ****************************************************************************** + + +class TestConverter: + # ************************************************************************** + # ************************************************************************** + # test converters # 1) regular and irregular time steps # 2) time invariant and time varying models # 3) integrate and do not integrate outputs # 4) generate coefficients - + # test creating a stateless converter without outputs - + # test creating a stateless converter with 1 output - + # test creating a stateless converter with 2 output - + # test creating a converter based on a single ODE system without outputs - + # test creating a converter based on a single ODE system with 1 output - + # test 
creating a converter based on a single ODE system with 2 outputs
-    
+
     # test creating a converter based on a multiple ODE system without outputs
-    
+
     # test creating a converter based on a multiple ODE system with 1 output
-    
+
     # test creating a converter based on a multiple ODE multi-output system
-    
-    #**************************************************************************
-    #**************************************************************************
-    
+    # **************************************************************************
+    # **************************************************************************
+
     def test_full_converter_regular(self):
-        
-        time_step_durations = [1,1,1,1]
+        time_step_durations = [1, 1, 1, 1]
 
         method_full_converter(time_step_durations)
 
-    #**************************************************************************
-    #**************************************************************************
-    
+    # **************************************************************************
+    # **************************************************************************
+
     def test_full_converter_irregular(self):
-        
-        time_step_durations = [1,1.5,0.5,1]
+        time_step_durations = [1, 1.5, 0.5, 1]
 
         method_full_converter(time_step_durations)
 
-    #**************************************************************************
-    #**************************************************************************
-    
-#******************************************************************************
-#******************************************************************************
+    # **************************************************************************
+    # **************************************************************************
+
+
+# ******************************************************************************
+# ******************************************************************************
+
 
 def get_stateless_model_data(relative_amplitude_variation: float = 0.0):
-    
     mrh_deviation = random.random() - 0.5
-    
-    Aw = 6.22 # original: 6.22 m2
-    
-    min_rel_heat = 0.2*(1+relative_amplitude_variation*mrh_deviation)
-    
+
+    Aw = 6.22  # original: 6.22 m2
+
+    min_rel_heat = 0.2 * (1 + relative_amplitude_variation * mrh_deviation)
+
     return Aw, min_rel_heat
-    
-#******************************************************************************
-#******************************************************************************
+
+
+# ******************************************************************************
+# ******************************************************************************
+
 
 def get_single_ode_model_data(relative_amplitude_variation: float = 0.0):
-    
     # define how the coefficients change
-    
+
     Ria_deviation = random.random() - 0.5
-    
+
     # define the (A, B, C and D) matrices
     # A: n*n
     # B: n*m
     # C: r*n
# D: r*m - + # from Bacher and Madsen (2011): model TiTh - - Ci = 1.360*3600000 # original: 1.36 kWh/ºC - Ch = 0.309*3600000 # original: 0.309 kWh/ºC + + Ci = 1.360 * 3600000 # original: 1.36 kWh/ºC + Ch = 0.309 * 3600000 # original: 0.309 kWh/ºC Ria = ( - (1+relative_amplitude_variation*Ria_deviation)*5.31/3600000 - ) # original: 5.31 ºC/kWh + (1 + relative_amplitude_variation * Ria_deviation) * 5.31 / 3600000 + ) # original: 5.31 ºC/kWh Rih = ( - (1+relative_amplitude_variation*Rih_deviation)*0.639/3600000 - ) # original: 0.639 ºC/kWh - Aw = 6.22 # original: 6.22 m2 - - Pw = 5000 # 5 kW - + (1 + relative_amplitude_variation * Rih_deviation) * 0.639 / 3600000 + ) # original: 0.639 ºC/kWh + Aw = 6.22 # original: 6.22 m2 + + Pw = 5000 # 5 kW + min_rel_heat = 0.2 - + x0 = np.array([20, 20]) - + return Ci, Ch, Ria, Rih, Aw, min_rel_heat, Pw, x0 - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def stateless_model(Aw, min_rel_heat): - # inputs: Ta, phi_s, phi_h above the minimum, phi_h status # outputs: solar irradiance, heat - - d = np.array([[0, Aw, 0, 0], - [0, 0, (1-min_rel_heat), min_rel_heat]]) - + + d = np.array([[0, Aw, 0, 0], [0, 0, (1 - min_rel_heat), min_rel_heat]]) + return None, None, None, d - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def single_node_model(Ci, Ria, Aw, min_rel_heat, Pw): - # states: Ti and Th # inputs: Ta, phi_s, phi_h above the minimum, phi_h status # outputs: solar irradiance, heat - - a = np.array([[-1/(Ria*Ci)]]) - b = np.array([[1/(Ci*Ria), Aw/Ci, Pw*(1-min_rel_heat)/Ci, Pw*min_rel_heat/Ci]]) - c = np.array([[0],[0]]) - d = np.array([[0, Aw, 0, 0], - [0, 0, Pw*(1-min_rel_heat), Pw*min_rel_heat]]) - + + a = np.array([[-1 / (Ria * Ci)]]) + b = np.array( + [ + [ + 1 / (Ci * Ria), + Aw / Ci, + Pw * (1 - min_rel_heat) / Ci, + Pw * min_rel_heat / Ci, + ] + ] + ) + c = np.array([[0], [0]]) + d = np.array([[0, Aw, 0, 0], [0, 0, Pw * (1 - min_rel_heat), Pw * min_rel_heat]]) + return a, b, c, d - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat, Pw): - # states: Ti and Th # inputs: Ta, phi_s, phi_h above the minimum, phi_h status # outputs: solar irradiance, heat - - a = np.array([[-(1/Rih+1/Ria)/Ci, 1/(Ci*Rih)], - [1/(Ch*Rih), -1/(Ch*Rih)]]) - b = np.array([[1/(Ci*Ria), Aw/Ci, 0, 0], - [0, 0, Pw*(1-min_rel_heat)/Ch, Pw*min_rel_heat/Ch]]) - c = np.array([[0, 0], - [0, 0]]) - d = np.array([[0, Aw, 0, 0], - [0, 0, Pw*(1-min_rel_heat), Pw*min_rel_heat]]) - + + a = np.array( + [[-(1 / Rih + 1 / Ria) / Ci, 1 / (Ci * Rih)], [1 / (Ch * Rih), -1 / (Ch * Rih)]] + ) + b = np.array( + [ + [1 / (Ci * Ria), Aw / Ci, 0, 0], + [0, 0, Pw * (1 - min_rel_heat) / Ch, Pw * min_rel_heat / Ch], + 
] + ) + c = np.array([[0, 0], [0, 0]]) + d = np.array([[0, Aw, 0, 0], [0, 0, Pw * (1 - min_rel_heat), Pw * min_rel_heat]]) + return a, b, c, d - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def get_two_node_model_signals(number_samples): - # signals - - # inputs: + + # inputs: # 1) ambient temperature (real, can be fixed later) # 2) solar irradiation (real, can be fixed later) # 3) relative heat above minimum (nnr) # 4) heater status (binary) - + list_inputs = [ sgn.FreeUnboundedSignal(number_samples), sgn.FreeUnboundedSignal(number_samples), sgn.NonNegativeRealSignal(number_samples), - sgn.BinarySignal(number_samples) - ] - + sgn.BinarySignal(number_samples), + ] + # states # 1) indoor temperature (real) # 2) heater temperature (real) - + list_states = [ sgn.FreeUnboundedSignal(number_samples), - sgn.FreeUnboundedSignal(number_samples) - ] - + sgn.FreeUnboundedSignal(number_samples), + ] + # outputs: # 1) solar gain (nnr) # 2) heat input (nnr) - + list_outputs = [ sgn.NonNegativeRealSignal(number_samples), - sgn.NonNegativeRealSignal(number_samples) - ] - + sgn.NonNegativeRealSignal(number_samples), + ] + return list_inputs, list_states, list_outputs - -#****************************************************************************** -#****************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** + def method_full_converter(time_step_durations: list): - # number of samples number_time_steps = len(time_step_durations) - + # get the coefficients Ci, Ch, Ria, Rih, Aw, min_rel_heat, Pw, x0 = get_multi_ode_model_data() - + # get the model a, b, c, d = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat, Pw) - + # get the signals inputs, states, outputs = get_two_node_model_signals(number_time_steps) - + # create a dynamic system ds = dynsys.DynamicSystem( - time_interval_durations=time_step_durations, - A=a, - B=b, - C=c, - D=d) - + time_interval_durations=time_step_durations, A=a, B=b, C=c, D=d + ) + # create a converter cvn1 = cvn.Converter( - 'cvn1', + "cvn1", sys=ds, initial_states=x0, turn_key_cost=3, inputs=inputs, states=states, - outputs=outputs) - + outputs=outputs, + ) + # get the dictionaries - (a_innk, - b_inmk, - c_irnk, - d_irmk, - e_x_ink, - e_y_irk) = cvn1.matrix_dictionaries() - + (a_innk, b_inmk, c_irnk, d_irmk, e_x_ink, e_y_irk) = cvn1.matrix_dictionaries() + # TODO: check the dicts - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/test_esipp_dynsys.py b/tests/test_esipp_dynsys.py index 756e1f7..70bbb41 100644 --- a/tests/test_esipp_dynsys.py +++ b/tests/test_esipp_dynsys.py @@ -13,436 +13,452 @@ import src.topupopt.problems.esipp.dynsys as dynsys # ***************************************************************************** # 
***************************************************************************** -class TestDynsys(): +class TestDynsys: # ************************************************************************* # ************************************************************************* - -# seed_number = random.randint(1,int(1e5)) - -# print_outputs = True - -# # with states and outputs - -# # test multi-ODE, multi-output dynamic systems while integrating outputs - -# examples_dynsys_multiode_multiout(True, print_outputs, seed_number) - -# # test multi-ODE, multi-output dynamic systems without integrating outputs - -# examples_dynsys_multiode_multiout(False, print_outputs, seed_number) - -# # test single ODE, multi-output dynamic systems while integrating outputs - -# examples_dynsys_singleode_multiout(True, print_outputs, seed_number) - -# # test single ODE, multi-output dynamic systems without integrating outputs - -# examples_dynsys_singleode_multiout(False, print_outputs, seed_number) - -# # test multi-ODE, single-output dynamic systems while integrating outputs - -# examples_dynsys_multiode_multiout(True, print_outputs, seed_number, 1) - -# # test multi-ODE, single-output dynamic systems without integrating outputs - -# examples_dynsys_multiode_multiout(False, print_outputs, seed_number, 1) - -# # test single-ODE, single-output dynamic systems while integrating outputs - -# examples_dynsys_singleode_multiout(True, print_outputs, seed_number, 1) -# # test single-ODE, single-output dynamic systems without integrating outputs - -# examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 1) - -# # ************************************************************************* - -# # outputless - -# # test single-ODE, outputless dynamic systems while integrating outputs - -# examples_dynsys_singleode_multiout(True, print_outputs, seed_number, 0) - -# # test multi-ODE, outputless dynamic systems while integrating outputs - -# examples_dynsys_multiode_multiout(True, print_outputs, seed_number, 0) - -# # test single-ODE, outputless dynamic systems without integrating outputs - -# examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 0) - -# # test multi-ODE, outputless dynamic systems without integrating outputs - -# examples_dynsys_multiode_multiout(False, print_outputs, seed_number, 0) - -# # outputless system via dynsys subclass - -# example_outputless_system_object() - -# # ************************************************************************* - -# # stateless - -# # test stateless, single-output dynamic systems while integrating outputs - -# examples_dynsys_stateless_multiout(True, print_outputs, seed_number, 1) - -# # test stateless, multi-output dynamic systems without integrating outputs - -# examples_dynsys_stateless_multiout(False, print_outputs, seed_number, 2) - -# # stateless system via dynsys subclass - -# example_stateless_system_object(True) -# example_stateless_system_object(False) - -# # ************************************************************************* -# # ************************************************************************* - -# # trigger errors - -# # test stateless, outputless dynamic systems while integrating outputs - -# number_errors = 0 - -# try: -# examples_dynsys_stateless_multiout(True, False, seed_number, 0) -# except Exception: -# number_errors += 1 - -# assert number_errors == 1 - -# # test stateless, outputless dynamic systems without integrating outputs - -# number_errors = 0 - -# try: -# examples_dynsys_stateless_multiout(False, False, 
seed_number, 0) -# except Exception: -# number_errors += 1 - -# assert number_errors == 1 - -# # test negative time duration - -# example_incorrect_time_step_durations() - -# # test unrecognised matrix formats - -# example_unrecognised_matrix_formats() - -# # different matrix sizes for the same problem, all other things being equal - -# example_varying_matrix_sizes(True) -# example_varying_matrix_sizes(False) - -# # test multiple A matrices and multiple non-matching time intervals - -# example_nonmatching_time_steps_and_matrices() - -# # test non-square A matrices - -# example_nonsquare_A_matrices() - -# # test incompatible A and B matrices (different number of rows) - -# example_incompatible_AB_matrices() - -# # test incompatible C and D matrices (different number of rows) - -# example_incompatible_CD_matrices() - -# # test incompatible A and C matrices (different number of columns) - -# example_incompatible_AC_matrices() - -# # test incompatible B and D matrices (different number of columns) - -# example_incompatible_BD_matrices() - -# # trigger incorrect input signal format error when simulating - -# example_single_time_step_model_incorrect_inputs() - -# # TODO: test only some matrices as being time invariant + # seed_number = random.randint(1,int(1e5)) + + # print_outputs = True + + # # with states and outputs + + # # test multi-ODE, multi-output dynamic systems while integrating outputs + + # examples_dynsys_multiode_multiout(True, print_outputs, seed_number) + + # # test multi-ODE, multi-output dynamic systems without integrating outputs + + # examples_dynsys_multiode_multiout(False, print_outputs, seed_number) + + # # test single ODE, multi-output dynamic systems while integrating outputs + + # examples_dynsys_singleode_multiout(True, print_outputs, seed_number) + + # # test single ODE, multi-output dynamic systems without integrating outputs + + # examples_dynsys_singleode_multiout(False, print_outputs, seed_number) + + # # test multi-ODE, single-output dynamic systems while integrating outputs + + # examples_dynsys_multiode_multiout(True, print_outputs, seed_number, 1) + + # # test multi-ODE, single-output dynamic systems without integrating outputs + + # examples_dynsys_multiode_multiout(False, print_outputs, seed_number, 1) + + # # test single-ODE, single-output dynamic systems while integrating outputs + + # examples_dynsys_singleode_multiout(True, print_outputs, seed_number, 1) + + # # test single-ODE, single-output dynamic systems without integrating outputs + + # examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 1) + + # # ************************************************************************* + + # # outputless + + # # test single-ODE, outputless dynamic systems while integrating outputs + + # examples_dynsys_singleode_multiout(True, print_outputs, seed_number, 0) + + # # test multi-ODE, outputless dynamic systems while integrating outputs + + # examples_dynsys_multiode_multiout(True, print_outputs, seed_number, 0) + + # # test single-ODE, outputless dynamic systems without integrating outputs + + # examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 0) + + # # test multi-ODE, outputless dynamic systems without integrating outputs + + # examples_dynsys_multiode_multiout(False, print_outputs, seed_number, 0) + + # # outputless system via dynsys subclass + + # example_outputless_system_object() + + # # ************************************************************************* + + # # stateless + + # # test stateless, single-output 
dynamic systems while integrating outputs + + # examples_dynsys_stateless_multiout(True, print_outputs, seed_number, 1) + + # # test stateless, multi-output dynamic systems without integrating outputs + + # examples_dynsys_stateless_multiout(False, print_outputs, seed_number, 2) + + # # stateless system via dynsys subclass + + # example_stateless_system_object(True) + # example_stateless_system_object(False) + + # # ************************************************************************* + # # ************************************************************************* + + # # trigger errors + + # # test stateless, outputless dynamic systems while integrating outputs + + # number_errors = 0 + + # try: + # examples_dynsys_stateless_multiout(True, False, seed_number, 0) + # except Exception: + # number_errors += 1 + + # assert number_errors == 1 + + # # test stateless, outputless dynamic systems without integrating outputs + # number_errors = 0 + + # try: + # examples_dynsys_stateless_multiout(False, False, seed_number, 0) + # except Exception: + # number_errors += 1 + + # assert number_errors == 1 + + # # test negative time duration + + # example_incorrect_time_step_durations() + + # # test unrecognised matrix formats + + # example_unrecognised_matrix_formats() + + # # different matrix sizes for the same problem, all other things being equal + + # example_varying_matrix_sizes(True) + # example_varying_matrix_sizes(False) + + # # test multiple A matrices and multiple non-matching time intervals + + # example_nonmatching_time_steps_and_matrices() + + # # test non-square A matrices + + # example_nonsquare_A_matrices() + + # # test incompatible A and B matrices (different number of rows) + + # example_incompatible_AB_matrices() + + # # test incompatible C and D matrices (different number of rows) + + # example_incompatible_CD_matrices() + + # # test incompatible A and C matrices (different number of columns) + + # example_incompatible_AC_matrices() + + # # test incompatible B and D matrices (different number of columns) + + # example_incompatible_BD_matrices() + + # # trigger incorrect input signal format error when simulating + + # example_single_time_step_model_incorrect_inputs() + + # # TODO: test only some matrices as being time invariant + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* - + def test_incorrect_time_step_durations(self): - integrate_outputs = True - + # ********************************************************************* - + # negative time step duration - + # regular time steps (as an int) - + time_step_durations = [-1] - + # get the model - + Aw, min_rel_heat = get_stateless_model_data() - + _, _, _, D = stateless_model(Aw, min_rel_heat) - + number_errors = 0 try: _ = dynsys.StatelessSystem( - time_interval_durations=time_step_durations[0], + time_interval_durations=time_step_durations[0], D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 - + assert number_errors == 1 - + # ********************************************************************* - + # no time step duration (empty list) - + time_step_durations = [] - + # get the model - + Aw, min_rel_heat = get_stateless_model_data() - + _, _, _, D = stateless_model(Aw, min_rel_heat) - + number_errors = 0 try: _ = dynsys.StatelessSystem( - 
time_interval_durations=time_step_durations, + time_interval_durations=time_step_durations, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 - + assert number_errors == 1 - + # ********************************************************************* - + # time step duration list with negative or zero time step durations - + time_step_durations = [-1, 3, 0] - + # get the model - + Aw, min_rel_heat = get_stateless_model_data() - + _, _, _, D = stateless_model(Aw, min_rel_heat) - + number_errors = 0 try: _ = dynsys.StatelessSystem( - time_interval_durations=time_step_durations, + time_interval_durations=time_step_durations, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 - + assert number_errors == 1 - + # ********************************************************************* - + # time step duration list with non-numeric types - + time_step_durations = [None, 3, 3] - + # get the model - + Aw, min_rel_heat = get_stateless_model_data() - + _, _, _, D = stateless_model(Aw, min_rel_heat) - + number_errors = 0 try: _ = dynsys.StatelessSystem( - time_interval_durations=time_step_durations, + time_interval_durations=time_step_durations, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except TypeError: number_errors += 1 - + assert number_errors == 1 - + # ************************************************************************* # ************************************************************************* - + def test_single_time_step_model_incorrect_inputs(self): - integrate_y = True - + # regular time steps (as an int) - + time_step_durations = [1] - + # define a sequence of time steps - - t = np.array([sum(time_step_durations[0:i]) - for i in range(len(time_step_durations)+1)]) - + + t = np.array( + [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)] + ) + # define the inputs - - list_inputs = [-5, 50, 0.1, 1, 5] # extra input signal to force error - - U = np.array([[input_i for dt in t] # extra time step to force special case - for input_i in list_inputs]) - + + list_inputs = [-5, 50, 0.1, 1, 5] # extra input signal to force error + + U = np.array( + [ + [input_i for dt in t] # extra time step to force special case + for input_i in list_inputs + ] + ) + # U = np.array([[input_i for _ in range(len(time_step_durations))] # for input_i in list_inputs]) - + # get the model - + Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 = get_multi_ode_model_data() - + A, B, C, D = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat) - + ds = dynsys.DynamicSystem( - time_interval_durations=time_step_durations, + time_interval_durations=time_step_durations, A=A, B=B, C=C, D=D, - integrate_outputs=integrate_y) - + integrate_outputs=integrate_y, + ) + # define the initial conditions - + number_errors = 0 - + try: X, Y = ds.simulate(U, x0) except ValueError: number_errors += 1 - + assert number_errors == 1 - + # ************************************************************************* # ************************************************************************* - + def test_outputless_system_object(self): - # regular time steps time_step_durations = [1, 1, 1, 1] - + # define a sequence of time steps - t = np.array([sum(time_step_durations[0:i]) - for i in range(len(time_step_durations)+1)]) - + t = np.array( + [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)] + ) + # define the inputs 
         list_inputs = [-5, 50, 0.1, 1]
-        U = np.array([[input_i for _ in range(len(time_step_durations))]
-                      for input_i in list_inputs])
-
+        U = np.array(
+            [
+                [input_i for _ in range(len(time_step_durations))]
+                for input_i in list_inputs
+            ]
+        )
+
         # get the model
         Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 = get_multi_ode_model_data()
         A, B, _, _ = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat)
         ds = dynsys.OutputlessSystem(
-            time_interval_durations=time_step_durations,
-            A=A,
-            B=B)
-
+            time_interval_durations=time_step_durations, A=A, B=B
+        )
+
         # define the initial conditions
         X, Y = ds.simulate(U, x0)
         assert Y == None
         assert isinstance(X, np.ndarray)
-
+
         # *********************************************************************
-
+
         # regular time steps (as an int)
         time_step_durations = [1]
-
+
         # define a sequence of time steps
-        t = np.array([sum(time_step_durations[0:i])
-                      for i in range(len(time_step_durations)+1)])
-
+        t = np.array(
+            [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+        )
+
         # define the inputs
         list_inputs = [-5, 50, 0.1, 1]
-        U = np.array([[input_i for _ in range(len(time_step_durations))]
-                      for input_i in list_inputs])
-
+        U = np.array(
+            [
+                [input_i for _ in range(len(time_step_durations))]
+                for input_i in list_inputs
+            ]
+        )
+
         # get the model
         Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 = get_multi_ode_model_data()
         A, B, _, _ = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat)
-
         ds = dynsys.OutputlessSystem(
-            time_interval_durations=time_step_durations,
-            A=A,
-            B=B)
-
+            time_interval_durations=time_step_durations, A=A, B=B
+        )
+
         # define the initial conditions
         X, Y = ds.simulate(U, x0)
         assert Y == None
         assert isinstance(X, np.ndarray)
-
+
         # *********************************************************************
-
+
         # irregular time steps
         time_step_durations = [1, 1.5, 0.5, 1]
-
+
         # define a sequence of time steps
-        t = np.array([sum(time_step_durations[0:i])
-                      for i in range(len(time_step_durations)+1)])
-
+        t = np.array(
+            [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+        )
+
         # define the inputs
         list_inputs = [-5, 50, 0.1, 1]
-        U = np.array([[input_i for _ in range(len(time_step_durations))]
-                      for input_i in list_inputs])
-
+        U = np.array(
+            [
+                [input_i for _ in range(len(time_step_durations))]
+                for input_i in list_inputs
+            ]
+        )
+
         # get the model
         Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 = get_multi_ode_model_data()
         A, B, _, _ = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat)
         ds = dynsys.OutputlessSystem(
-            time_interval_durations=time_step_durations,
-            A=A,
-            B=B)
-
+            time_interval_durations=time_step_durations, A=A, B=B
+        )
+
         # define the initial conditions
         X, Y = ds.simulate(U, x0)
         assert Y == None
         assert isinstance(X, np.ndarray)
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_stateless_system_object_integration(self):
-
         integrate_outputs = True
         method_stateless_system_object(integrate_outputs)
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_stateless_system_object_no_integration(self):
-
         integrate_outputs = False
         method_stateless_system_object(integrate_outputs)
-
     # *************************************************************************
     # *************************************************************************

     def test_incompatible_BD_matrices(self):
-
         integrate_y = True
-
+
         # B and D must have the same number of columns
-
+
         time_step_durations = [1, 1, 1]
-
+
         A = np.array([[4, 9], [1, 3]])
-
+
         B = np.array([[5, 6, 1], [7, 2, 1]])
-
+
         C = np.array([[4, 6]])
-
+
         D = np.array([[3, 6]])
-
+
         # test
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -450,35 +466,35 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_incompatible_AC_matrices(self):
-
         integrate_y = True
-
+
         # A and C must have the same number of columns
-
+
         time_step_durations = [1, 1, 1]
-
+
         A = np.array([[4, 9], [1, 3]])
-
+
         B = np.array([[5, 6, 1], [7, 2, 1]])
-
+
         C = np.array([[4]])
-
+
         D = np.array([[3, 6, 3]])
-
+
         # test
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -486,35 +502,35 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
     # *****************************************************************************
     # *****************************************************************************
-
+
     def test_incompatible_CD_matrices(self):
-
         integrate_y = True
-
+
         # C and D must have the same number of rows
-
+
         time_step_durations = [1, 1, 8]
-
+
         A = np.array([[4, 9], [1, 3]])
-
+
         B = np.array([[5, 6, 1], [7, 2, 1]])
-
+
         C = np.array([[4, 9], [1, 3]])
-
+
         D = np.array([[3, 6, 3]])
-
+
         # test
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -522,35 +538,35 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_incompatible_AB_matrices(self):
-
         integrate_y = True
-
+
         # A and B must have the same number of rows
-
+
         time_step_durations = [1, 1, 8]
-
-        A = np.array([[4, 9],[1, 3]])
-
+
+        A = np.array([[4, 9], [1, 3]])
+
         B = np.array([[3, 6, 3]])
-
+
         C = np.array([[2, 4]])
-
+
         D = np.array([[5, 6, 1]])
-
+
         # test A
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -558,35 +574,35 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_nonsquare_A_matrices(self):
-
         integrate_y = True
-
+
         # Single non-square A matrix
-
+
         time_step_durations = [1, 1, 8]
-
+
         A = np.array([[4, 9]])
-
+
         B = np.array([[3]])
-
+
         C = np.array([[2]])
-
+
         D = np.array([[5]])
-
+
         # test A
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -594,26 +610,27 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
+
         # # multiple matrices, at least one of which is non-square
-
+
         # A = [np.array([[4]]), np.array([[1, 9]])]
-
+
         # B = [np.array([[3]]), np.array([[4]])]
-
+
         # C = [np.array([[2]]), np.array([[2]])]
-
+
         # D = [np.array([[8]]), np.array([[7]])]
-
+
         # # test A
-
+
         # number_errors = 0
-
+
         # try:
         #     _ = dynsys.DynamicSystem(
         #         time_interval_durations=time_step_durations,
@@ -624,32 +641,31 @@ class TestDynsys():
        #         integrate_outputs=integrate_y)
        # except ValueError:
        #     number_errors += 1
-
+
         # assert number_errors == 1
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_nonmatching_time_steps_and_matrices(self):
-
         integrate_y = True
-
+
         # multiple matrices and time intervals, but more time steps than matrices
-
+
         time_step_durations = [1, 1, 8]
-
-        A = [np.array([[4]]),np.array([[1]])]
-
-        B = [np.array([[3]]),np.array([[4]]),np.array([[4]])]
-
-        C = [np.array([[2]]),np.array([[2]]),np.array([[2]])]
-
-        D = [np.array([[8]]),np.array([[7]]),np.array([[7]])]
-
+
+        A = [np.array([[4]]), np.array([[1]])]
+
+        B = [np.array([[3]]), np.array([[4]]), np.array([[4]])]
+
+        C = [np.array([[2]]), np.array([[2]]), np.array([[2]])]
+
+        D = [np.array([[8]]), np.array([[7]]), np.array([[7]])]
+
         # test A
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -657,16 +673,17 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
+
         # test B
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -674,16 +691,17 @@ class TestDynsys():
                 B=A,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
+
         # test C
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -691,16 +709,17 @@ class TestDynsys():
                 B=B,
                 C=A,
                 D=D,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
+
         # test D
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -708,37 +727,37 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=A,
-                integrate_outputs=integrate_y)
+                integrate_outputs=integrate_y,
+            )
         except ValueError:
             number_errors += 1
-
+
         assert number_errors == 1
-
+
         # multiple matrices and time intervals, but more matrices than time steps
-
     # *************************************************************************
     # *************************************************************************
-
+
     def test_unrecognised_matrix_formats(self):
-
         integrate_outputs = False
-
+
         # single matrix: matrix as lists of lists
-
+
         time_step_durations = 8
-
+
         A = 3
-
+
         B = np.array([[3]])
-
+
         C = np.array([[2]])
-
+
         D = np.array([[8]])
-
+
         # test A
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -746,16 +765,17 @@ class TestDynsys():
                 B=B,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_outputs)
+                integrate_outputs=integrate_outputs,
+            )
         except TypeError:
             number_errors += 1
-
+
         assert number_errors == 1
-
+
         # test B
-
+
         number_errors = 0
-
+
         try:
             _ = dynsys.DynamicSystem(
                 time_interval_durations=time_step_durations,
@@ -763,16 +783,17 @@ class TestDynsys():
                 B=A,
                 C=C,
                 D=D,
-                integrate_outputs=integrate_outputs)
+                integrate_outputs=integrate_outputs,
+            )
         except TypeError:
+ integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # test C number_errors = 0 try: @@ -942,11 +970,12 @@ class TestDynsys(): B=B, C=A, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # test D number_errors = 0 try: @@ -956,30 +985,30 @@ class TestDynsys(): B=B, C=C, D=A, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # ************************************************************************* # ************************************************************************* - + def test_varying_matrix_sizes_integration(self): - integrate_outputs = True - + time_step_durations = [1, 1] - + A1 = np.array([[1]]) - A2 = np.array([[1,3],[4,7]]) - + A2 = np.array([[1, 3], [4, 7]]) + A = [A1, A2] - B = [np.array([[3]]),np.array([[4]])] - C = [np.array([[2]]),np.array([[2]])] - D = [np.array([[8]]),np.array([[7]])] - + B = [np.array([[3]]), np.array([[4]])] + C = [np.array([[2]]), np.array([[2]])] + D = [np.array([[8]]), np.array([[7]])] + # test A - + number_errors = 0 try: _ = dynsys.DynamicSystem( @@ -988,13 +1017,14 @@ class TestDynsys(): B=B, C=C, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # test B - + number_errors = 0 try: _ = dynsys.DynamicSystem( @@ -1003,11 +1033,12 @@ class TestDynsys(): B=A, C=C, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # test C number_errors = 0 try: @@ -1017,13 +1048,14 @@ class TestDynsys(): B=B, C=A, D=D, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # test D - + number_errors = 0 try: _ = dynsys.DynamicSystem( @@ -1032,170 +1064,161 @@ class TestDynsys(): B=B, C=C, D=A, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) except ValueError: number_errors += 1 assert number_errors == 1 - + # ************************************************************************* # ************************************************************************* - + def test_dynsys_stateless_multiout(self): - integrate_outputs = False number_outputs = 2 method_dynsys_stateless_multiout(integrate_outputs, number_outputs) - + integrate_outputs = False number_outputs = 1 method_dynsys_stateless_multiout(integrate_outputs, number_outputs) - + integrate_outputs = True number_outputs = 2 method_dynsys_stateless_multiout(integrate_outputs, number_outputs) - + integrate_outputs = True number_outputs = 1 method_dynsys_stateless_multiout(integrate_outputs, number_outputs) - + # integrate_outputs = False # number_outputs = 0 # method_dynsys_stateless_multiout(integrate_outputs, number_outputs) - + # integrate_outputs = True # number_outputs = 0 # method_dynsys_stateless_multiout(integrate_outputs, number_outputs) - + # ************************************************************************* # ************************************************************************* - + def test_dynsys_multiode_multiout(self): - integrate_outputs = False number_outputs = 0 method_dynsys_multiode_multiout(integrate_outputs, number_outputs) - + integrate_outputs = True number_outputs = 0 
         method_dynsys_multiode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = True
         number_outputs = 1
         method_dynsys_multiode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = False
         number_outputs = 2
         method_dynsys_multiode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = True
         number_outputs = 2
         method_dynsys_multiode_multiout(integrate_outputs, number_outputs)
-
     # *************************************************************************
     # *************************************************************************
-
     def test_dynsys_singleode_multiout(self):
-
         integrate_outputs = True
         number_outputs = 2
         method_dynsys_singleode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = True
         number_outputs = 1
         method_dynsys_singleode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = True
         number_outputs = 0
         method_dynsys_singleode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = False
         number_outputs = 2
         method_dynsys_singleode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = False
         number_outputs = 1
         method_dynsys_singleode_multiout(integrate_outputs, number_outputs)
-
         integrate_outputs = False
         number_outputs = 0
         method_dynsys_singleode_multiout(integrate_outputs, number_outputs)
-
     # *************************************************************************
     # *************************************************************************

-def method_dynsys_stateless_multiout(integrate_outputs, number_outputs):

+def method_dynsys_stateless_multiout(integrate_outputs, number_outputs):
     # *************************************************************************
     # *************************************************************************
-
+
     # time steps
     list_regular_time_steps = [1, 1, 1, 1]
     list_irregular_time_steps = [1, 1.5, 0.5, 1]
     assert len(list_regular_time_steps) == len(list_irregular_time_steps)
-
+
     # define the inputs
     list_inputs = [-5, 50, 0.1, 1]

     # *************************************************************************
     # *************************************************************************
-
+
     # generate time varying problem
-
+
     list_A_matrices = []
     list_B_matrices = []
     list_C_matrices = []
     list_D_matrices = []
     for dt in list_irregular_time_steps:
         # data
-        Aw, min_rel_heat = get_stateless_model_data(
-            relative_amplitude_variation=0.1)
+        Aw, min_rel_heat = get_stateless_model_data(relative_amplitude_variation=0.1)
         # matrices
-        (A_matrix,
-         B_matrix,
-         C_matrix,
-         D_matrix) = stateless_model(Aw, min_rel_heat)
+        (A_matrix, B_matrix, C_matrix, D_matrix) = stateless_model(Aw, min_rel_heat)
         if number_outputs == 0:
             D_matrix = None
         elif number_outputs != None:
-            D_matrix = D_matrix[0:number_outputs,:]
-
+            D_matrix = D_matrix[0:number_outputs, :]
+
         list_A_matrices.append(A_matrix)
         list_B_matrices.append(B_matrix)
         list_C_matrices.append(C_matrix)
         list_D_matrices.append(D_matrix)
-
+
     if number_outputs == 0:
         list_D_matrices = None
-
+
     # *************************************************************************
     # *************************************************************************
-
+
     # generate time invariant problem
-
+
     # data
-
+
     Aw, min_rel_heat = get_stateless_model_data()
-
+
     # matrices
-
-    (A_matrix,
-     B_matrix,
-     C_matrix,
-     D_matrix) = stateless_model(Aw, min_rel_heat)
-
+
+    (A_matrix, B_matrix, C_matrix, D_matrix) = stateless_model(Aw, min_rel_heat)
+
     if number_outputs == 0:
         D_matrix = None
     elif number_outputs != None:
-        D_matrix = D_matrix[0:number_outputs,:]
-
+        D_matrix = D_matrix[0:number_outputs, :]
+
     # *************************************************************************
     # *************************************************************************
-
-    # time invariant model, regular time steps
+
+    # time invariant model, regular time steps
     x, y = example_dynsys_time_invariant(
         list_regular_time_steps,
         list_inputs,
@@ -1204,14 +1227,15 @@ def method_dynsys_stateless_multiout(integrate_outputs, number_outputs):
         B_matrix,
         C_matrix,
         D_matrix,
-        x0=None)
-
+        x0=None,
+    )
+
     # TODO: check results

     # *************************************************************************
     # *************************************************************************
-
-    # time invariant model, irregular time steps
+
+    # time invariant model, irregular time steps
     x, y = example_dynsys_time_invariant(
         list_irregular_time_steps,
         list_inputs,
@@ -1220,183 +1244,190 @@ def method_dynsys_stateless_multiout(integrate_outputs, number_outputs):
         B_matrix,
         C_matrix,
         D_matrix,
-        x0=None)
-
+        x0=None,
+    )
+
     # TODO: check results

+
 # *****************************************************************************
 # *****************************************************************************

-def method_stateless_system_object(integrate_outputs: bool):

+def method_stateless_system_object(integrate_outputs: bool):
     # regular time steps
     time_step_durations = [1, 1, 1, 1]
-
+
     # define a sequence of time steps
-    t = np.array([sum(time_step_durations[0:i])
-                  for i in range(len(time_step_durations)+1)])
-
+    t = np.array(
+        [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+    )
+
     # define the inputs
     list_inputs = [-5, 50, 0.1, 1]
-    U = np.array([[input_i for _ in range(len(time_step_durations))]
-                  for input_i in list_inputs])
-
+    U = np.array(
+        [[input_i for _ in range(len(time_step_durations))] for input_i in list_inputs]
+    )
+
     # get the model
     Aw, min_rel_heat = get_stateless_model_data()
     _, _, _, D = stateless_model(Aw, min_rel_heat)
     ds = dynsys.StatelessSystem(
-        time_interval_durations=time_step_durations,
+        time_interval_durations=time_step_durations,
         D=D,
-        integrate_outputs=integrate_outputs)
-
+        integrate_outputs=integrate_outputs,
+    )
+
     # define the initial conditions
     X, Y = ds.simulate(U)
     assert X == None
     assert isinstance(Y, np.ndarray)
     if integrate_outputs:
-        assert Y.shape == (2,len(t)-1) # two outputs
+        assert Y.shape == (2, len(t) - 1)  # two outputs
     else:
-        assert Y.shape == (2,len(t)) # two outputs
-
+        assert Y.shape == (2, len(t))  # two outputs
+
     # *********************************************************************
-
+
     # regular time steps (as an int)
     time_step_durations = [1]
-
+
     # define a sequence of time steps
-    t = np.array([sum(time_step_durations[0:i])
-                  for i in range(len(time_step_durations)+1)])
-
+    t = np.array(
+        [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+    )
+
     # define the inputs
     list_inputs = [-5, 50, 0.1, 1]
-    U = np.array([[input_i for _ in range(len(time_step_durations))]
-                  for input_i in list_inputs])
-
+    U = np.array(
+        [[input_i for _ in range(len(time_step_durations))] for input_i in list_inputs]
+    )
+
     # get the model
     Aw, min_rel_heat = get_stateless_model_data()
     _, _, _, D = stateless_model(Aw, min_rel_heat)
     ds = dynsys.StatelessSystem(
-        time_interval_durations=time_step_durations[0],
+        time_interval_durations=time_step_durations[0],
         D=D,
-        integrate_outputs=integrate_outputs)
-
+        integrate_outputs=integrate_outputs,
+    )
+
     # define the initial conditions
     X, Y = ds.simulate(U)
-    # X, Y = ds.simulate(U[:,:-1]) # to avoid having the second
-
+    # X, Y = ds.simulate(U[:,:-1]) # to avoid having the second
+
     assert X == None
     assert isinstance(Y, np.ndarray)
     if integrate_outputs:
-        assert Y.shape == (2,len(t)-1) # two outputs
+        assert Y.shape == (2, len(t) - 1)  # two outputs
     else:
-        assert Y.shape == (2,len(t)) # two outputs
-
+        assert Y.shape == (2, len(t))  # two outputs
+
     # *********************************************************************
-
+
     # irregular time steps
     time_step_durations = [1, 1.5, 0.5, 1]
     # define a sequence of time steps
-    t = np.array([sum(time_step_durations[0:i])
-                  for i in range(len(time_step_durations)+1)])
+    t = np.array(
+        [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+    )
     # define the inputs
     list_inputs = [-5, 50, 0.1, 1]
-    U = np.array([[input_i for _ in range(len(time_step_durations))]
-                  for input_i in list_inputs])
+    U = np.array(
+        [[input_i for _ in range(len(time_step_durations))] for input_i in list_inputs]
+    )
     # get the model
     Aw, min_rel_heat = get_stateless_model_data()
     _, _, _, D = stateless_model(Aw, min_rel_heat)
     ds = dynsys.StatelessSystem(
-        time_interval_durations=time_step_durations,
+        time_interval_durations=time_step_durations,
         D=D,
-        integrate_outputs=integrate_outputs)
+        integrate_outputs=integrate_outputs,
+    )
     # define the initial conditions
     X, Y = ds.simulate(U)
     assert X == None
     assert isinstance(Y, np.ndarray)
     if integrate_outputs:
-        assert Y.shape == (2,len(t)-1) # two outputs
+        assert Y.shape == (2, len(t) - 1)  # two outputs
     else:
-        assert Y.shape == (2,len(t)) # two outputs
-
+        assert Y.shape == (2, len(t))  # two outputs
+
+
 # *****************************************************************************
 # *****************************************************************************

-def method_dynsys_multiode_multiout(
-    integrate_outputs: bool,
-    number_outputs: int
-    ):

+def method_dynsys_multiode_multiout(integrate_outputs: bool, number_outputs: int):
     # *************************************************************************
-
+
     # time steps
     list_regular_time_steps = [1, 1, 1, 1]
     list_irregular_time_steps = [1, 1.5, 0.5, 1]
     assert len(list_regular_time_steps) == len(list_irregular_time_steps)
-
+
     # define the inputs
     list_inputs = [-5, 50, 0.1, 1]
-
+
     # *************************************************************************
-
+
     # generate time varying problem
     list_A_matrices = []
     list_B_matrices = []
     list_C_matrices = []
     list_D_matrices = []
-
+
     for dt in list_irregular_time_steps:
-
         # data
         Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 = get_multi_ode_model_data(
-            relative_amplitude_variation=0.1)
-
+            relative_amplitude_variation=0.1
+        )
+
         # matrices
-        (A_matrix,
-         B_matrix,
-         C_matrix,
-         D_matrix) = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat)
-
+        (A_matrix, B_matrix, C_matrix, D_matrix) = two_node_model(
+            Ci, Ch, Ria, Rih, Aw, min_rel_heat
+        )
+
         if number_outputs == 0:
             C_matrix = None
             D_matrix = None
-
         elif number_outputs != None:
-            C_matrix = C_matrix[0:number_outputs,:]
-            D_matrix = D_matrix[0:number_outputs,:]
-
+            C_matrix = C_matrix[0:number_outputs, :]
+            D_matrix = D_matrix[0:number_outputs, :]
+
         list_A_matrices.append(A_matrix)
         list_B_matrices.append(B_matrix)
         list_C_matrices.append(C_matrix)
         list_D_matrices.append(D_matrix)
-
+
     if number_outputs == 0:
         list_C_matrices = None
         list_D_matrices = None
-
     # *************************************************************************
-
+
     # generate time invariant problem
-
+
     # data
     Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 = get_multi_ode_model_data()
-
+
     # matrices
-    (A_matrix,
-     B_matrix,
-     C_matrix,
-     D_matrix) = two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat)
-
+    (A_matrix, B_matrix, C_matrix, D_matrix) = two_node_model(
+        Ci, Ch, Ria, Rih, Aw, min_rel_heat
+    )
+
     if number_outputs == 0:
         C_matrix = None
         D_matrix = None
     elif number_outputs != None:
-        C_matrix = C_matrix[0:number_outputs,:]
-        D_matrix = D_matrix[0:number_outputs,:]
-
+        C_matrix = C_matrix[0:number_outputs, :]
+        D_matrix = D_matrix[0:number_outputs, :]
+
     # *************************************************************************
-
+
     # time invariant model, regular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_invariant_regular_steps(
         list_regular_time_steps,
         list_inputs,
@@ -1405,8 +1436,9 @@ def method_dynsys_multiode_multiout(
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_invariant(
         list_regular_time_steps,
         list_inputs,
@@ -1415,19 +1447,20 @@ def method_dynsys_multiode_multiout(
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
     # *************************************************************************
-
+
     # time invariant model, irregular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_invariant_irregular_steps(
         list_irregular_time_steps,
         list_inputs,
@@ -1436,8 +1469,9 @@ def method_dynsys_multiode_multiout(
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_invariant(
         list_irregular_time_steps,
         list_inputs,
@@ -1446,19 +1480,20 @@ def method_dynsys_multiode_multiout(
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
            np.testing.assert_allclose(y_i, y_scipy_i)
-
+
     # *************************************************************************
-
+
     # time-varying model, regular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_varying(
         list_regular_time_steps,
         list_inputs,
@@ -1467,8 +1502,9 @@ def method_dynsys_multiode_multiout(
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_varying(
         list_regular_time_steps,
         list_inputs,
@@ -1477,19 +1513,20 @@ def method_dynsys_multiode_multiout(
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
     # *************************************************************************
-
+
     # time-varying model, irregular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_varying(
         list_irregular_time_steps,
         list_inputs,
@@ -1498,8 +1535,9 @@ def method_dynsys_multiode_multiout(
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_varying(
         list_irregular_time_steps,
         list_inputs,
@@ -1508,123 +1546,122 @@ def method_dynsys_multiode_multiout(
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
+
 # *****************************************************************************
 # *****************************************************************************

 # test single ODE, multi-output dynamic systems while integrating outputs
-
+
 # examples_dynsys_singleode_multiout(True, print_outputs, seed_number)
-
-# # test single ODE, multi-output dynamic systems without integrating outputs
-
+
+# # test single ODE, multi-output dynamic systems without integrating outputs
+
 # examples_dynsys_singleode_multiout(False, print_outputs, seed_number)
-
-# # test single-ODE, single-output dynamic systems while integrating outputs
-
+
+# # test single-ODE, single-output dynamic systems while integrating outputs
+
 # examples_dynsys_singleode_multiout(True, print_outputs, seed_number, 1)

-# # test single-ODE, single-output dynamic systems without integrating outputs
-
-# examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 1)
-
-# # *************************************************************************
-
+# # test single-ODE, single-output dynamic systems without integrating outputs
+
+# examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 1)
+
+# # *************************************************************************
+
 # # outputless
-
-# # test single-ODE, outputless dynamic systems while integrating outputs
-
+
+# # test single-ODE, outputless dynamic systems while integrating outputs
+
 # examples_dynsys_singleode_multiout(True, print_outputs, seed_number, 0)

-# # test single-ODE, outputless dynamic systems without integrating outputs
-
-# examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 0)
+# # test single-ODE, outputless dynamic systems without integrating outputs
+
+# examples_dynsys_singleode_multiout(False, print_outputs, seed_number, 0)


-def method_dynsys_singleode_multiout(integrate_outputs: bool,
-                                     number_outputs: int = 2):

+def method_dynsys_singleode_multiout(integrate_outputs: bool, number_outputs: int = 2):
     # *************************************************************************
-
+
     # time steps
     list_regular_time_steps = [1, 1, 1, 1]
     list_irregular_time_steps = [1, 1.5, 0.5, 1]
     assert len(list_regular_time_steps) == len(list_irregular_time_steps)
-
+
     # define the inputs
     list_inputs = [-5, 50, 0.1, 1]
-
+
     # *************************************************************************
-
+
     # generate time varying problem
     list_A_matrices = []
     list_B_matrices = []
     list_C_matrices = []
     list_D_matrices = []
-
+
     for dt in list_irregular_time_steps:
-
         # data
-
+
         Ci, Ria, Aw, min_rel_heat, x0 = get_single_ode_model_data(
-            relative_amplitude_variation=0.1)
-
+            relative_amplitude_variation=0.1
+        )
+
         # matrices
-
-        (A_matrix,
-         B_matrix,
-         C_matrix,
-         D_matrix) = single_node_model(Ci, Ria, Aw, min_rel_heat)
-
+
+        (A_matrix, B_matrix, C_matrix, D_matrix) = single_node_model(
+            Ci, Ria, Aw, min_rel_heat
+        )
+
         if number_outputs == 0:
             C_matrix = None
             D_matrix = None
         elif number_outputs != None:
-            C_matrix = C_matrix[0:number_outputs,:]
-            D_matrix = D_matrix[0:number_outputs,:]
-
+            C_matrix = C_matrix[0:number_outputs, :]
+            D_matrix = D_matrix[0:number_outputs, :]
+
         list_A_matrices.append(A_matrix)
         list_B_matrices.append(B_matrix)
         list_C_matrices.append(C_matrix)
         list_D_matrices.append(D_matrix)
-
+
     if number_outputs == 0:
         list_C_matrices = None
         list_D_matrices = None
-
+
     # *************************************************************************
-
+
     # generate time invariant problem
-
+
     # data
-
+
     Ci, Ria, Aw, min_rel_heat, x0 = get_single_ode_model_data()
-
+
     # matrices
-
-    (A_matrix,
-     B_matrix,
-     C_matrix,
-     D_matrix) = single_node_model(Ci, Ria, Aw, min_rel_heat)
+
+    (A_matrix, B_matrix, C_matrix, D_matrix) = single_node_model(
+        Ci, Ria, Aw, min_rel_heat
+    )
     if number_outputs == 0:
         C_matrix = None
         D_matrix = None
     elif number_outputs != None:
-        C_matrix = C_matrix[0:number_outputs,:]
-        D_matrix = D_matrix[0:number_outputs,:]
-
+        C_matrix = C_matrix[0:number_outputs, :]
+        D_matrix = D_matrix[0:number_outputs, :]
+
     # *************************************************************************
-
+
     # time invariant model, regular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_invariant_regular_steps(
         list_regular_time_steps,
         list_inputs,
@@ -1633,8 +1670,9 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_invariant(
         list_regular_time_steps,
         list_inputs,
@@ -1643,19 +1681,20 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
     # *************************************************************************
-
+
     # time invariant model, irregular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_invariant_irregular_steps(
         list_irregular_time_steps,
         list_inputs,
@@ -1664,8 +1703,9 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_invariant(
         list_irregular_time_steps,
         list_inputs,
@@ -1674,19 +1714,20 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         B_matrix,
         C_matrix,
         D_matrix,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
     # *************************************************************************
-
+
     # time-varying model, regular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_varying(
         list_regular_time_steps,
         list_inputs,
@@ -1695,8 +1736,9 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_varying(
         list_regular_time_steps,
         list_inputs,
@@ -1705,19 +1747,20 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
     # *************************************************************************
-
+
     # time-varying model, irregular time steps
-
+
     x_scipy, y_scipy = example_scipy_time_varying(
         list_irregular_time_steps,
         list_inputs,
@@ -1726,8 +1769,9 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
+        x0,
+    )
+
     x, y = example_dynsys_time_varying(
         list_irregular_time_steps,
         list_inputs,
@@ -1736,496 +1780,506 @@ def method_dynsys_singleode_multiout(integrate_outputs: bool,
         list_B_matrices,
         list_C_matrices,
         list_D_matrices,
-        x0)
-
-    for (x_scipy_i, x_i) in zip(x_scipy, x):
+        x0,
+    )
+
+    for x_scipy_i, x_i in zip(x_scipy, x):
         np.testing.assert_allclose(x_i, x_scipy_i)
-
+
     if number_outputs != 0:
-        for (y_scipy_i, y_i) in zip(y_scipy, y):
+        for y_scipy_i, y_i in zip(y_scipy, y):
             np.testing.assert_allclose(y_i, y_scipy_i)
-
+
+
# *****************************************************************************
# *****************************************************************************

+
 def example_scipy_time_invariant_regular_steps(
-    time_step_durations: list,
-    inputs_list: list,
-    integrate_outputs: bool,
-    A_matrix,
-    B_matrix,
-    C_matrix,
-    D_matrix,
-    x0):
-
+    time_step_durations: list,
+    inputs_list: list,
+    integrate_outputs: bool,
+    A_matrix,
+    B_matrix,
+    C_matrix,
+    D_matrix,
+    x0,
+):
     # define a sequence of time steps
-
-    t = np.array([sum(time_step_durations[0:i])
-                  for i in range(len(time_step_durations)+1)])
-
+
+    t = np.array(
+        [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+    )
+
     # define the inputs
-
+
     u = np.array([inputs_list for dt in t])
-
     if type(C_matrix) == type(None) and type(D_matrix) == type(None):
+        ss = StateSpace(
+            A_matrix,
+            B_matrix,
+            np.zeros((1, A_matrix.shape[1])),
+            np.zeros((1, B_matrix.shape[1])),
+        )

-        ss = StateSpace(A_matrix,
-                        B_matrix,
-                        np.zeros((1,A_matrix.shape[1])),
-                        np.zeros((1,B_matrix.shape[1])))
-
     else:
-
         ss = StateSpace(A_matrix, B_matrix, C_matrix, D_matrix)
-
+
     # simulate the system response
-
+
     tout, yout, xout = lsim(ss, U=u, T=t, X0=x0)
-
+
     # convert to matrix format if there is only one dimension
-
+
     if len(xout.shape) == 1:
-
         xout = np.array([xout]).T
-
+
     if len(yout.shape) == 1:
-
         yout = np.array([yout]).T
-
+
     if integrate_outputs:
-
         # yout's shape should be: (number of time steps, 2)
-
+
         # ignore the first instant
-
-        yout = yout[1:,:]
-
+
+        yout = yout[1:, :]
+
         yout_m, yout_n = yout.shape
-
+
         # multiply each output by the respective time step
-
+
         yout = np.array(
-            [[yout[m,n]*time_step_durations[n] for n in range(yout_n)]
-             for m in range(yout_m)]
-            )
-
+            [
+                [yout[m, n] * time_step_durations[n] for n in range(yout_n)]
+                for m in range(yout_m)
+            ]
+        )
+
         # note: the code above should not work when C != 0
-
     return xout.T, yout.T
-
+
+
# *****************************************************************************
# *****************************************************************************

+
 def example_scipy_time_invariant_irregular_steps(
-    time_step_durations: list,
-    inputs_list: list,
-    integrate_outputs: bool,
-    A_matrix,
-    B_matrix,
-    C_matrix,
-    D_matrix,
-    x0):
-
+    time_step_durations: list,
+    inputs_list: list,
+    integrate_outputs: bool,
+    A_matrix,
+    B_matrix,
+    C_matrix,
+    D_matrix,
+    x0,
+):
     # define a sequence of time steps
-
-    t = np.array([sum(time_step_durations[0:i])
-                  for i in range(len(time_step_durations)+1)])
-
+
+    t = np.array(
+        [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+    )
+
     # define the inputs
-
+
     u = np.array([inputs_list for _ in t])
-
+
     # get the model
-
     if type(C_matrix) == type(None) and type(D_matrix) == type(None):
+        ss = StateSpace(
+            A_matrix,
+            B_matrix,
+            np.zeros((1, A_matrix.shape[1])),
+            np.zeros((1, B_matrix.shape[1])),
+        )
+
+        yout = np.zeros((1, len(time_step_durations) + 1))

-        ss = StateSpace(A_matrix,
-                        B_matrix,
-                        np.zeros((1,A_matrix.shape[1])),
-                        np.zeros((1,B_matrix.shape[1])))
-
-        yout = np.zeros((1, len(time_step_durations)+1))
-
     else:
-
         ss = StateSpace(A_matrix, B_matrix, C_matrix, D_matrix)
-
-        yout = np.zeros((C_matrix.shape[0], len(time_step_durations)+1))
-
+
+        yout = np.zeros((C_matrix.shape[0], len(time_step_durations) + 1))
+
     # declare output variables
-
-    xout = np.zeros((A_matrix.shape[0], len(time_step_durations)+1))
-
-    #yout = np.zeros((C_matrix.shape[0], len(time_step_durations)+1))
-
-    xout[:,0] = x0
-
+
+    xout = np.zeros((A_matrix.shape[0], len(time_step_durations) + 1))
+
+    # yout = np.zeros((C_matrix.shape[0], len(time_step_durations)+1))
+
+    xout[:, 0] = x0
+
     # initial output
-
+
     if not integrate_outputs:
-
         # do not ignore the first instant
-
-        yout[:,0] = np.dot(ss.D, u[0,:])
-
+
+        yout[:, 0] = np.dot(ss.D, u[0, :])
+
     # else:
-
+
     #     yout[:,0] = np.array([0, 0])
-
+
     # for each time step
-
+
     for i in range(len(time_step_durations)):
-
         # time step duration
-
+
         dt = time_step_durations[i]
-
+
         ssd = ss.to_discrete(dt=dt)
-
+
         # simulate the system response
-
-        _, ytemp, xtemp = dlsim(ssd,
-                                u=np.array([u[i,:],u[i+1,:]]),
-                                t=np.array([0,dt]),
-                                x0=xout[:,i])
-
-        xout[:,i+1] = xtemp[1,:] #np.array([0,0])
-
-        yout[:,i+1] = ytemp[1,:] #np.array([0,0])
-
+
+        _, ytemp, xtemp = dlsim(
+            ssd, u=np.array([u[i, :], u[i + 1, :]]), t=np.array([0, dt]), x0=xout[:, i]
+        )
+
+        xout[:, i + 1] = xtemp[1, :]  # np.array([0,0])
+
+        yout[:, i + 1] = ytemp[1, :]  # np.array([0,0])
+
     if integrate_outputs:
-
         # yout's shape should be: (2,number of time steps)
-
+
         # ignore the first instant
-
-        yout = yout[:,1:]
-
+
+        yout = yout[:, 1:]
+
         yout_m, yout_n = yout.shape
-
+
         # multiply each output by the respective time step
-
+
         for m in range(yout_m):
-
             for n in range(yout_n):
-
                 if n == 0:
-
-                    continue # ignore the first time interval
-
-                yout[m,n] = yout[m,n]*time_step_durations[n]
+                    continue  # ignore the first time interval
+
+                yout[m, n] = yout[m, n] * time_step_durations[n]

     return xout, yout

+
# *****************************************************************************
# *****************************************************************************

-def example_scipy_time_varying(time_step_durations: list,
-                               inputs_list: list,
-                               integrate_outputs: bool,
-                               A_matrix,
-                               B_matrix,
-                               C_matrix,
-                               D_matrix,
-                               x0):
-
+
+def example_scipy_time_varying(
+    time_step_durations: list,
+    inputs_list: list,
+    integrate_outputs: bool,
+    A_matrix,
+    B_matrix,
+    C_matrix,
+    D_matrix,
+    x0,
+):
     # define a sequence of time steps
-
-    t = np.array([sum(time_step_durations[0:i])
-                  for i in range(len(time_step_durations)+1)])
-
+
+    t = np.array(
+        [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)]
+    )
+
     # define the inputs
-
+
     u = np.array([inputs_list for dt in t])
-
+
     # define the initial conditions
-
-    xout = np.zeros((A_matrix[0].shape[0], len(time_step_durations)+1))
-
-    xout[:,0] = x0
-
+
+    xout = np.zeros((A_matrix[0].shape[0], len(time_step_durations) + 1))
+
+    xout[:, 0] = x0
+
     if type(C_matrix) == type(None) and type(D_matrix) == type(None):
-
-        yout = np.zeros((1, len(time_step_durations)+1))
-
+        yout = np.zeros((1, 
len(time_step_durations) + 1)) + else: - - yout = np.zeros((C_matrix[0].shape[0], len(time_step_durations)+1)) - - + yout = np.zeros((C_matrix[0].shape[0], len(time_step_durations) + 1)) + for i, dt in enumerate(time_step_durations): - # get the model - + if type(C_matrix) == type(None) and type(D_matrix) == type(None): + ss = StateSpace( + A_matrix[i], + B_matrix[i], + np.zeros((1, A_matrix[i].shape[1])), + np.zeros((1, B_matrix[i].shape[1])), + ) - ss = StateSpace(A_matrix[i], - B_matrix[i], - np.zeros((1,A_matrix[i].shape[1])), - np.zeros((1,B_matrix[i].shape[1]))) - else: - ss = StateSpace(A_matrix[i], B_matrix[i], C_matrix[i], D_matrix[i]) - + if i == 0 and not integrate_outputs: - # compute the initial output - - yout[:,0] = np.dot(ss.D, u[0,:]) - + + yout[:, 0] = np.dot(ss.D, u[0, :]) + ssd = ss.to_discrete(dt=dt) - + # simulate the system response - + if i == 0: - - _, ytemp, xtemp = dlsim(ssd, - u=np.array([u[i],u[i]]), - t=np.array([0,dt]), - x0=x0) - + _, ytemp, xtemp = dlsim( + ssd, u=np.array([u[i], u[i]]), t=np.array([0, dt]), x0=x0 + ) + else: - - _, ytemp, xtemp = dlsim(ssd, - u=np.array([u[i],u[i]]), - t=np.array([0,dt]), - x0=xtemp[1,:]) - + _, ytemp, xtemp = dlsim( + ssd, u=np.array([u[i], u[i]]), t=np.array([0, dt]), x0=xtemp[1, :] + ) + # assert that the sizes match - - xout[:,i+1] = xtemp[1,:] - - yout[:,i+1] = ytemp[1,:] - + + xout[:, i + 1] = xtemp[1, :] + + yout[:, i + 1] = ytemp[1, :] + if integrate_outputs: - # yout's shape should be: (2,number of time steps) - + # ignore the first instant - - yout = yout[:,1:] - + + yout = yout[:, 1:] + yout_m, yout_n = yout.shape - + # multiply each output by the respective time step - + for m in range(yout_m): - for n in range(yout_n): - if n == 0: - - continue # ignore the first time interval - - yout[m,n] = yout[m,n]*time_step_durations[n] - + continue # ignore the first time interval + + yout[m, n] = yout[m, n] * time_step_durations[n] + return xout, yout + # ***************************************************************************** # ***************************************************************************** -def example_dynsys_time_invariant(time_step_durations: list, - inputs_list: list, - integrate_outputs: bool, - A_matrix, - B_matrix, - C_matrix, - D_matrix, - x0): - + +def example_dynsys_time_invariant( + time_step_durations: list, + inputs_list: list, + integrate_outputs: bool, + A_matrix, + B_matrix, + C_matrix, + D_matrix, + x0, +): # define a sequence of time steps - - t = np.array([sum(time_step_durations[0:i]) - for i in range(len(time_step_durations)+1)]) - + + t = np.array( + [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)] + ) + # define the inputs - + # U = np.array([[input_i for dt in t] # for input_i in inputs_list]) - - U = np.array([[input_i for _ in range(len(time_step_durations))] - for input_i in inputs_list]) - + + U = np.array( + [[input_i for _ in range(len(time_step_durations))] for input_i in inputs_list] + ) + # get the model - + ds = dynsys.DynamicSystem( time_interval_durations=time_step_durations, A=A_matrix, B=B_matrix, C=C_matrix, D=D_matrix, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) # define the initial conditions - + X, Y = ds.simulate(U, x0) - + return X, Y + # ***************************************************************************** # ***************************************************************************** -def example_dynsys_time_varying(time_step_durations: list, - inputs_list: list, - 
integrate_outputs: bool, - A_matrix, - B_matrix, - C_matrix, - D_matrix, - x0): - + +def example_dynsys_time_varying( + time_step_durations: list, + inputs_list: list, + integrate_outputs: bool, + A_matrix, + B_matrix, + C_matrix, + D_matrix, + x0, +): # define a sequence of time steps - - t = np.array([sum(time_step_durations[0:i]) - for i in range(len(time_step_durations)+1)]) - + + t = np.array( + [sum(time_step_durations[0:i]) for i in range(len(time_step_durations) + 1)] + ) + # define the inputs - + # U = np.array([[input_i for dt in t] # for input_i in inputs_list]) - - U = np.array([[input_i for _ in range(len(time_step_durations))] - for input_i in inputs_list]) - + + U = np.array( + [[input_i for _ in range(len(time_step_durations))] for input_i in inputs_list] + ) + ds = dynsys.DynamicSystem( time_interval_durations=time_step_durations, A=A_matrix, B=B_matrix, C=C_matrix, D=D_matrix, - integrate_outputs=integrate_outputs) + integrate_outputs=integrate_outputs, + ) # define the initial conditions - + X, Y = ds.simulate(U, x0) - - return X, Y + + return X, Y + # ***************************************************************************** # ***************************************************************************** + def get_stateless_model_data(relative_amplitude_variation: float = 0.0): - mrh_deviation = random.random() - 0.5 - - Aw = 6.22 # original: 6.22 m2 - - min_rel_heat = 0.2*(1+relative_amplitude_variation*mrh_deviation) - + + Aw = 6.22 # original: 6.22 m2 + + min_rel_heat = 0.2 * (1 + relative_amplitude_variation * mrh_deviation) + return Aw, min_rel_heat - + + # ***************************************************************************** # ***************************************************************************** + def get_single_ode_model_data(relative_amplitude_variation: float = 0.0): - # define how the coefficients change - + Ria_deviation = random.random() - 0.5 - + # define the (A, B, C and D) matrices # A: n*n # B: n*m # C: r*n # D: r*m - - Ci = 1.360*3600000 - Ria = ( - (1+relative_amplitude_variation*Ria_deviation)*5.31/3600000 - ) + + Ci = 1.360 * 3600000 + Ria = (1 + relative_amplitude_variation * Ria_deviation) * 5.31 / 3600000 Aw = 6.22 - + min_rel_heat = 0.2 - + x0 = np.array([20]) - + return Ci, Ria, Aw, min_rel_heat, x0 - + + # ***************************************************************************** # ***************************************************************************** + def get_multi_ode_model_data(relative_amplitude_variation: float = 0.0): - # define how the coefficients change - + Rih_deviation = random.random() - 0.5 - + Ria_deviation = random.random() - 0.5 - + # define the (A, B, C and D) matrices # A: n*n # B: n*m # C: r*n # D: r*m - + # from Bacher and Madsen (2011): model TiTh - - Ci = 1.360*3600000 # original: 1.36 kWh/ºC - Ch = 0.309*3600000 # original: 0.309 kWh/ºC + + Ci = 1.360 * 3600000 # original: 1.36 kWh/ºC + Ch = 0.309 * 3600000 # original: 0.309 kWh/ºC Ria = ( - (1+relative_amplitude_variation*Ria_deviation)*5.31/3600000 - ) # original: 5.31 ºC/kWh + (1 + relative_amplitude_variation * Ria_deviation) * 5.31 / 3600000 + ) # original: 5.31 ºC/kWh Rih = ( - (1+relative_amplitude_variation*Rih_deviation)*0.639/3600000 - ) # original: 0.639 ºC/kWh - Aw = 6.22 # original: 6.22 m2 - + (1 + relative_amplitude_variation * Rih_deviation) * 0.639 / 3600000 + ) # original: 0.639 ºC/kWh + Aw = 6.22 # original: 6.22 m2 + min_rel_heat = 0.2 - + x0 = np.array([20, 20]) - + return Ci, Ch, Ria, Rih, Aw, min_rel_heat, x0 - + + # 
***************************************************************************** # ***************************************************************************** + def stateless_model(Aw, min_rel_heat): - # inputs: Ta, phi_s, phi_h above the minimum, phi_h status # outputs: solar irradiance, heat - - d = np.array([[0, Aw, 0, 0], - [0, 0, (1-min_rel_heat), min_rel_heat]]) - + + d = np.array([[0, Aw, 0, 0], [0, 0, (1 - min_rel_heat), min_rel_heat]]) + return None, None, None, d - + + # ***************************************************************************** # ***************************************************************************** + def single_node_model(Ci, Ria, Aw, min_rel_heat): - # states: Ti and Th # inputs: Ta, phi_s, phi_h above the minimum, phi_h status # outputs: solar irradiance, heat - - a = np.array([[-1/(Ria*Ci)]]) - b = np.array([[1/(Ci*Ria), Aw/Ci, (1-min_rel_heat)/Ci, min_rel_heat/Ci]]) - c = np.array([[0],[0]]) - d = np.array([[0, Aw, 0, 0], - [0, 0, (1-min_rel_heat), min_rel_heat]]) - + + a = np.array([[-1 / (Ria * Ci)]]) + b = np.array( + [[1 / (Ci * Ria), Aw / Ci, (1 - min_rel_heat) / Ci, min_rel_heat / Ci]] + ) + c = np.array([[0], [0]]) + d = np.array([[0, Aw, 0, 0], [0, 0, (1 - min_rel_heat), min_rel_heat]]) + return a, b, c, d - + + # ***************************************************************************** # ***************************************************************************** + def two_node_model(Ci, Ch, Ria, Rih, Aw, min_rel_heat): - # states: Ti and Th # inputs: Ta, phi_s, phi_h above the minimum, phi_h status # outputs: solar irradiance, heat - - a = np.array([[-(1/Rih+1/Ria)/Ci, 1/(Ci*Rih)], - [1/(Ch*Rih), -1/(Ch*Rih)]]) - b = np.array([[1/(Ci*Ria), Aw/Ci, 0, 0], - [0, 0, (1-min_rel_heat)/Ch, min_rel_heat/Ch]]) - c = np.array([[0, 0], - [0, 0]]) - d = np.array([[0, Aw, 0, 0], - [0, 0, (1-min_rel_heat), min_rel_heat]]) - + + a = np.array( + [[-(1 / Rih + 1 / Ria) / Ci, 1 / (Ci * Rih)], [1 / (Ch * Rih), -1 / (Ch * Rih)]] + ) + b = np.array( + [ + [1 / (Ci * Ria), Aw / Ci, 0, 0], + [0, 0, (1 - min_rel_heat) / Ch, min_rel_heat / Ch], + ] + ) + c = np.array([[0, 0], [0, 0]]) + d = np.array([[0, Aw, 0, 0], [0, 0, (1 - min_rel_heat), min_rel_heat]]) + return a, b, c, d - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_esipp_network.py b/tests/test_esipp_network.py index 0959c95..414ddbe 100644 --- a/tests/test_esipp_network.py +++ b/tests/test_esipp_network.py @@ -25,2037 +25,2123 @@ from src.topupopt.problems.esipp.resource import ResourcePrice # TODO: add test for undirected arcs involving import and export nodes + class TestNetwork: - # ************************************************************************* # ************************************************************************* def test_tree_topology(self): - # create a network object with a tree topology - + tree_network = binomial_tree(3, create_using=MultiDiGraph) - + network = Network(tree_network) - + for edge_key in network.edges(keys=True): - arc = ArcsWithoutLosses( - name=str(edge_key), - capacity=[5, 10], - minimum_cost=[3, 6], - specific_capacity_cost=0, - capacity_is_instantaneous=False - ) - - network.add_edge( - *edge_key, - **{Network.KEY_ARC_TECH: arc} - ) - + name=str(edge_key), + capacity=[5, 10], + minimum_cost=[3, 6], + 
specific_capacity_cost=0, + capacity_is_instantaneous=False, + ) + + network.add_edge(*edge_key, **{Network.KEY_ARC_TECH: arc}) + # assert that it does not have a tree topology - + assert not network.has_tree_topology() - + # select all the nodes - + for edge_key in network.edges(keys=True): - - network.edges[edge_key][ - Network.KEY_ARC_TECH].options_selected[0] = True - + network.edges[edge_key][Network.KEY_ARC_TECH].options_selected[0] = True + # assert that it has a tree topology - + assert network.has_tree_topology() - + # ************************************************************************* # ************************************************************************* - + def test_arc_technologies_static_losses(self): - # ********************************************************************* # ********************************************************************* - + number_time_intervals = 3 number_scenarios = 2 number_options = 4 - + efficiency_dict = { - (q,k): 0.95 + (q, k): 0.95 for q in range(number_scenarios) for k in range(number_time_intervals) - } - + } + static_loss_dict = { - (h,q,k): 1 + (h, q, k): 1 for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - } - + } + for capacity_is_instantaneous in (True, False): - arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, + name="any", + efficiency=efficiency_dict, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=static_loss_dict, - validate=True - ) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # isotropic - + arc_tech = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=static_loss_dict, - validate=True - ) - + validate=True, + ) + assert not arc_tech.has_proportional_losses() - + assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # create arc technology with only one option - + arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, + name="any", + efficiency=efficiency_dict, efficiency_reverse=None, capacity=(1,), minimum_cost=(1,), - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (0,q,k): 1 - #for h in range(number_options) + (0, q, k): 1 + # for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - }, - validate=True - ) - + }, + 
validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # create arc technology for one time interval - + arc_tech = Arcs( - name='any', + name="any", efficiency={ (q, 0): 0.5 for q in range(number_scenarios) - #for k in range(number_time_intervals) - }, + # for k in range(number_time_intervals) + }, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,0): 1 + (h, q, 0): 1 for h in range(number_options) for q in range(number_scenarios) - #for k in range(number_time_intervals) - }, - validate=True - ) - + # for k in range(number_time_intervals) + }, + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + # ********************************************************************* - + # TypeError: The static losses should be given as a dict or None. - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=tuple( - [k for k in range(number_time_intervals)] - for o in range(number_options)), - validate=True - ) + [k for k in range(number_time_intervals)] + for o in range(number_options) + ), + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - - # ValueError('The static losses should be specified for each arc + + # ValueError('The static losses should be specified for each arc # option.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h, q,): 1 + ( + h, + q, + ): 1 for h in range(number_options) for q in range(number_scenarios) - }, - validate=True - ) + }, + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('The static losses must be specified via a list of lists.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in 
range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss=[ tuple(k for k in range(number_time_intervals)) - for o in range(number_options)], - validate=True - ) + for o in range(number_options) + ], + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('The static loss values are inconsistent with the number ' # 'of options, scenarios and intervals.') - + error_triggered = False try: arc_tech = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,k): 1 + (h, q, k): 1 for h in range(number_options) for q in range(number_scenarios) - for k in range(number_time_intervals-1) - }, - validate=True - ) - - arc_tech.validate_sizes(number_options=number_options, - number_scenarios=number_scenarios, - number_intervals=[ - number_time_intervals - for _ in range(number_scenarios)]) + for k in range(number_time_intervals - 1) + }, + validate=True, + ) + + arc_tech.validate_sizes( + number_options=number_options, + number_scenarios=number_scenarios, + number_intervals=[ + number_time_intervals for _ in range(number_scenarios) + ], + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('The static losses were not provided as numbers.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,k): str(3.54) + (h, q, k): str(3.54) for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - }, - validate=True - ) + }, + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('The static losses must be positive or zero.') - + error_triggered = False try: _ = Arcs( - name='any', - efficiency=None, + name="any", + efficiency=None, efficiency_reverse=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, static_loss={ - (h,q,k): -random.randint(0, 1)*random.random() + (h, q, k): -random.randint(0, 1) * random.random() for h in range(number_options) for q in range(number_scenarios) for k in range(number_time_intervals) - }, - validate=True - ) + }, + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError: The static loss dict keys must be tuples - - error_triggered = False + + error_triggered = False 
            try:
                _ = Arcs(
-                    name='hey',
+                    name="hey",
                    efficiency=None,
                    efficiency_reverse=None,
-                    static_loss={k:1 for k in range(number_time_intervals)},
-                    capacity=tuple(1+o for o in range(number_options)),
-                    minimum_cost=tuple(1+o for o in range(number_options)),
-                    specific_capacity_cost=1,
+                    static_loss={k: 1 for k in range(number_time_intervals)},
+                    capacity=tuple(1 + o for o in range(number_options)),
+                    minimum_cost=tuple(1 + o for o in range(number_options)),
+                    specific_capacity_cost=1,
                    capacity_is_instantaneous=capacity_is_instantaneous,
-                    validate=True)
+                    validate=True,
+                )
            except TypeError:
                error_triggered = True
            assert error_triggered
-
-            #ValueError( 'The static loss dict keys must be tuples of size 3.')
-
-            error_triggered = False
+
+            # ValueError( 'The static loss dict keys must be tuples of size 3.')
+
+            error_triggered = False
            try:
                _ = Arcs(
-                    name='hey',
+                    name="hey",
                    efficiency=None,
                    efficiency_reverse=None,
-                    static_loss={(k,3): 1 for k in range(number_time_intervals)},
-                    capacity=tuple(1+o for o in range(number_options)),
-                    minimum_cost=tuple(1+o for o in range(number_options)),
-                    specific_capacity_cost=1,
+                    static_loss={(k, 3): 1 for k in range(number_time_intervals)},
+                    capacity=tuple(1 + o for o in range(number_options)),
+                    minimum_cost=tuple(1 + o for o in range(number_options)),
+                    specific_capacity_cost=1,
                    capacity_is_instantaneous=capacity_is_instantaneous,
-                    validate=True)
+                    validate=True,
+                )
            except ValueError:
                error_triggered = True
            assert error_triggered
-
+
            # TypeError(The static losses should be given as a dict or None.')
-
-            error_triggered = False
+
+            error_triggered = False
            try:
                _ = Arcs(
-                    name='hey',
-                    efficiency=None,
+                    name="hey",
+                    efficiency=None,
                    efficiency_reverse=None,
                    static_loss=[1 for k in range(number_time_intervals)],
-                    capacity=tuple(1+o for o in range(number_options)),
-                    minimum_cost=tuple(1+o for o in range(number_options)),
-                    specific_capacity_cost=1,
+                    capacity=tuple(1 + o for o in range(number_options)),
+                    minimum_cost=tuple(1 + o for o in range(number_options)),
+                    specific_capacity_cost=1,
                    capacity_is_instantaneous=capacity_is_instantaneous,
-                    validate=True)
+                    validate=True,
+                )
            except TypeError:
                error_triggered = True
            assert error_triggered
-
+
            # ValueError(
            #     'No static loss values were provided. 
There should be one'+ # ' value per option, scenario and time interval.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', + name="hey", efficiency=None, efficiency_reverse=None, - static_loss={}, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + static_loss={}, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # ************************************************************************* # ************************************************************************* def test_arc_technologies(self): - # ********************************************************************* - + # create arc technology using instantaneous capacities - + number_scenarios = 2 number_options = 4 number_time_intervals = 3 - + efficiency_dict = { - (q,k): 0.85 + (q, k): 0.85 for q in range(number_scenarios) for k in range(number_time_intervals) - } - + } + for capacity_is_instantaneous in (True, False): - arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="any", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert arc_tech.has_constant_efficiency() - + # create arc technology with only one option - + arc_tech = Arcs( - name='any', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="any", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, capacity=(1,), minimum_cost=(1,), - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert arc_tech.has_constant_efficiency() - + # create arc technology for one time interval - + arc_tech = Arcs( - name='any', - efficiency={(0,0): 0.95}, - efficiency_reverse=None, + name="any", + efficiency={(0, 0): 0.95}, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + 
validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert arc_tech.has_constant_efficiency() - + # create arc technology for one time interval and isotropic - + arc_tech = Arcs( - name='any', - efficiency={(0,0): 0.95}, - efficiency_reverse={(0,0): 0.95}, + name="any", + efficiency={(0, 0): 0.95}, + efficiency_reverse={(0, 0): 0.95}, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert arc_tech.has_constant_efficiency() - + # create arc technology for one time interval and anisotropic - + arc_tech = Arcs( - name='any', - efficiency={(0,0): 0.95}, - efficiency_reverse={(0,0): 1}, + name="any", + efficiency={(0, 0): 0.95}, + efficiency_reverse={(0, 0): 1}, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert not arc_tech.has_constant_efficiency() - + # create arc technology for one time interval and anisotropic - + arc_tech = Arcs( - name='any', - efficiency={(0,0): 1}, - efficiency_reverse={(0,0): 0.95}, + name="any", + efficiency={(0, 0): 1}, + efficiency_reverse={(0, 0): 0.95}, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert not arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert not arc_tech.has_constant_efficiency() - + # create arc technology for one time interval and anisotropic - + arc_tech = Arcs( - name='any', - efficiency={(0,0): 0.95}, - 
efficiency_reverse={(0,0): 0.95}, + name="any", + efficiency={(0, 0): 0.95}, + efficiency_reverse={(0, 0): 0.95}, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_been_selected() - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=True) - + assert arc_tech.is_isotropic(reverse_none_means_isotropic=False) - + assert arc_tech.has_constant_efficiency() - + # ***************************************************************** # ***************************************************************** - + # trigger errors - + # TypeError('The name attribute is not hashable.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name=[1,2,3], - efficiency=efficiency_dict, - efficiency_reverse=None, + name=[1, 2, 3], + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - - #TypeError:The efficiency dict keys must be (scenario, interval) tuples - - error_triggered = False + + # TypeError:The efficiency dict keys must be (scenario, interval) tuples + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency={k:1 for k in range(number_time_intervals)}, - efficiency_reverse=None, + name="hey", + efficiency={k: 1 for k in range(number_time_intervals)}, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - - #ValueError( 'The efficiency dict keys must be tuples of size 2.') - - error_triggered = False + + # ValueError( 'The efficiency dict keys must be tuples of size 2.') + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency={(k,3,4) :1 for k in range(number_time_intervals)}, - efficiency_reverse=None, + name="hey", + efficiency={(k, 3, 4): 1 for k in range(number_time_intervals)}, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError(The 
efficiency should be given as a dict or None.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=[1 for k in range(number_time_intervals)], - efficiency_reverse=None, + name="hey", + efficiency=[1 for k in range(number_time_intervals)], + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # TypeError('The reverse efficiency has to match the nominal'+ # ' one when there are no proportional losses.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=None, - efficiency_reverse={}, + name="hey", + efficiency=None, + efficiency_reverse={}, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # TypeError:'The reverse efficiency should be given as a dict or None.' - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=[1 for k in range(number_time_intervals)], + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=[1 for k in range(number_time_intervals)], static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError( # 'No efficiency values were provided. There should be '+ # 'one value per scenario and time interval.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse={}, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse={}, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # ValueError: The keys for the efficiency dicts do not match. 
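The mismatched-keys failure that follows is easier to read next to a well-formed counterpart. A minimal sketch of an anisotropic arc whose forward and reverse efficiency dicts share the same (scenario, interval) key set, so that validation passes; it assumes the Arcs signature and the number_scenarios / number_time_intervals / number_options variables used throughout this test file, and is not part of the patch itself:

    # sketch only; assumes Arcs is importable as in this test module
    forward_eff = {
        (q, k): 0.9
        for q in range(number_scenarios)
        for k in range(number_time_intervals)
    }
    reverse_eff = {key: 0.8 for key in forward_eff}  # identical key set

    arc = Arcs(
        name="anisotropic",
        efficiency=forward_eff,
        efficiency_reverse=reverse_eff,  # keys match, so no ValueError
        static_loss=None,
        capacity=tuple(1 + o for o in range(number_options)),
        minimum_cost=tuple(1 + o for o in range(number_options)),
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=True,
    )
    # different forward/reverse values make the arc anisotropic
    assert not arc.is_isotropic(reverse_none_means_isotropic=False)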
- - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, + name="hey", + efficiency=efficiency_dict, efficiency_reverse={ - (key[1],key[0]): value - for key, value in efficiency_dict.items()}, + (key[1], key[0]): value + for key, value in efficiency_dict.items() + }, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError: Efficiency values must be provided as numeric types. - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, + name="hey", + efficiency=efficiency_dict, efficiency_reverse={ - (key[0],key[1]): str(value) - for key, value in efficiency_dict.items()}, + (key[0], key[1]): str(value) + for key, value in efficiency_dict.items() + }, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('Efficiency values must be positive.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, + name="hey", + efficiency=efficiency_dict, efficiency_reverse={ - (key[0],key[1]): -1 - for key, value in efficiency_dict.items()}, + (key[0], key[1]): -1 for key, value in efficiency_dict.items() + }, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - - #TypeError('The capacity should be given as a list or tuple.') - - error_triggered = False + + # TypeError('The capacity should be given as a list or tuple.') + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity={o: 1+o for o in range(number_options)}, - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity={o: 1 + o for o in range(number_options)}, + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # TypeError: The minimum cost values should be given as a list or tuple - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + 
efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost={o: 1+o for o in range(number_options)}, - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost={o: 1 + o for o in range(number_options)}, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True - ) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # TypeError: The specific capacity cost was not given as a numeric type - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=[1], + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=[1], capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError:The number of capacity and minimum cost entries must match - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options+1)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options + 1)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - - # ValueError: No entries for capacity and minimum cost were provided. + + # ValueError: No entries for capacity and minimum cost were provided. # At least one option should be provided. - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, capacity=tuple(), minimum_cost=tuple(), - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - - # ValueError: No entries for efficiency were provided. There should be + + # ValueError: No entries for efficiency were provided. There should be # one entry per time interval. 
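Before the empty-dict failure below, a sketch of the passing shape: exactly one efficiency entry per scenario and time interval, sized consistently with the validate_sizes checks exercised further down. Again an illustrative sketch assuming the Arcs API and the number_* variables used elsewhere in this file, not part of the patch:

    # sketch only; one entry per scenario and per time interval
    well_sized_eff = {
        (q, k): 0.85
        for q in range(number_scenarios)
        for k in range(number_time_intervals)
    }
    arc = Arcs(
        name="well_sized",
        efficiency=well_sized_eff,
        efficiency_reverse=None,
        static_loss=None,
        capacity=tuple(1 + o for o in range(number_options)),
        minimum_cost=tuple(1 + o for o in range(number_options)),
        specific_capacity_cost=1,
        capacity_is_instantaneous=False,
        validate=True,
    )
    # options, scenarios and intervals all line up: no ValueError expected
    arc.validate_sizes(
        number_options=number_options,
        number_scenarios=number_scenarios,
        number_intervals=[number_time_intervals for _ in range(number_scenarios)],
    )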
- - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency={}, - efficiency_reverse=None, + name="hey", + efficiency={}, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - - # ValueError('The number of efficiency values must match the number of + + # ValueError('The number of efficiency values must match the number of # time intervals.') - + arc_tech = Arcs( - name='hey', + name="hey", efficiency={ - (q,k): 0.85 + (q, k): 0.85 for q in range(number_scenarios) - for k in range(number_time_intervals+1) - }, - efficiency_reverse=None, + for k in range(number_time_intervals + 1) + }, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - - error_triggered = False + validate=True, + ) + + error_triggered = False try: - arc_tech.validate_sizes(number_options=number_options, - number_scenarios=number_scenarios, - number_intervals=[ - number_time_intervals - for _ in range(number_scenarios)]) + arc_tech.validate_sizes( + number_options=number_options, + number_scenarios=number_scenarios, + number_intervals=[ + number_time_intervals for _ in range(number_scenarios) + ], + ) except ValueError: error_triggered = True assert error_triggered - - # ValueError('The number of efficiency values must match the number of + + # ValueError('The number of efficiency values must match the number of # time intervals.') - - error_triggered = False + + error_triggered = False try: arc_tech = Arcs( - name='hey', + name="hey", efficiency={ - (q,k): 0.85 + (q, k): 0.85 for q in range(number_scenarios) for k in range(number_time_intervals) - }, + }, efficiency_reverse={ - (q,k): 0.85 + (q, k): 0.85 for q in range(number_scenarios) - for k in range(number_time_intervals-1) - }, + for k in range(number_time_intervals - 1) + }, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) - arc_tech.validate_sizes(number_options=number_options, - number_scenarios=number_scenarios, - number_intervals=[ - number_time_intervals - for _ in range(number_scenarios)]) + validate=True, + ) + arc_tech.validate_sizes( + number_options=number_options, + number_scenarios=number_scenarios, + number_intervals=[ + number_time_intervals for _ in range(number_scenarios) + ], + ) except ValueError: error_triggered = True assert error_triggered - - # ValueError('The number of capacity values must match the number of + + # ValueError('The number of capacity values must match the number of # options.') - + arc_tech = Arcs( - 
name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options+1)), - minimum_cost=tuple(1+o for o in range(number_options+1)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options + 1)), + minimum_cost=tuple(1 + o for o in range(number_options + 1)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True - ) - - error_triggered = False + validate=True, + ) + + error_triggered = False try: - arc_tech.validate_sizes(number_options=number_options, - number_scenarios=number_scenarios, - number_intervals=[ - number_time_intervals - for _ in range(number_scenarios)]) + arc_tech.validate_sizes( + number_options=number_options, + number_scenarios=number_scenarios, + number_intervals=[ + number_time_intervals for _ in range(number_scenarios) + ], + ) except ValueError: error_triggered = True assert error_triggered - - # ValueError: The minimum cost values are inconsistent with the number + + # ValueError: The minimum cost values are inconsistent with the number # of options. - + arc_tech = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options+1)), - minimum_cost=tuple(1+o for o in range(number_options+1)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options + 1)), + minimum_cost=tuple(1 + o for o in range(number_options + 1)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True - ) - - error_triggered = False + validate=True, + ) + + error_triggered = False try: - arc_tech.validate_sizes(number_options=number_options, - number_scenarios=number_scenarios, - number_intervals=[ - number_time_intervals - for _ in range(number_scenarios)]) + arc_tech.validate_sizes( + number_options=number_options, + number_scenarios=number_scenarios, + number_intervals=[ + number_time_intervals for _ in range(number_scenarios) + ], + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('Efficiency values must be provided as numeric types.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency={key: str(value) - for key, value in efficiency_dict.items()}, - efficiency_reverse=None, + name="hey", + efficiency={ + key: str(value) for key, value in efficiency_dict.items() + }, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('Efficiency values must be positive.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency={key: -value*random.randint(0,1) - for key, value in efficiency_dict.items()}, - efficiency_reverse=None, + name="hey", + efficiency={ + key: -value * random.randint(0, 1) + for key, value in efficiency_dict.items() + }, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in 
range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('Capacity values must be provided as numeric types.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', + name="hey", efficiency=efficiency_dict, - efficiency_reverse=None, + efficiency_reverse=None, static_loss=None, - capacity=tuple(str(1+o) for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(str(1 + o) for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('Capacity values must be positive.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(-random.randint(0,1) - for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple( + -random.randint(0, 1) for o in range(number_options) + ), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - + # TypeError('Minimum cost values must be provided as numeric types.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', + name="hey", efficiency=efficiency_dict, - efficiency_reverse=None, - static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(str(1+o) for o in range(number_options)), - specific_capacity_cost=1, + efficiency_reverse=None, + static_loss=None, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(str(1 + o) for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ValueError('Minimum cost values must be positive or zero.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o - for o in range(number_options)), - minimum_cost=tuple(-1 - for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(-1 for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=capacity_is_instantaneous, - validate=True) + validate=True, + ) except ValueError: error_triggered = True assert error_triggered - - # TypeError('The information about capacities being instantaneous or not + + # TypeError('The information about capacities being instantaneous or not # should be given as a 
boolean variable.') - - error_triggered = False + + error_triggered = False try: _ = Arcs( - name='hey', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="hey", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=1, - validate=True) + validate=True, + ) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* # ********************************************************************* - + # Network - + arc_tech_AB = Arcs( - name='AB', - efficiency=efficiency_dict, - efficiency_reverse=None, + name="AB", + efficiency=efficiency_dict, + efficiency_reverse=None, static_loss=None, - capacity=tuple(1+o for o in range(number_options)), - minimum_cost=tuple(1+o for o in range(number_options)), - specific_capacity_cost=1, + capacity=tuple(1 + o for o in range(number_options)), + minimum_cost=tuple(1 + o for o in range(number_options)), + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=True) - + validate=True, + ) + arc_tech_AB.options_selected[0] = True - + assert arc_tech_AB.number_options() == number_options - + net = Network() - + # add undirected arc - - net.add_undirected_arc( - node_key_a='A', - node_key_b='B', - arcs=arc_tech_AB) - + + net.add_undirected_arc(node_key_a="A", node_key_b="B", arcs=arc_tech_AB) + # add directed arc - - net.add_directed_arc( - node_key_a='A', - node_key_b='B', - arcs=arc_tech_AB) - + + net.add_directed_arc(node_key_a="A", node_key_b="B", arcs=arc_tech_AB) + # add infinite capacity arc - + net.add_infinite_capacity_arc( - node_key_a='C', - node_key_b='D', - efficiency={ - (i, j): 1 - for i in range(3) - for j in range(4)}, - static_loss=None) - + node_key_a="C", + node_key_b="D", + efficiency={(i, j): 1 for i in range(3) for j in range(4)}, + static_loss=None, + ) + # add pre-existing directed arc - + net.add_preexisting_directed_arc( - node_key_a='E', - node_key_b='F', - efficiency=efficiency_dict, - static_loss=None, - capacity=3, - capacity_is_instantaneous=True) - + node_key_a="E", + node_key_b="F", + efficiency=efficiency_dict, + static_loss=None, + capacity=3, + capacity_is_instantaneous=True, + ) + # add pre-existing undirected arc - + net.add_preexisting_undirected_arc( - node_key_a='A', - node_key_b='C', - efficiency=efficiency_dict, - efficiency_reverse=efficiency_dict, - static_loss=None, - capacity=3, - capacity_is_instantaneous=True) - + node_key_a="A", + node_key_b="C", + efficiency=efficiency_dict, + efficiency_reverse=efficiency_dict, + static_loss=None, + capacity=3, + capacity_is_instantaneous=True, + ) + net.modify_network_arc( - node_key_a='A', - node_key_b='C', - arc_key_ab='AC', - data_dict={net.KEY_ARC_TECH: arc_tech_AB, net.KEY_ARC_UND: False}) - + node_key_a="A", + node_key_b="C", + arc_key_ab="AC", + data_dict={net.KEY_ARC_TECH: arc_tech_AB, net.KEY_ARC_UND: False}, + ) + # ********************************************************************* # ********************************************************************* - + # add import node - + imp_resource_price = ResourcePrice( - prices=[random.random() - for k in range(number_time_intervals)], - volumes=[ *[random.random() for k in 
range(number_time_intervals-1)], None] - ) - - net.add_import_node(node_key='G', prices={(0,0,0): imp_resource_price}) - + prices=[random.random() for k in range(number_time_intervals)], + volumes=[ + *[random.random() for k in range(number_time_intervals - 1)], + None, + ], + ) + + net.add_import_node(node_key="G", prices={(0, 0, 0): imp_resource_price}) + # add export node - + exp_resource_price = ResourcePrice( - prices=[random.random() - for k in range(number_time_intervals)], - volumes=[ *[random.random() for k in range(number_time_intervals-1)], None] - ) - - net.add_export_node(node_key='H', prices={(0,0,0): exp_resource_price}) - - net.add_waypoint_node(node_key='Z') - - base_flow = { - (i,j): random.random() - for i in range(3) - for j in range(4) - } - - net.add_source_sink_node(node_key='Y', base_flow=base_flow) - - base_flow[(2,3)] = random.random() - + prices=[random.random() for k in range(number_time_intervals)], + volumes=[ + *[random.random() for k in range(number_time_intervals - 1)], + None, + ], + ) + + net.add_export_node(node_key="H", prices={(0, 0, 0): exp_resource_price}) + + net.add_waypoint_node(node_key="Z") + + base_flow = {(i, j): random.random() for i in range(3) for j in range(4)} + + net.add_source_sink_node(node_key="Y", base_flow=base_flow) + + base_flow[(2, 3)] = random.random() + net.modify_network_node( - node_key='Y', - node_data={net.KEY_NODE_BASE_FLOW: base_flow} - ) - + node_key="Y", node_data={net.KEY_NODE_BASE_FLOW: base_flow} + ) + net.identify_node_types() - - assert 'Z' in net.waypoint_nodes - - assert 'G' in net.import_nodes - - assert 'H' in net.export_nodes - - assert 'Y' in net.source_sink_nodes - + + assert "Z" in net.waypoint_nodes + + assert "G" in net.import_nodes + + assert "H" in net.export_nodes + + assert "Y" in net.source_sink_nodes + # ************************************************************************* # ************************************************************************* def test_arcs_without_losses(self): - # test arc without (static and proportional) losses - + arc_tech = ArcsWithoutLosses( - name='AB', - capacity=(1,2,3), - minimum_cost=(4,5,6), - specific_capacity_cost=6, + name="AB", + capacity=(1, 2, 3), + minimum_cost=(4, 5, 6), + specific_capacity_cost=6, capacity_is_instantaneous=False, - validate=True - ) - + validate=True, + ) + assert not arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert arc_tech.has_constant_efficiency() - + # test arc without static losses - + arc_tech = ArcsWithoutStaticLosses( - name='AB', - efficiency={(0,0):1, (0,1):0.9, (0,2):0.8}, - efficiency_reverse=None, - capacity=(1,2,3), - minimum_cost=(4,5,6), - specific_capacity_cost=6, + name="AB", + efficiency={(0, 0): 1, (0, 1): 0.9, (0, 2): 0.8}, + efficiency_reverse=None, + capacity=(1, 2, 3), + minimum_cost=(4, 5, 6), + specific_capacity_cost=6, capacity_is_instantaneous=False, - validate=True - ) - + validate=True, + ) + assert arc_tech.has_proportional_losses() - + assert not arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert not arc_tech.has_constant_efficiency() - + # test arc without proportional losses - + arc_tech = ArcsWithoutProportionalLosses( - name='AB', - static_loss={(0,0,0):0.1, (0,0,1):0.2, (0,0,2):0.3, - (1,0,0):0.15, (1,0,1):0.25, (1,0,2):0.35, - (2,0,0):0.16, (2,0,1):0.26, (2,0,2):0.36}, - capacity=(1,2,3), - minimum_cost=(4,5,6), - specific_capacity_cost=6, + name="AB", + static_loss={ + (0, 0, 
0): 0.1, + (0, 0, 1): 0.2, + (0, 0, 2): 0.3, + (1, 0, 0): 0.15, + (1, 0, 1): 0.25, + (1, 0, 2): 0.35, + (2, 0, 0): 0.16, + (2, 0, 1): 0.26, + (2, 0, 2): 0.36, + }, + capacity=(1, 2, 3), + minimum_cost=(4, 5, 6), + specific_capacity_cost=6, capacity_is_instantaneous=False, - validate=True - ) - + validate=True, + ) + assert not arc_tech.has_proportional_losses() - + assert arc_tech.has_static_losses() - + assert not arc_tech.is_infinite_capacity() - + assert arc_tech.has_constant_efficiency() - + # ************************************************************************* # ************************************************************************* - + def test_modifying_nodes(self): - # ********************************************************************* - + net = Network() - + number_intervals = 3 - + resource_price = ResourcePrice( - prices=[random.random() for k in range(number_intervals)], - volumes=[ - *[random.random() for k in range(number_intervals-1)], None - ] - ) - - base_flow = { - (0,k): random.random() - for k in range(number_intervals)} - + prices=[random.random() for k in range(number_intervals)], + volumes=[*[random.random() for k in range(number_intervals - 1)], None], + ) + + base_flow = {(0, k): random.random() for k in range(number_intervals)} + arc_tech = ArcsWithoutLosses( - name='hello', - capacity=[5], - minimum_cost=[3], - specific_capacity_cost=3, - capacity_is_instantaneous=False - ) - + name="hello", + capacity=[5], + minimum_cost=[3], + specific_capacity_cost=3, + capacity_is_instantaneous=False, + ) + # add isolated import node - - net.add_import_node(node_key='I_iso', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I_iso", prices={(0, 0, 0): resource_price}) + # add import node with outgoing arcs - - net.add_import_node(node_key='I', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I", prices={(0, 0, 0): resource_price}) + # add isolated export node - - net.add_import_node(node_key='E_iso', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="E_iso", prices={(0, 0, 0): resource_price}) + # add export node with incoming arcs - - net.add_export_node(node_key='E', - prices={(0,0,0): resource_price}) - + + net.add_export_node(node_key="E", prices={(0, 0, 0): resource_price}) + # add isolated normal node - - net.add_source_sink_node(node_key='A_iso', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="A_iso", base_flow=base_flow) + # add normal node with incoming arcs - - net.add_source_sink_node(node_key='A_in', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="A_in", base_flow=base_flow) + # add normal node with outgoing arcs - - net.add_source_sink_node(node_key='A_out', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="A_out", base_flow=base_flow) + # add normal node with incoming and outgoing arcs - - net.add_source_sink_node(node_key='A', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="A", base_flow=base_flow) + # ********************************************************************* - + # arcs - - net.add_directed_arc(node_key_a='I', - node_key_b='A_in', - arcs=arc_tech) - - net.add_directed_arc(node_key_a='I', - node_key_b='A', - arcs=arc_tech) - - net.add_directed_arc(node_key_a='A_out', - node_key_b='E', - arcs=arc_tech) - - net.add_directed_arc(node_key_a='A', - node_key_b='E', - arcs=arc_tech) - + + net.add_directed_arc(node_key_a="I", node_key_b="A_in", arcs=arc_tech) + + net.add_directed_arc(node_key_a="I", node_key_b="A", 
arcs=arc_tech) + + net.add_directed_arc(node_key_a="A_out", node_key_b="E", arcs=arc_tech) + + net.add_directed_arc(node_key_a="A", node_key_b="E", arcs=arc_tech) + # ********************************************************************* - + # change I_iso to regular: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change I_iso to export: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change I_iso to waypoint: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="I_iso", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='I_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # ********************************************************************* - + # change E_iso to regular: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change E_iso to import: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change E_iso to waypoint: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="E_iso", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='E_iso', - node_data={net.KEY_NODE_TYPE: 
net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # ********************************************************************* - + # change A_iso to export: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_iso to import: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_iso to waypoint: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="A_iso", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='A_iso', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_iso", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # ********************************************************************* - + # change I to regular: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change I to waypoint: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="I", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # ********************************************************************* - + # change E to regular: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # reverse: okay 
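# The isolated-node retyping pattern above, condensed into a standalone
# sketch (Network API and import paths assumed from these tests): a
# node without incident arcs can be converted freely between roles,
# while the connected nodes further below raise ValueError on
# incompatible conversions.
from src.topupopt.problems.esipp.network import Network
from src.topupopt.problems.esipp.resource import ResourcePrice

demo_net = Network()
demo_net.add_waypoint_node(node_key="lonely")
# waypoint -> import: allowed, since "lonely" has no incident arcs
demo_net.modify_network_node(
    node_key="lonely",
    node_data={
        demo_net.KEY_NODE_TYPE: demo_net.KEY_NODE_TYPE_IMP,
        demo_net.KEY_NODE_PRICES: ResourcePrice(prices=1.0, volumes=None),
    },
)
# import -> waypoint: allowed for the same reason
demo_net.modify_network_node(
    node_key="lonely",
    node_data={demo_net.KEY_NODE_TYPE: demo_net.KEY_NODE_TYPE_WAY},
)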
- + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # change E to waypoint: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="E", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # ********************************************************************* - + # change A_in to export: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_in to waypoint: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="A_in", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # ********************************************************************* - + # change A_out to import: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price}) - + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) + # reverse: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # change A_out to waypoint: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY}) - + node_key="A_out", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) + # reverse: okay - + net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, - net.KEY_NODE_BASE_FLOW: base_flow}) - + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_SOURCE_SINK, + net.KEY_NODE_BASE_FLOW: base_flow, + }, + ) + # ********************************************************************* - + # change I to export: fail - + error_triggered = False try: net.modify_network_node( - node_key='I', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="I", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) 
except ValueError: error_triggered = True assert error_triggered - + # change E to import: fail - + error_triggered = False try: net.modify_network_node( - node_key='E', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="E", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A_out to export: fail - + error_triggered = False try: net.modify_network_node( - node_key='A_out', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A_out", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A_in to import: fail - + error_triggered = False try: net.modify_network_node( - node_key='A_in', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A_in", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A to export: fail - + error_triggered = False try: net.modify_network_node( - node_key='A', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_EXP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # change A to import: fail - + error_triggered = False try: net.modify_network_node( - node_key='A', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, - net.KEY_NODE_PRICES: resource_price} - ) + node_key="A", + node_data={ + net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_IMP, + net.KEY_NODE_PRICES: resource_price, + }, + ) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # try to modify a non-existent node - + error_triggered = False try: net.modify_network_node( - node_key='ABCD', - node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} - ) + node_key="ABCD", node_data={net.KEY_NODE_TYPE: net.KEY_NODE_TYPE_WAY} + ) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_network_disallowed_cases(self): - # ********************************************************************* - + net = Network() - + number_intervals = 3 - + resource_price = ResourcePrice( - prices=[random.random() for k in range(number_intervals)], - volumes=[ - *[random.random() for k in range(number_intervals-1)], None - ] - ) - - base_flow = { - (0,k): random.random() - for k in range(number_intervals)} - + prices=[random.random() for k in range(number_intervals)], + volumes=[*[random.random() for k in range(number_intervals - 1)], None], + ) + + base_flow = {(0, k): random.random() for k in range(number_intervals)} + lossless_arcs = ArcsWithoutLosses( - name='hello', - capacity=[5], - minimum_cost=[3], - specific_capacity_cost=3, - capacity_is_instantaneous=False - ) - + name="hello", + capacity=[5], + minimum_cost=[3], + specific_capacity_cost=3, + 
capacity_is_instantaneous=False, + ) + lossy_arcs = ArcsWithoutProportionalLosses( - name='hello back', - static_loss={ - (0,0,k): random.random() - for k in range(number_intervals) - }, + name="hello back", + static_loss={(0, 0, k): random.random() for k in range(number_intervals)}, capacity=(1,), - minimum_cost=(5,), + minimum_cost=(5,), specific_capacity_cost=0, - capacity_is_instantaneous=False - ) - + capacity_is_instantaneous=False, + ) + # add import node I - - net.add_import_node(node_key='I', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I", prices={(0, 0, 0): resource_price}) + # add export node E - - net.add_export_node(node_key='E', - prices={(0,0,0): resource_price}) - + + net.add_export_node(node_key="E", prices={(0, 0, 0): resource_price}) + # add regular node A - - net.add_source_sink_node(node_key='A', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="A", base_flow=base_flow) + # add regular node B - - net.add_source_sink_node(node_key='B', - base_flow=base_flow) - + + net.add_source_sink_node(node_key="B", base_flow=base_flow) + # add a valid import-export arc - - net.add_directed_arc(node_key_a='I', - node_key_b='E', - arcs=lossless_arcs) - + + net.add_directed_arc(node_key_a="I", node_key_b="E", arcs=lossless_arcs) + # identify the nodes and validate - + net.identify_node_types() - + # ********************************************************************* # ********************************************************************* - + # trigger errors using pre-identified nodes - + # directed arcs cannot start in an export node: E -> B - + error_triggered = False try: - net.add_directed_arc(node_key_a='E', - node_key_b='B', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="E", node_key_b="B", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # directed arcs cannot end on an import node: A -> I - + error_triggered = False try: - net.add_directed_arc(node_key_a='A', - node_key_b='I', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="A", node_key_b="I", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # import-export nodes cannot have static losses - + error_triggered = False try: - net.add_directed_arc(node_key_a='I', - node_key_b='E', - arcs=lossy_arcs) + net.add_directed_arc(node_key_a="I", node_key_b="E", arcs=lossy_arcs) except ValueError: error_triggered = True assert error_triggered - + # undirected arcs cannot involve import nor export nodes - + error_triggered = False try: - net.add_undirected_arc(node_key_a='I', - node_key_b='A', - arcs=lossless_arcs) + net.add_undirected_arc(node_key_a="I", node_key_b="A", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # undirected arcs cannot involve import nor export nodes - + error_triggered = False try: - net.add_undirected_arc(node_key_a='B', - node_key_b='E', - arcs=lossless_arcs) + net.add_undirected_arc(node_key_a="B", node_key_b="E", arcs=lossless_arcs) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* # ********************************************************************* - + # trigger errors using non-identified nodes - + # ********************************************************************* - + # create a new export node - - net.add_export_node(node_key='E1', - prices={(0,0,0): resource_price}) - + + net.add_export_node(node_key="E1", prices={(0, 0, 0): 
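# The arc placement rules exercised in this test, condensed into a
# standalone sketch (API and import paths assumed from these tests):
# directed arcs may not start at an export node nor end at an import
# node, undirected arcs may not involve import or export nodes, and
# import-export arcs may not carry static losses.
from src.topupopt.problems.esipp.network import Network, ArcsWithoutLosses
from src.topupopt.problems.esipp.resource import ResourcePrice

rules_net = Network()
rules_net.add_export_node(
    node_key="E_demo", prices={(0, 0, 0): ResourcePrice(prices=1.0, volumes=None)}
)
rules_net.add_source_sink_node(node_key="B_demo", base_flow={(0, 0): 1.0})
rules_net.identify_node_types()
demo_arcs = ArcsWithoutLosses(
    name="demo",
    capacity=[5],
    minimum_cost=[3],
    specific_capacity_cost=3,
    capacity_is_instantaneous=False,
)
try:
    # violates the first rule: the arc would leave an export node
    rules_net.add_directed_arc(node_key_a="E_demo", node_key_b="B_demo", arcs=demo_arcs)
except ValueError:
    pass  # expected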
resource_price}) + # create an arc starting in that export node - + error_triggered = False try: - net.add_directed_arc(node_key_a='E1', - node_key_b='B', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="E1", node_key_b="B", arcs=lossless_arcs) net.identify_node_types() except ValueError: error_triggered = True assert error_triggered - + # remove the troublesome arc - - net.remove_edge(u='E1', v='B') - + + net.remove_edge(u="E1", v="B") + # ********************************************************************* - + # create a new import node - - net.add_import_node(node_key='I1', - prices={(0,0,0): resource_price}) - + + net.add_import_node(node_key="I1", prices={(0, 0, 0): resource_price}) + # create an arc ending in that import node - + error_triggered = False try: - net.add_directed_arc(node_key_a='A', - node_key_b='I1', - arcs=lossless_arcs) + net.add_directed_arc(node_key_a="A", node_key_b="I1", arcs=lossless_arcs) net.identify_node_types() except ValueError: error_triggered = True assert error_triggered - + # remove the troublesome arc - - net.remove_edge(u='A', v='I1') - + + net.remove_edge(u="A", v="I1") + # ********************************************************************* - + # check non-existent arc - - net.arc_is_undirected(('X','Y', 1)) - + + net.arc_is_undirected(("X", "Y", 1)) + # ************************************************************************* # ************************************************************************* - + def test_pseudo_unique_key_generation(self): - # create network - + network = Network() - + # add node A - - network.add_waypoint_node(node_key='A') - + + network.add_waypoint_node(node_key="A") + # add node B - - network.add_waypoint_node(node_key='B') - + + network.add_waypoint_node(node_key="B") + # identify nodes - + network.identify_node_types() - + # add arcs - - key_list = ['3e225573-4e78-48c8-bb08-efbeeb795c22', - 'f6d30428-15d1-41e9-a952-0742eaaa5a31', - '8c29b906-2518-41c5-ada8-07b83508b5b8', - 'f9a72a39-1422-4a02-af97-906ce79c32a3', - 'b6941a48-10cc-465d-bf53-178bd2939bd1'] - + + key_list = [ + "3e225573-4e78-48c8-bb08-efbeeb795c22", + "f6d30428-15d1-41e9-a952-0742eaaa5a31", + "8c29b906-2518-41c5-ada8-07b83508b5b8", + "f9a72a39-1422-4a02-af97-906ce79c32a3", + "b6941a48-10cc-465d-bf53-178bd2939bd1", + ] + for key in key_list: - network.add_edge( - u_for_edge='A', - v_for_edge='B', + u_for_edge="A", + v_for_edge="B", key=key, - **{network.KEY_ARC_UND: False, - network.KEY_ARC_TECH: None} - ) - + **{network.KEY_ARC_UND: False, network.KEY_ARC_TECH: None} + ) + # use a seed number to trigger more iterations - + import uuid + rand = random.Random() rand.seed(360) uuid.uuid4 = lambda: uuid.UUID(int=rand.getrandbits(128), version=4) - + error_triggered = False try: _ = network.get_pseudo_unique_arc_key( - node_key_start='A', - node_key_end='B', - max_iterations=len(key_list)-1) + node_key_start="A", node_key_end="B", max_iterations=len(key_list) - 1 + ) except Exception: error_triggered = True assert error_triggered + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_esipp_problem.py b/tests/test_esipp_problem.py index 1b50747..8fe0e21 100644 --- a/tests/test_esipp_problem.py +++ b/tests/test_esipp_problem.py @@ -8,7 +8,7 @@ from statistics import mean # import numpy as np # import networkx as 
nx import pyomo.environ as pyo - + # import src.topupopt.problems.esipp.utils as utils from src.topupopt.data.misc.utils import generate_pseudo_unique_key from src.topupopt.problems.esipp.problem import InfrastructurePlanningProblem @@ -20,346 +20,310 @@ from src.topupopt.problems.esipp.problem import is_peak_total_problem # ***************************************************************************** # ***************************************************************************** -class TestESIPPProblem: +class TestESIPPProblem: def build_solve_ipp( - self, - solver: str = 'glpk', - solver_options: dict = None, - use_sos_arcs: bool = False, - arc_sos_weight_key: str = ( - InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE), - arc_use_real_variables_if_possible: bool = False, - use_sos_sense: bool = False, - sense_sos_weight_key: int = ( - InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER - ), - sense_use_real_variables_if_possible: bool = False, - sense_use_arc_interfaces: bool = False, - perform_analysis: bool = False, - plot_results: bool = False, - print_solver_output: bool = False, - irregular_time_intervals: bool = False, - networks: dict = None, - number_intraperiod_time_intervals: int = 4, - static_losses_mode = None, - mandatory_arcs: list = None, - max_number_parallel_arcs: dict = None, - arc_groups_dict: dict = None, - init_aux_sets: bool = False, - discount_rates: dict = None, - reporting_periods: dict = None, - time_intervals: dict = None, - assessment_weights: dict = None, - simplify_problem: bool = False): - - reporting_period_duration = 365*24*3600 - + self, + solver: str = "glpk", + solver_options: dict = None, + use_sos_arcs: bool = False, + arc_sos_weight_key: str = (InfrastructurePlanningProblem.SOS1_ARC_WEIGHTS_NONE), + arc_use_real_variables_if_possible: bool = False, + use_sos_sense: bool = False, + sense_sos_weight_key: int = ( + InfrastructurePlanningProblem.SOS1_SENSE_WEIGHT_NOMINAL_HIGHER + ), + sense_use_real_variables_if_possible: bool = False, + sense_use_arc_interfaces: bool = False, + perform_analysis: bool = False, + plot_results: bool = False, + print_solver_output: bool = False, + irregular_time_intervals: bool = False, + networks: dict = None, + number_intraperiod_time_intervals: int = 4, + static_losses_mode=None, + mandatory_arcs: list = None, + max_number_parallel_arcs: dict = None, + arc_groups_dict: dict = None, + init_aux_sets: bool = False, + discount_rates: dict = None, + reporting_periods: dict = None, + time_intervals: dict = None, + assessment_weights: dict = None, + simplify_problem: bool = False, + ): + reporting_period_duration = 365 * 24 * 3600 + if type(discount_rates) != dict: - - discount_rates = { - 0: tuple([0.035, 0.035]) - } - + discount_rates = {0: tuple([0.035, 0.035])} + if type(assessment_weights) != dict: - - assessment_weights = {} # default - + assessment_weights = {} # default + if type(reporting_periods) != dict: - - reporting_periods = {0: (0,1)} - + reporting_periods = {0: (0, 1)} + # time intervals - + if type(time_intervals) != dict: - if irregular_time_intervals: - time_step_max_relative_variation = 0.25 - + intraperiod_time_interval_duration = [ - (reporting_period_duration/number_intraperiod_time_intervals)* - (1+(k/(number_intraperiod_time_intervals-1)-0.5)* - time_step_max_relative_variation) - for k in range(number_intraperiod_time_intervals)] - + (reporting_period_duration / number_intraperiod_time_intervals) + * ( + 1 + + (k / (number_intraperiod_time_intervals - 1) - 0.5) + * 
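# A worked instance of the irregular-interval expression assembled
# here, assuming N = number_intraperiod_time_intervals = 4 and
# v = time_step_max_relative_variation = 0.25:
#   duration_k = (T/N) * (1 + (k/(N-1) - 0.5) * v)
#   k = 0, 1, 2, 3  ->  factors 0.875, ~0.958, ~1.042, 1.125 (times T/N)
# The perturbation is symmetric around 1, so the N durations still sum
# to the reporting period duration T.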
time_step_max_relative_variation + ) + for k in range(number_intraperiod_time_intervals) + ] + else: - intraperiod_time_interval_duration = [ - reporting_period_duration/number_intraperiod_time_intervals - for k in range(number_intraperiod_time_intervals)] - + reporting_period_duration / number_intraperiod_time_intervals + for k in range(number_intraperiod_time_intervals) + ] + # average time interval duration - + average_time_interval_duration = round( - mean( - intraperiod_time_interval_duration - ) - ) - - time_intervals = { - 0: tuple(dt for dt in intraperiod_time_interval_duration) - } - + mean(intraperiod_time_interval_duration) + ) + + time_intervals = {0: tuple(dt for dt in intraperiod_time_interval_duration)} + # time weights - + # relative weight of time period - + # one interval twice as long as the average is worth twice # one interval half as long as the average is worth half - + # time_weights = [ - # [time_period_duration/average_time_interval_duration - # for time_period_duration in intraperiod_time_interval_duration] + # [time_period_duration/average_time_interval_duration + # for time_period_duration in intraperiod_time_interval_duration] # for p in range(number_periods)] - - time_weights = None # nothing yet - - normalised_time_interval_duration = None # nothing yet - + + time_weights = None # nothing yet + + normalised_time_interval_duration = None # nothing yet + # create problem object - + ipp = InfrastructurePlanningProblem( - name='problem', - discount_rates=discount_rates, + name="problem", + discount_rates=discount_rates, reporting_periods=reporting_periods, time_intervals=time_intervals, time_weights=time_weights, normalised_time_interval_duration=normalised_time_interval_duration, - assessment_weights=assessment_weights - ) - + assessment_weights=assessment_weights, + ) + # add networks and systems - + for netkey, net in networks.items(): - ipp.add_network(network_key=netkey, network=net) - + # define arcs as mandatory - + if type(mandatory_arcs) == list: - for full_arc_key in mandatory_arcs: - ipp.make_arc_mandatory(full_arc_key[0], full_arc_key[1:]) - + # if make_all_arcs_mandatory: - + # for network_key in ipp.networks: - + # for arc_key in ipp.networks[network_key].edges(keys=True): - + # # preexisting arcs are no good - + # if ipp.networks[network_key].edges[arc_key][ # Network.KEY_ARC_TECH].has_been_selected(): - - # continue - + + # continue + # ipp.make_arc_mandatory(network_key, arc_key) - + # set up the use of sos for arc selection - + if use_sos_arcs: - for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - - if ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - + if ( + ipp.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): continue - + ipp.use_sos1_for_arc_selection( - network_key, + network_key, arc_key, use_real_variables_if_possible=( - arc_use_real_variables_if_possible), - sos1_weight_method=arc_sos_weight_key) - - + arc_use_real_variables_if_possible + ), + sos1_weight_method=arc_sos_weight_key, + ) + # set up the use of sos for flow sense determination - + if use_sos_sense: - for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - if not ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_UND]: - + Network.KEY_ARC_UND + ]: continue - + ipp.use_sos1_for_flow_senses( - network_key, + network_key, arc_key, use_real_variables_if_possible=( sense_use_real_variables_if_possible - ), + 
), use_interface_variables=sense_use_arc_interfaces, - sos1_weight_method=sense_sos_weight_key) - - elif sense_use_arc_interfaces: # set up the use of arc interfaces w/o sos1 - + sos1_weight_method=sense_sos_weight_key, + ) + + elif sense_use_arc_interfaces: # set up the use of arc interfaces w/o sos1 for network_key in ipp.networks: - for arc_key in ipp.networks[network_key].edges(keys=True): - - if ipp.networks[network_key].edges[arc_key][ - Network.KEY_ARC_TECH].has_been_selected(): - + if ( + ipp.networks[network_key] + .edges[arc_key][Network.KEY_ARC_TECH] + .has_been_selected() + ): continue - - ipp.use_interface_variables_for_arc_selection( - network_key, - arc_key - ) - + + ipp.use_interface_variables_for_arc_selection(network_key, arc_key) + # static losses - + if static_losses_mode == ipp.STATIC_LOSS_MODE_ARR: - ipp.place_static_losses_arrival_node() - + elif static_losses_mode == ipp.STATIC_LOSS_MODE_DEP: - ipp.place_static_losses_departure_node() - + elif static_losses_mode == ipp.STATIC_LOSS_MODE_US: - ipp.place_static_losses_upstream() - + elif static_losses_mode == ipp.STATIC_LOSS_MODE_DS: - ipp.place_static_losses_downstream() - + else: - - raise ValueError('Unknown static loss modelling mode.') - + raise ValueError("Unknown static loss modelling mode.") + # ********************************************************************* - + # groups - + if type(arc_groups_dict) != type(None): - for key in arc_groups_dict: - ipp.create_arc_group(arc_groups_dict[key]) - + # ********************************************************************* - + # maximum number of parallel arcs - + for key in max_number_parallel_arcs: - ipp.set_maximum_number_parallel_arcs( - network_key=key[0], - node_a=key[1], - node_b=key[2], - limit=max_number_parallel_arcs[key]) - + network_key=key[0], + node_a=key[1], + node_b=key[2], + limit=max_number_parallel_arcs[key], + ) + # ********************************************************************* - + if simplify_problem: - ipp = simplify_peak_total_problem(ipp) - + # ********************************************************************* - + # instantiate (disable the default case v-a-v fixed losses) - + # ipp.instantiate(place_fixed_losses_upstream_if_possible=False) - + ipp.instantiate(initialise_ancillary_sets=init_aux_sets) - + # optimise - - ipp.optimise(solver_name=solver, - solver_options=solver_options, - output_options={}, - print_solver_output=print_solver_output) - + + ipp.optimise( + solver_name=solver, + solver_options=solver_options, + output_options={}, + print_solver_output=print_solver_output, + ) + # return the problem object - + return ipp - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_single_network_single_arc_problem(self): - # scenario q = 0 # time number_intervals = 3 # periods number_periods = 2 - + # 2 nodes: one import, one regular mynet = Network() - - # import node + + # import node node_IMP = generate_pseudo_unique_key(mynet.nodes()) mynet.add_import_node( - node_key=node_IMP, + node_key=node_IMP, prices={ - (q,p,k): ResourcePrice( - prices=1.0, - volumes=None - ) + (q, p, k): ResourcePrice(prices=1.0, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + node_A = generate_pseudo_unique_key(mynet.nodes()) 
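# A sketch of the arithmetic implied by this test's assertions further
# below, assuming the build_solve_ipp defaults above (discount_rates
# {0: (0.035, 0.035)}, reporting_periods {0: (0, 1)}) and the arc
# defined next (efficiency 0.5, minimum_cost 2, specific capacity
# cost 1):
#   import-side flows: 0.5/0.5, 0.0/0.5, 1.0/0.5 = 1.0, 0.0, 2.0
#   import cost per reporting period: (1.0 + 0.0 + 2.0) * 1.0 = 3.0
#   sdncf ~= -(3.0/1.035 + 3.0/1.035**2) ~= -5.7
#   capex = 2 + 1 * 2.0 (the arc amplitude) = 4.0
#   objective ~= -(5.7 + 4.0) = -9.7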
- + mynet.add_source_sink_node( - node_key=node_A, - # base_flow=[0.5, 0.0, 1.0], - base_flow={ - (q,0):0.50, - (q,1):0.00, - (q,2):1.00} - ) - + node_key=node_A, + # base_flow=[0.5, 0.0, 1.0], + base_flow={(q, 0): 0.50, (q, 1): 0.00, (q, 2): 1.00}, + ) + # arc IA - + arc_tech_IA = Arcs( - name='any', - #efficiency=[0.5, 0.5, 0.5], - efficiency={ - (q,0): 0.5, - (q,1): 0.5, - (q,2): 0.5 - }, + name="any", + # efficiency=[0.5, 0.5, 0.5], + efficiency={(q, 0): 0.5, (q, 1): 0.5, (q, 2): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - - mynet.add_directed_arc( - node_key_a=node_IMP, - node_key_b=node_A, - arcs=arc_tech_IA) - + validate=False, + ) + + mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = self.build_solve_ipp( # solver=solver, solver_options={}, @@ -371,150 +335,129 @@ class TestESIPPProblem: # sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, # sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, # irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, # init_aux_sets=init_aux_sets, - simplify_problem=False - ) - + simplify_problem=False, + ) + assert is_peak_total_problem(ipp) - assert ipp.results['Problem'][0]['Number of constraints'] == 24 - assert ipp.results['Problem'][0]['Number of variables'] == 22 - assert ipp.results['Problem'][0]['Number of nonzeros'] == 49 - + assert ipp.results["Problem"][0]["Number of constraints"] == 24 + assert ipp.results["Problem"][0]["Number of variables"] == 22 + assert ipp.results["Problem"][0]["Number of nonzeros"] == 49 + # ********************************************************************* # ********************************************************************* - + # validation - + # the arc should be installed since it is required for feasibility - assert True in ipp.networks['mynet'].edges[(node_IMP, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the flows should be 1.0, 0.0 and 2.0 assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, q, 0) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]), 1.0, - abs_tol=1e-6) + abs_tol=1e-6, + ) assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, q, 1) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]), 0.0, - abs_tol=1e-6) + abs_tol=1e-6, + ) assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[ - ('mynet', node_IMP, node_A, 0, q, 2) - ] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 2)]), 2.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + # arc amplitude should be two assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_IMP, node_A, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 
0)]), 2.0, - abs_tol=0.01) - + abs_tol=0.01, + ) + # capex should be four assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3) - + # sdncf should be -5.7 - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), -5.7, abs_tol=1e-3 - ) - + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -5.7, abs_tol=1e-3) + # the objective function should be -9.7 assert math.isclose(pyo.value(ipp.instance.obj_f), -9.7, abs_tol=1e-3) - + # ************************************************************************* # ************************************************************************* - + def test_single_network_single_arc_problem_simpler(self): - # scenario q = 0 # time number_intervals = 3 # periods number_periods = 2 - + # 2 nodes: one import, one regular mynet = Network() - - # import node + + # import node # node_IMP = generate_pseudo_unique_key(mynet.nodes()) - node_IMP = 'thatimpnode' + node_IMP = "thatimpnode" mynet.add_import_node( - node_key=node_IMP, + node_key=node_IMP, prices={ - (q,p,k): ResourcePrice( - prices=1.0, - volumes=None - ) + (q, p, k): ResourcePrice(prices=1.0, volumes=None) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes - + # node_A = generate_pseudo_unique_key(mynet.nodes()) - node_A = 'thatnodea' - + node_A = "thatnodea" + mynet.add_source_sink_node( - node_key=node_A, - # base_flow=[0.5, 0.0, 1.0], - base_flow={ - (q,0):0.50, - (q,1):0.00, - (q,2):1.00} - ) - + node_key=node_A, + # base_flow=[0.5, 0.0, 1.0], + base_flow={(q, 0): 0.50, (q, 1): 0.00, (q, 2): 1.00}, + ) + # arc IA - + arc_tech_IA = Arcs( - name='any', - #efficiency=[0.5, 0.5, 0.5], - efficiency={ - (q,0): 0.5, - (q,1): 0.5, - (q,2): 0.5 - }, + name="any", + # efficiency=[0.5, 0.5, 0.5], + efficiency={(q, 0): 0.5, (q, 1): 0.5, (q, 2): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - - mynet.add_directed_arc( - node_key_a=node_IMP, - node_key_b=node_A, - arcs=arc_tech_IA) - + validate=False, + ) + + mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA) + # identify node types - + mynet.identify_node_types() - + # no sos, regular time intervals - + ipp = self.build_solve_ipp( # solver=solver, solver_options={}, @@ -526,58 +469,63 @@ class TestESIPPProblem: # sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, # sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, # irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, # init_aux_sets=init_aux_sets, - simplify_problem=True - ) - + simplify_problem=True, + ) + assert is_peak_total_problem(ipp) - assert ipp.results['Problem'][0]['Number of constraints'] == 20 - assert ipp.results['Problem'][0]['Number of variables'] == 19 - assert ipp.results['Problem'][0]['Number of nonzeros'] == 36 - + assert ipp.results["Problem"][0]["Number of constraints"] == 20 + assert ipp.results["Problem"][0]["Number of variables"] == 19 + assert ipp.results["Problem"][0]["Number of nonzeros"] == 36 + # 
********************************************************************* # ********************************************************************* - + # validation - + # the arc should be installed since it is required for feasibility - assert True in ipp.networks['mynet'].edges[(node_IMP, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # capex should be four assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3) - + # the objective function should be -9.7 assert math.isclose(pyo.value(ipp.instance.obj_f), -9.7, abs_tol=1e-3) - + # TODO: create method to automate getting data from the command line import io import sys from contextlib import redirect_stdout + # print('wow wow wow') # ipp.instance.constr_imp_flow_cost.pprint() expected_string = """constr_imp_flow_cost : Size=4, Index=constr_imp_flow_cost_index, Active=True\n Key : Lower : Body : Upper : Active\n ('mynet', 'thatimpnode', 'peak', 0, 0) : 0.0 : 0*var_if_glqpks[mynet,thatimpnode,peak,0,0,0] - var_ifc_glqpk[mynet,thatimpnode,peak,0,0] : 0.0 : True\n ('mynet', 'thatimpnode', 'peak', 1, 0) : 0.0 : 0*var_if_glqpks[mynet,thatimpnode,peak,1,0,0] - var_ifc_glqpk[mynet,thatimpnode,peak,1,0] : 0.0 : True\n ('mynet', 'thatimpnode', 'total', 0, 0) : 0.0 : var_if_glqpks[mynet,thatimpnode,total,0,0,0] - var_ifc_glqpk[mynet,thatimpnode,total,0,0] : 0.0 : True\n ('mynet', 'thatimpnode', 'total', 1, 0) : 0.0 : var_if_glqpks[mynet,thatimpnode,total,1,0,0] - var_ifc_glqpk[mynet,thatimpnode,total,1,0] : 0.0 : True\n""" - + cmd_output = io.StringIO() sys.stdout = cmd_output ipp.instance.constr_imp_flow_cost.pprint() sys.stdout = sys.__stdout__ assert cmd_output.getvalue() == expected_string - + expected_string = """constr_exp_flow_revenue : Size=0, Index=constr_exp_flow_revenue_index, Active=True\n Key : Lower : Body : Upper : Active\n""" f = io.StringIO() with redirect_stdout(f): ipp.instance.constr_exp_flow_revenue.pprint() assert f.getvalue() == expected_string - + # try the whole model # print('wow wow wow') # ipp.instance.pprint() @@ -587,7 +535,7 @@ class TestESIPPProblem: # ipp.instance.pprint() # sys.stdout = sys.__stdout__ # assert cmd_output.getvalue() == expected_string - + # from contextlib import redirect_stdout # import io @@ -596,7 +544,7 @@ class TestESIPPProblem: # with redirect_stdout(f): # # ipp.instance.pprint() # full model # ipp.instance.constr_imp_flow_cost.pprint() # only one constraint - + # expected_string = r"""constr_imp_flow_cost : Size=4, Index=constr_imp_flow_cost_index, Active=True # Key : Lower : Body : Upper : Active # ('mynet', 'thatimpnode', 'peak', 0, 0) : 0.0 : 0*var_if_glqpks[mynet,thatimpnode,peak,0,0,0] - var_ifc_glqpk[mynet,thatimpnode,peak,0,0] : 0.0 : True @@ -605,8 +553,7 @@ class TestESIPPProblem: # ('mynet', 'thatimpnode', 'total', 1, 0) : 0.0 : var_if_glqpks[mynet,thatimpnode,total,1,0,0] - var_ifc_glqpk[mynet,thatimpnode,total,1,0] : 0.0 : True # """ # assert expected_string == f.getvalue() - - + # from contextlib import redirect_stdout # import io # f = io.StringIO() @@ -615,62 +562,53 @@ class TestESIPPProblem: # print(12) # 12+3 # print('Got stdout: "{0}"'.format(f.getvalue())) - + # ************************************************************************* # ************************************************************************* - + def test_problem_increasing_imp_prices(self): - # scenario q = 0 # time number_intervals = 1 # periods number_periods = 
1 - + # 2 nodes: one import, one regular mynet = Network() - - # import node + + # import node node_IMP = generate_pseudo_unique_key(mynet.nodes()) mynet.add_import_node( - node_key=node_IMP, + node_key=node_IMP, prices={ - (q,p,k): ResourcePrice( - prices=[1.0, 2.0], - volumes=[0.5, None] - ) + (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None]) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes node_A = generate_pseudo_unique_key(mynet.nodes()) - mynet.add_source_sink_node( - node_key=node_A, - base_flow={(q, 0): 1.0} - ) - + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): 1.0}) + # arc IA arc_tech_IA = Arcs( - name='any', + name="any", efficiency={(q, 0): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - mynet.add_directed_arc( - node_key_a=node_IMP, - node_key_b=node_A, - arcs=arc_tech_IA) - + validate=False, + ) + mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA) + # identify node types mynet.identify_node_types() - + # no sos, regular time intervals ipp = self.build_solve_ipp( # solver=solver, @@ -683,116 +621,107 @@ class TestESIPPProblem: # sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, # sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, # irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, # init_aux_sets=init_aux_sets, simplify_problem=False, reporting_periods={0: (0,)}, - discount_rates={0: (0.0,)} - ) - + discount_rates={0: (0.0,)}, + ) + assert not is_peak_total_problem(ipp) - assert ipp.results['Problem'][0]['Number of constraints'] == 10 - assert ipp.results['Problem'][0]['Number of variables'] == 11 - assert ipp.results['Problem'][0]['Number of nonzeros'] == 20 - + assert ipp.results["Problem"][0]["Number of constraints"] == 10 + assert ipp.results["Problem"][0]["Number of variables"] == 11 + assert ipp.results["Problem"][0]["Number of nonzeros"] == 20 + # ********************************************************************* # ********************************************************************* - + # validation - + # the arc should be installed since it is required for feasibility - assert True in ipp.networks['mynet'].edges[(node_IMP, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the flows should be 1.0, 0.0 and 2.0 assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet', node_IMP, node_A, 0, q, 0)] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]), 2.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + # arc amplitude should be two assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_IMP, node_A, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]), 2.0, - abs_tol=0.01) - + abs_tol=0.01, + ) + # capex should be four assert math.isclose(pyo.value(ipp.instance.var_capex), 4.0, abs_tol=1e-3) - + # sdncf should be -3.5 - 
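# A sketch of the arithmetic behind the tiered import price in this
# test: ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None]) appears
# to define two price bands, with the first 0.5 units at 1.0 each and
# any further volume (None marking an unbounded final band) at 2.0 per
# unit. A demand of 1.0 at 0.5 efficiency imports 2.0 units, so:
#   import cost = 0.5 * 1.0 + 1.5 * 2.0 = 3.5
#   sdncf = -3.5 (single reporting period, zero discount rate)
#   capex = 2 + 1 * 2.0 (the arc amplitude) = 4.0
#   objective = -(3.5 + 4.0) = -7.5
# which matches the assertions above and below.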
assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3 - ) - + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -3.5, abs_tol=1e-3) + # the objective function should be -7.5 assert math.isclose(pyo.value(ipp.instance.obj_f), -7.5, abs_tol=1e-3) - + # ************************************************************************* # ************************************************************************* - + def test_problem_decreasing_exp_prices(self): - # scenario q = 0 # time number_intervals = 1 # periods number_periods = 1 - + # 2 nodes: one export, one regular mynet = Network() - - # import node + + # import node node_EXP = generate_pseudo_unique_key(mynet.nodes()) mynet.add_export_node( - node_key=node_EXP, + node_key=node_EXP, prices={ - (q,p,k): ResourcePrice( - prices=[2.0, 1.0], - volumes=[0.5, None] - ) + (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None]) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes node_A = generate_pseudo_unique_key(mynet.nodes()) - mynet.add_source_sink_node( - node_key=node_A, - base_flow={(q, 0): -1.0} - ) - + mynet.add_source_sink_node(node_key=node_A, base_flow={(q, 0): -1.0}) + # arc IA arc_tech_IA = Arcs( - name='any', + name="any", efficiency={(q, 0): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_EXP, - arcs=arc_tech_IA) - + validate=False, + ) + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_IA) + # identify node types mynet.identify_node_types() - + # no sos, regular time intervals ipp = self.build_solve_ipp( # solver=solver, @@ -805,146 +734,134 @@ class TestESIPPProblem: # sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, # sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, print_solver_output=False, # irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, # init_aux_sets=init_aux_sets, simplify_problem=False, reporting_periods={0: (0,)}, - discount_rates={0: (0.0,)} - ) - + discount_rates={0: (0.0,)}, + ) + assert not is_peak_total_problem(ipp) - assert ipp.results['Problem'][0]['Number of constraints'] == 10 - assert ipp.results['Problem'][0]['Number of variables'] == 11 - assert ipp.results['Problem'][0]['Number of nonzeros'] == 20 - + assert ipp.results["Problem"][0]["Number of constraints"] == 10 + assert ipp.results["Problem"][0]["Number of variables"] == 11 + assert ipp.results["Problem"][0]["Number of nonzeros"] == 20 + # ********************************************************************* # ********************************************************************* - + # validation - + # the arc should be installed since it is required for feasibility - assert True in ipp.networks['mynet'].edges[(node_A, node_EXP, 0)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # the flows should be 1.0, 0.0 and 2.0 assert math.isclose( - pyo.value( - 
ipp.instance.var_v_glljqk[('mynet', node_A, node_EXP, 0, q, 0)] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]), 1.0, - abs_tol=1e-6) - + abs_tol=1e-6, + ) + # arc amplitude should be two assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_A, node_EXP, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]), 1.0, - abs_tol=0.01) - + abs_tol=0.01, + ) + # capex should be four assert math.isclose(pyo.value(ipp.instance.var_capex), 3.0, abs_tol=1e-3) - + # sdncf should be 1.0 - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3 - ) - + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), 1.0, abs_tol=1e-3) + # the objective function should be -7.5 assert math.isclose(pyo.value(ipp.instance.obj_f), -2.0, abs_tol=1e-3) - + # ************************************************************************* # ************************************************************************* - + def test_problem_increasing_imp_decreasing_exp_prices(self): - # scenario q = 0 # time number_intervals = 2 # periods number_periods = 1 - + # 3 nodes: one import, one export, one regular mynet = Network() - - # import node + + # import node node_IMP = generate_pseudo_unique_key(mynet.nodes()) mynet.add_import_node( - node_key=node_IMP, + node_key=node_IMP, prices={ - (q,p,k): ResourcePrice( - prices=[1.0, 2.0], - volumes=[0.5, None] - ) + (q, p, k): ResourcePrice(prices=[1.0, 2.0], volumes=[0.5, None]) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # export node node_EXP = generate_pseudo_unique_key(mynet.nodes()) mynet.add_export_node( - node_key=node_EXP, + node_key=node_EXP, prices={ - (q,p,k): ResourcePrice( - prices=[2.0, 1.0], - volumes=[0.5, None] - ) + (q, p, k): ResourcePrice(prices=[2.0, 1.0], volumes=[0.5, None]) for p in range(number_periods) for k in range(number_intervals) - } - ) - + }, + ) + # other nodes node_A = generate_pseudo_unique_key(mynet.nodes()) mynet.add_source_sink_node( - node_key=node_A, - base_flow={(q, 0): 1.0, (q, 1): -1.0} - ) - + node_key=node_A, base_flow={(q, 0): 1.0, (q, 1): -1.0} + ) + # arc IA arc_tech_IA = Arcs( - name='any', + name="any", efficiency={(q, 0): 0.5, (q, 1): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - mynet.add_directed_arc( - node_key_a=node_IMP, - node_key_b=node_A, - arcs=arc_tech_IA) - + validate=False, + ) + mynet.add_directed_arc(node_key_a=node_IMP, node_key_b=node_A, arcs=arc_tech_IA) + # arc AE arc_tech_AE = Arcs( - name='any', + name="any", efficiency={(q, 0): 0.5, (q, 1): 0.5}, efficiency_reverse=None, static_loss=None, capacity=[3], minimum_cost=[2], - specific_capacity_cost=1, + specific_capacity_cost=1, capacity_is_instantaneous=False, - validate=False) - mynet.add_directed_arc( - node_key_a=node_A, - node_key_b=node_EXP, - arcs=arc_tech_AE) - + validate=False, + ) + mynet.add_directed_arc(node_key_a=node_A, node_key_b=node_EXP, arcs=arc_tech_AE) + # identify node types mynet.identify_node_types() - + # no sos, regular time intervals ipp = self.build_solve_ipp( # solver=solver, @@ -957,96 +874,93 @@ class TestESIPPProblem: # sense_use_real_variables_if_possible=sense_use_real_variables_if_possible, # sense_use_arc_interfaces=use_arc_interfaces, perform_analysis=False, - plot_results=False, # True, + plot_results=False, # True, 
print_solver_output=False, # irregular_time_intervals=irregular_time_intervals, - networks={'mynet': mynet}, + networks={"mynet": mynet}, number_intraperiod_time_intervals=number_intervals, - static_losses_mode=True, # just to reach a line, + static_losses_mode=True, # just to reach a line, mandatory_arcs=[], max_number_parallel_arcs={}, # init_aux_sets=init_aux_sets, simplify_problem=False, reporting_periods={0: (0,)}, - discount_rates={0: (0.0,)} - ) - + discount_rates={0: (0.0,)}, + ) + assert not is_peak_total_problem(ipp) - assert ipp.results['Problem'][0]['Number of constraints'] == 23 - assert ipp.results['Problem'][0]['Number of variables'] == 26 - assert ipp.results['Problem'][0]['Number of nonzeros'] == 57 - + assert ipp.results["Problem"][0]["Number of constraints"] == 23 + assert ipp.results["Problem"][0]["Number of variables"] == 26 + assert ipp.results["Problem"][0]["Number of nonzeros"] == 57 + # ********************************************************************* # ********************************************************************* - + # validation - + # the arc should be installed since it is required for feasibility - assert True in ipp.networks['mynet'].edges[(node_IMP, node_A, 0)][ - Network.KEY_ARC_TECH].options_selected + assert ( + True + in ipp.networks["mynet"] + .edges[(node_IMP, node_A, 0)][Network.KEY_ARC_TECH] + .options_selected + ) # the arc should be installed since it is required for feasibility - assert True in ipp.networks['mynet'].edges[(node_A, node_EXP, 0)][ - Network.KEY_ARC_TECH].options_selected - + assert ( + True + in ipp.networks["mynet"] + .edges[(node_A, node_EXP, 0)][Network.KEY_ARC_TECH] + .options_selected + ) + # interval 0: import only assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet', node_IMP, node_A, 0, q, 0)] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 0)]), 2.0, - abs_tol=1e-6 - ) + abs_tol=1e-6, + ) assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet', node_A, node_EXP, 0, q, 0)] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 0)]), 0.0, - abs_tol=1e-6 - ) + abs_tol=1e-6, + ) # interval 1: export only assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet', node_IMP, node_A, 0, q, 1)] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_IMP, node_A, 0, q, 1)]), 0.0, - abs_tol=1e-6 - ) + abs_tol=1e-6, + ) assert math.isclose( - pyo.value( - ipp.instance.var_v_glljqk[('mynet', node_A, node_EXP, 0, q, 1)] - ), + pyo.value(ipp.instance.var_v_glljqk[("mynet", node_A, node_EXP, 0, q, 1)]), 1.0, - abs_tol=1e-6 - ) - + abs_tol=1e-6, + ) + # IA amplitude assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_IMP, node_A, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_IMP, node_A, 0)]), 2.0, - abs_tol=0.01) + abs_tol=0.01, + ) # AE amplitude assert math.isclose( - pyo.value( - ipp.instance.var_v_amp_gllj[('mynet', node_A, node_EXP, 0)] - ), + pyo.value(ipp.instance.var_v_amp_gllj[("mynet", node_A, node_EXP, 0)]), 1.0, - abs_tol=0.01) - + abs_tol=0.01, + ) + # capex should be 7.0: 4+3 assert math.isclose(pyo.value(ipp.instance.var_capex), 7.0, abs_tol=1e-3) - + # sdncf should be -2.5: -3.5+1.0 - assert math.isclose( - pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3 - ) - + assert math.isclose(pyo.value(ipp.instance.var_sdncf_q[q]), -2.5, abs_tol=1e-3) + # the objective function should be -9.5: -7.5-2.5 assert math.isclose(pyo.value(ipp.instance.obj_f), -9.5, abs_tol=1e-3) 
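# For reference, the tiered-price arithmetic behind the fixture values in
# these three tests can be reproduced in a few lines of plain Python. The
# helper below is a hypothetical sketch for illustration only; it is not
# part of the topupopt API.
def tiered_cost(flow, prices, volumes):
    # cost of trading `flow` units under block prices: segment i applies
    # price prices[i] to at most volumes[i] units; None means uncapped
    cost, remaining = 0.0, flow
    for price, volume in zip(prices, volumes):
        if volume is None or remaining <= volume:
            return cost + remaining * price
        cost += volume * price
        remaining -= volume
    raise ValueError("flow exceeds the total capped volume")

# importing 2.0 units at prices [1.0, 2.0] with volumes [0.5, None] costs
# 0.5*1.0 + 1.5*2.0 = 3.5, the magnitude of var_sdncf_q in the
# increasing-import-price test above
assert tiered_cost(2.0, [1.0, 2.0], [0.5, None]) == 3.5
# exporting 0.5 units at prices [2.0, 1.0] with volumes [0.5, None] earns
# 1.0, the sdncf of the decreasing-export-price test
assert tiered_cost(0.5, [2.0, 1.0], [0.5, None]) == 1.0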
- + # ************************************************************************* # ************************************************************************* - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_esipp_resource.py b/tests/test_esipp_resource.py index 8685e8b..21cd2ba 100644 --- a/tests/test_esipp_resource.py +++ b/tests/test_esipp_resource.py @@ -6,25 +6,24 @@ from src.topupopt.problems.esipp.resource import are_prices_time_invariant # ***************************************************************************** # ***************************************************************************** + class TestResourcePrice: - # ************************************************************************* # ************************************************************************* - + def test_resources_time_invariant(self): - # single entry - + resource_prices = { (0, 0, 0): ResourcePrice(prices=1, volumes=None), - } - + } + assert are_prices_time_invariant(resource_prices) - + # ********************************************************************* - + # single assessment, two periods, same prices for both periods - + resource_prices = { (0, 0, 0): ResourcePrice(prices=1, volumes=None), (0, 0, 1): ResourcePrice(prices=1, volumes=None), @@ -32,14 +31,14 @@ class TestResourcePrice: (0, 1, 0): ResourcePrice(prices=1, volumes=None), (0, 1, 1): ResourcePrice(prices=1, volumes=None), (0, 1, 2): ResourcePrice(prices=1, volumes=None), - } - + } + assert are_prices_time_invariant(resource_prices) - + # ********************************************************************* - + # single assessment, two periods, same prices per period - + resource_prices = { (0, 0, 0): ResourcePrice(prices=1, volumes=None), (0, 0, 1): ResourcePrice(prices=1, volumes=None), @@ -47,14 +46,14 @@ class TestResourcePrice: (0, 1, 0): ResourcePrice(prices=2, volumes=None), (0, 1, 1): ResourcePrice(prices=2, volumes=None), (0, 1, 2): ResourcePrice(prices=2, volumes=None), - } - + } + assert are_prices_time_invariant(resource_prices) - + # ********************************************************************* - + # single assessment, two periods, different prices in a given period - + resource_prices = { (0, 0, 0): ResourcePrice(prices=1, volumes=None), (0, 0, 1): ResourcePrice(prices=1, volumes=None), @@ -62,342 +61,338 @@ class TestResourcePrice: (0, 1, 0): ResourcePrice(prices=2, volumes=None), (0, 1, 1): ResourcePrice(prices=2.5, volumes=None), (0, 1, 2): ResourcePrice(prices=2, volumes=None), - } - + } + assert not are_prices_time_invariant(resource_prices) - + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* def test_resource_prices_reals(self): - # 1) single segment, no volume limit, real input - + prices = 3 - + volumes = None - + res_p = ResourcePrice(prices=prices, volumes=volumes) - - assert res_p.number_segments(redo=False) == 1 - - assert res_p.number_segments(redo=True) == 1 - + + assert res_p.number_segments(redo=False) == 1 + + assert res_p.number_segments(redo=True) == 1 + assert not res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert 
res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + # 2) single segment, volume limit, real input - + prices = 3 - + volumes = 1.5 - + res_p = ResourcePrice(prices=prices, volumes=volumes) - - assert res_p.number_segments(redo=False) == 1 - - assert res_p.number_segments(redo=True) == 1 - + + assert res_p.number_segments(redo=False) == 1 + + assert res_p.number_segments(redo=True) == 1 + assert res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + # ************************************************************************* # ************************************************************************* def test_equivalence_single(self): - # ********************************************************************* # ********************************************************************* - + # single segment - + # ********************************************************************* # ********************************************************************* - + # no volume limit - + # single segment, no volume limit, different formats # prices and volumes match = True - + prices = 3 volumes = None res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=[prices], volumes=[volumes]) assert res_p1.is_equivalent(res_p2) assert res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, no volume limit, different formats # prices do not match = False - + prices = 3 volumes = None res_p1 = ResourcePrice(prices=prices, volumes=volumes) - res_p2 = ResourcePrice(prices=[prices+1], volumes=[volumes]) + res_p2 = ResourcePrice(prices=[prices + 1], volumes=[volumes]) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, no volume limit, same format # prices and volumes match = True - + prices = 3 volumes = None res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert res_p1.is_equivalent(res_p2) assert res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, no volume limit, same format # prices do not match = False - + prices = 3 volumes = None res_p1 = ResourcePrice(prices=prices, volumes=volumes) - res_p2 = ResourcePrice(prices=prices+1, volumes=volumes) + res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* # ********************************************************************* - + # with volume limits - + # single segment, volume limit, different formats # prices and volumes match = True - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=[prices], volumes=[volumes]) assert res_p1.is_equivalent(res_p2) assert res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, different formats: False # prices do not match = False - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) - res_p2 = ResourcePrice(prices=[prices+1], volumes=[volumes]) + res_p2 = 
ResourcePrice(prices=[prices + 1], volumes=[volumes]) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, same format # prices and volumes match = True - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert res_p1.is_equivalent(res_p2) assert res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, same format: False # prices do not match = False - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) - res_p2 = ResourcePrice(prices=prices+1, volumes=volumes) + res_p2 = ResourcePrice(prices=prices + 1, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, different formats # volumes do not match = False - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) - res_p2 = ResourcePrice(prices=[prices], volumes=[volumes+1]) + res_p2 = ResourcePrice(prices=[prices], volumes=[volumes + 1]) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, same format # volumes do not match = False - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) - res_p2 = ResourcePrice(prices=prices, volumes=volumes+1) + res_p2 = ResourcePrice(prices=prices, volumes=volumes + 1) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, different formats # volumes do not match = False - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=[prices], volumes=[None]) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # single segment, volume limit, same format # volumes do not match = False - + prices = 3 volumes = 1 res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=prices, volumes=None) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* def test_equivalence_multiple_segments(self): - # ********************************************************************* # ********************************************************************* - + # multiple segments - + # ********************************************************************* # ********************************************************************* - + # no volume limit - + # two segments, no volume limit, same format # prices and volumes match = True - - prices = [1,3] - volumes = [1,None] + + prices = [1, 3] + volumes = [1, None] res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert 
res_p1.is_equivalent(res_p2) assert res_p2.is_equivalent(res_p1) - + # two segments, no volume limit, same format # prices do not match = False - - prices = [1,3] - volumes = [1,None] + + prices = [1, 3] + volumes = [1, None] res_p1 = ResourcePrice(prices=prices, volumes=volumes) - prices = [2,3] - volumes = [1,None] + prices = [2, 3] + volumes = [1, None] res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # with volume limits - + # two segments, volume limit, same format # prices and volumes match = True - - prices = [1,3] - volumes = [1,3] + + prices = [1, 3] + volumes = [1, 3] res_p1 = ResourcePrice(prices=prices, volumes=volumes) res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert res_p1.is_equivalent(res_p2) assert res_p2.is_equivalent(res_p1) - + # two segments, volume limit, same format: False # prices do not match = False - - prices = [1,3] - volumes = [1,4] + + prices = [1, 3] + volumes = [1, 4] res_p1 = ResourcePrice(prices=prices, volumes=volumes) - prices = [1,4] - volumes = [1,4] + prices = [1, 4] + volumes = [1, 4] res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # ********************************************************************* - + # volumes do not match - + # two segments, volume limit, same format # volumes do not match = False - - prices = [1,3] - volumes = [1,4] + + prices = [1, 3] + volumes = [1, 4] res_p1 = ResourcePrice(prices=prices, volumes=volumes) - prices = [1,3] - volumes = [1,5] + prices = [1, 3] + volumes = [1, 5] res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + # two segments, volume limit, same format # volumes do not match = False - - prices = [1,3] - volumes = [1,4] + + prices = [1, 3] + volumes = [1, 4] res_p1 = ResourcePrice(prices=prices, volumes=volumes) - prices = [1,3] - volumes = [1,None] + prices = [1, 3] + volumes = [1, None] res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + + # ********************************************************************* # ********************************************************************* - # ********************************************************************* - + # different number of segments - - prices = [1,3] - volumes = [1,4] + + prices = [1, 3] + volumes = [1, 4] res_p1 = ResourcePrice(prices=prices, volumes=volumes) - prices = [1,3,5] - volumes = [1,4,None] + prices = [1, 3, 5] + volumes = [1, 4, None] res_p2 = ResourcePrice(prices=prices, volumes=volumes) assert not res_p1.is_equivalent(res_p2) assert not res_p2.is_equivalent(res_p1) - + + # ********************************************************************* # ********************************************************************* - # ********************************************************************* - + # ************************************************************************* # ************************************************************************* def test_resource_prices_lists(self): - # ********************************************************************* - + # aspects that were tested: # i) number of segments (1, multiple or none) # ii) price variations (increasing, decreasing and stable) # iii) 
volume limits - + # ********************************************************************* - + # 1) multiple segments, prices increase, volume limits # 2) multiple segments, prices decrease, volume limits # 3) multiple segments, prices are stable, volume limits @@ -406,356 +401,345 @@ class TestResourcePrice: # 6) multiple segments, prices are stable, no volume limit # 7) one segment, prices are stable, volume limits # 8) one segment, prices are stable, no volume limit - + # ********************************************************************* - + # 1) multiple segments, prices increase, volume limits - - prices = [1,2,3] - - volumes = [2,1,3] - + + prices = [1, 2, 3] + + volumes = [2, 1, 3] + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=True) == 3 - + assert res_p.number_segments(redo=False) == 3 - + assert res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert not res_p.price_monotonically_decreasing_with_volume() - + assert not res_p.is_volume_invariant() - + # ********************************************************************* - + # 2) multiple segments, prices decrease, volume limits - - prices = [3,2,1] - - volumes = [2,1,3] - + + prices = [3, 2, 1] + + volumes = [2, 1, 3] + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=False) == 3 - + assert res_p.is_volume_capped() - + assert not res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert not res_p.is_volume_invariant() - + # ********************************************************************* - + # 3) multiple segments, prices are stable, volume limits - - prices = [2,2,2] - - volumes = [2,1,3] - + + prices = [2, 2, 2] + + volumes = [2, 1, 3] + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=True) == 3 - + assert res_p.number_segments(redo=False) == 3 - + assert res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + # ********************************************************************* - + # 4) multiple segments, prices increase, no volume limit - - prices = [1,2,3] - - volumes = [2,1,None] - + + prices = [1, 2, 3] + + volumes = [2, 1, None] + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=True) == 3 - + assert res_p.number_segments(redo=False) == 3 - + assert not res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert not res_p.price_monotonically_decreasing_with_volume() - + assert not res_p.is_volume_invariant() - + # ********************************************************************* - + # 5) multiple segments, prices are stable, no volume limit (duplicates case 6 below; the decreasing-price case without a volume limit is not exercised here) - - prices = [2,2,2] - - volumes = 
[2,1,None] - + + prices = [2, 2, 2] + + volumes = [2, 1, None] + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=True) == 3 - + assert res_p.number_segments(redo=False) == 3 - + assert not res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + # ********************************************************************* - + # 7) one segment, prices are stable, volume limits - + prices = [2] - + volumes = [2] - + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=True) == 1 - + assert res_p.number_segments(redo=False) == 1 - + assert res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + # ********************************************************************* - + # 8) one segment, prices are stable, no volume limit - + prices = [3] - + volumes = [None] - + res_p = ResourcePrice(prices=prices, volumes=volumes) - + assert res_p.number_segments(redo=True) == 1 - + assert res_p.number_segments(redo=False) == 1 - + assert not res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + res_p = ResourcePrice(prices=prices[0], volumes=volumes[0]) - + assert res_p.number_segments(redo=True) == 1 - + assert res_p.number_segments(redo=False) == 1 - + assert not res_p.is_volume_capped() - + assert res_p.price_monotonically_increasing_with_volume() - + assert res_p.price_monotonically_decreasing_with_volume() - + assert res_p.is_volume_invariant() - + # ********************************************************************* - + # errors - + # ********************************************************************* - + # create object without prices - + error_triggered = False - + try: - _ = ResourcePrice(prices=None, - volumes=volumes) + _ = ResourcePrice(prices=None, volumes=volumes) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with negative prices in lists - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,-3,2], - volumes=[3,4,5]) + _ = ResourcePrice(prices=[7, -3, 2], volumes=[3, 4, 5]) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object where an intermediate segment has no volume limit - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,4,2], - volumes=[3,None,5]) + _ = ResourcePrice(prices=[7, 4, 2], volumes=[3, None, 5]) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with negative volumes in lists - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,3,2], - volumes=[4,-1,2]) + _ = ResourcePrice(prices=[7, 3, 2], volumes=[4, -1, 2]) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with non-numeric prices in lists - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,'4',2], - volumes=[3,4,5]) + _ = ResourcePrice(prices=[7, "4", 
2], volumes=[3, 4, 5]) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with non-numeric volumes in lists - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,3,2], - volumes=[4,'3',2]) + _ = ResourcePrice(prices=[7, 3, 2], volumes=[4, "3", 2]) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with mismatched price and volume lists - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,3,2], - volumes=[5,7]) + _ = ResourcePrice(prices=[7, 3, 2], volumes=[5, 7]) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with a price list as an input and an unsupported type - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,3,2], - volumes='hello') + _ = ResourcePrice(prices=[7, 3, 2], volumes="hello") except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with negative prices in lists (no volumes are provided) - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,3,-2], - volumes=None) + _ = ResourcePrice(prices=[7, 3, -2], volumes=None) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with non-numeric prices in lists (no volumes are provided) - + error_triggered = False - + try: - _ = ResourcePrice(prices=[7,3,'a'], - volumes=None) + _ = ResourcePrice(prices=[7, 3, "a"], volumes=None) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with a scalar price and a list of volumes - + error_triggered = False - + try: - _ = ResourcePrice(prices=5, - volumes=[7,3,4]) + _ = ResourcePrice(prices=5, volumes=[7, 3, 4]) except TypeError: error_triggered = True assert error_triggered - + # ********************************************************************* - + # create object with negative prices - + error_triggered = False - + try: - _ = ResourcePrice(prices=-3, - volumes=None) + _ = ResourcePrice(prices=-3, volumes=None) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_esipp_utils.py b/tests/test_esipp_utils.py index ad1a749..2c27b98 100644 --- a/tests/test_esipp_utils.py +++ b/tests/test_esipp_utils.py @@ -8,13 +8,12 @@ import src.topupopt.problems.esipp.utils as utils -#****************************************************************************** -#****************************************************************************** +# ****************************************************************************** +# ****************************************************************************** + class TestProblemUtils: - def test_integrality(self): - # case 1 assert utils.is_integer(0, integrality_tolerance=0.1) 
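# All cases in this test are consistent with a simple round-and-compare
# rule. The sketch below is an assumption about the intended semantics,
# not necessarily how utils.is_integer is actually implemented.
def _is_integer_sketch(value, integrality_tolerance):
    # a tolerance of 0.5 or more would label every value an integer,
    # which is why the oversized tolerances further down raise ValueError
    if integrality_tolerance >= 0.5:
        raise ValueError("integrality tolerance must be below 0.5")
    return abs(value - round(value)) <= integrality_tolerance

assert _is_integer_sketch(0.9, integrality_tolerance=0.1)  # cf. case 8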
# case 2 @@ -31,12 +30,12 @@ class TestProblemUtils: assert utils.is_integer(0.1, integrality_tolerance=0.1) # case 8 assert utils.is_integer(0.9, integrality_tolerance=0.1) - + # case 9 assert utils.is_integer(0.49, integrality_tolerance=0.49) # case 10 assert utils.is_integer(0.51, integrality_tolerance=0.49) - + # integrality tolerance is too large error_raised = False try: @@ -50,9 +49,10 @@ class TestProblemUtils: except ValueError: error_raised = True assert error_raised - - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** diff --git a/tests/test_gis_calculate.py b/tests/test_gis_calculate.py index c44af7f..5c2ee63 100644 --- a/tests/test_gis_calculate.py +++ b/tests/test_gis_calculate.py @@ -19,364 +19,340 @@ import src.topupopt.data.gis.osm as osm # ***************************************************************************** # ***************************************************************************** + class TestGisCalculate: - # ************************************************************************* # ************************************************************************* - + def validate_edge_distances(self, G: nx.MultiDiGraph, abs_tol: float = 5): - # get the true edge lengths true_lengths = { - edge_key: (G.edges[edge_key][osm.KEY_OSMNX_LENGTH] - if osm.KEY_OSMNX_LENGTH in G.edges[edge_key] else None) + edge_key: ( + G.edges[edge_key][osm.KEY_OSMNX_LENGTH] + if osm.KEY_OSMNX_LENGTH in G.edges[edge_key] + else None + ) for edge_key in G.edges(keys=True) - } - + } + # get the edge lengths calculated independently calculated_lengths = gis_calc.edge_lengths(G) - + # for each edge on the graph - for edge_key in true_lengths.keys(): + for edge_key in true_lengths.keys(): # validate assert isclose( - calculated_lengths[edge_key], - true_lengths[edge_key], - abs_tol=abs_tol - ) - - + calculated_lengths[edge_key], true_lengths[edge_key], abs_tol=abs_tol + ) + # ************************************************************************* # ************************************************************************* - + def test_distances(self): - # get a graph - + G = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + # count occurrences - + self.example_count_occurrences(G) - + # validate without projected coordinates - + self.validate_edge_distances(G=G) - + # project the graph - + projected_G = ox.project_graph(G=G) - + # validate with projected coordinates - + self.validate_edge_distances(G=projected_G) - + # calculate node path lengths - + self.example_node_path_lengths() - + # calculate edge path lengths - + self.example_edge_path_lengths() - + # ************************************************************************* # 
************************************************************************* - + def example_node_path_lengths(self): - # identify path between two nodes - + G = nx.MultiDiGraph() - + G.add_edge(0, 1, length=10) - + G.add_edge(1, 2, length=10) - + G.add_edge(1, 2, length=5) - + G.add_edge(2, 3, length=5) - + # calculate length - - path = [0,1,2,3] - + + path = [0, 1, 2, 3] + this_length = gis_calc.node_path_length(network=G, path=path) - + assert this_length == 20 - + # calculate all possible lengths - + these_lengths = gis_calc.node_path_length( - network=G, - path=path, - return_minimum_length_only=False - ) - - true_lengths = [25,20] - + network=G, path=path, return_minimum_length_only=False + ) + + true_lengths = [25, 20] + assert len(these_lengths) == len(true_lengths) - + for _length in these_lengths: - assert _length in true_lengths - + # ********************************************************************* # ********************************************************************* - + # no path - - path = [0,3] - + + path = [0, 3] + this_length = gis_calc.node_path_length(network=G, path=path) - + assert this_length == inf - + these_lengths = gis_calc.node_path_length( - network=G, - path=path, - return_minimum_length_only=False - ) - + network=G, path=path, return_minimum_length_only=False + ) + assert these_lengths[0] == inf - + # empty path - + path = [] - + this_length = gis_calc.node_path_length(network=G, path=path) - + assert this_length == inf - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def example_edge_path_lengths(self): - # identify path between two nodes - + G = nx.MultiDiGraph() - + G.add_edge(0, 1, length=10) - + G.add_edge(1, 2, length=10) - + G.add_edge(1, 2, length=5) - + G.add_edge(2, 3, length=5) - + # calculate length - - path = [(0,1,0),(1,2,0),(2,3,0)] - + + path = [(0, 1, 0), (1, 2, 0), (2, 3, 0)] + this_length = gis_calc.edge_path_length(network=G, path=path) - + assert this_length == 25 - + # alternative path - - path = [(0,1,0),(1,2,1),(2,3,0)] - + + path = [(0, 1, 0), (1, 2, 1), (2, 3, 0)] + this_length = gis_calc.edge_path_length(network=G, path=path) - + assert this_length == 20 - + # ********************************************************************* # ********************************************************************* - + # no path - - path = [(0,1,0),(1,3,0)] - + + path = [(0, 1, 0), (1, 3, 0)] + this_length = gis_calc.edge_path_length(network=G, path=path) - + assert this_length == inf - + path = [] - + this_length = gis_calc.edge_path_length(network=G, path=path) - + assert this_length == inf - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def example_count_occurrences(self, G: nx.MultiDiGraph): - # get a gdf from the graph via osmnx - + gdf = ox.utils_graph.graph_to_gdfs(G, nodes=False) - + # define the column (street) - - column = 'name' - - # count the occurrences - + + column = "name" + + # count the occurrences + count_dict = gis_calc.count_ocurrences(gdf, column=column) - + # expected result - + true_count_dict = { - nan: 249, - 
'Havremarken': 8, - 'Kornmarken': 34, - 'Kløvervej': 38, - 'Kærvej': 52, - 'Plougslundvej': 24, - 'Egevænget': 16, - 'Fyrrevænget': 6, - 'Kærhusvej': 12, - 'Kløvermarken': 52, - 'Grønningen': 38, - 'Bygager': 10, - 'Fælleden': 6, - 'Flintemarken': 52, - 'Stendyssen': 8, - 'Markskellet': 54, - 'Engdraget': 20, - 'Vestervang': 36, - 'Tingstedet': 87, - 'Tuen': 10, - 'Lillevang': 96, - 'Grenevej': 24, - 'Hedegårdsvej': 16, - 'Gravhøjen': 28, - 'Lysningen': 37, - 'Ved Søen': 20, - 'Bopladsen': 10, - 'Koldingvej': 14, - 'Bakkelien': 38 - } - + nan: 249, + "Havremarken": 8, + "Kornmarken": 34, + "Kløvervej": 38, + "Kærvej": 52, + "Plougslundvej": 24, + "Egevænget": 16, + "Fyrrevænget": 6, + "Kærhusvej": 12, + "Kløvermarken": 52, + "Grønningen": 38, + "Bygager": 10, + "Fælleden": 6, + "Flintemarken": 52, + "Stendyssen": 8, + "Markskellet": 54, + "Engdraget": 20, + "Vestervang": 36, + "Tingstedet": 87, + "Tuen": 10, + "Lillevang": 96, + "Grenevej": 24, + "Hedegårdsvej": 16, + "Gravhøjen": 28, + "Lysningen": 37, + "Ved Søen": 20, + "Bopladsen": 10, + "Koldingvej": 14, + "Bakkelien": 38, + } + # test - + for key, value in count_dict.items(): - assert value == true_count_dict[key] - + # ********************************************************************* # ********************************************************************* - + # count only a few column entries - + count_dict = gis_calc.count_ocurrences( - gdf, - column=column, - column_entries=['Kløvermarken', 'Grenevej', 'Bopladsen'] - ) - + gdf, column=column, column_entries=["Kløvermarken", "Grenevej", "Bopladsen"] + ) + # test - + for key, value in count_dict.items(): - assert value == true_count_dict[key] - + # ********************************************************************* # ********************************************************************* - + # count the nans - - count_dict = gis_calc.count_ocurrences( - gdf, - column=column, - column_entries=[nan] - ) - + + count_dict = gis_calc.count_ocurrences(gdf, column=column, column_entries=[nan]) + # test - + for key, value in count_dict.items(): - assert value == true_count_dict[key] - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_compute_great_circle_distance_linestring(self): - # Source: https://keisan.casio.com/exec/system/1224587128 - - length_tolerance = 1 # meters - + + length_tolerance = 1 # meters + # dict_A = {'x': 12.2430512, 'y': 55.6822924} - + # dict_B = {'x': 12.242863, 'y': 55.682155} - + # dict_C = {'x': 12.252233, 'y': 55.68085} - + list_1_points = [(12, 56)] - + # 1º longitude at a latitude of 56º - - list_2_points_a = [(12, 56), - (13, 56)] - + + list_2_points_a = [(12, 56), (13, 56)] + # 1º latitude at a longitude of 13º - - list_2_points_b = [(13, 56), - (13, 57)] - + + list_2_points_b = [(13, 56), (13, 57)] + # 1º longitude at a latitude of 56º + 1º latitude at a longitude of 13º - - list_3_points = [(12, 56), - (13, 56), - (13, 57)] - + # radius = 6363.478 km at sea level # edge = radius*(pi/180)*angle in degrees - - true_length_2_points_a = 62.178959*1e3 # 62.178959 km with r=6371.009 km - - true_length_2_points_b = 111.195084*1e3 # 111.195084 km with r=6371.009 km - - true_length_3_points = true_length_2_points_a+true_length_2_points_b - + + list_3_points = [(12, 56), (13, 56), (13, 57)] + # radius = 6363.478 km at sea level # edge = radius*(pi/180)*angle in degrees - + + 
true_length_2_points_a = 62.178959 * 1e3 # 62.178959 km with r=6371.009 km + + true_length_2_points_b = 111.195084 * 1e3 # 111.195084 km with r=6371.009 km + + true_length_3_points = true_length_2_points_a + true_length_2_points_b + # make sure the function fails with a single point (sanity check) error_triggered = False try: - line = LineString(list_1_points) + line = LineString(list_1_points) except Exception: error_triggered = True assert error_triggered - + # make sure it works with 2 points - + line = LineString(list_2_points_a) - + _length = gis_calc.great_circle_distance_along_path(line) - + assert isclose(_length, true_length_2_points_a, abs_tol=length_tolerance) - + line = LineString(list_2_points_b) - + _length = gis_calc.great_circle_distance_along_path(line) - + assert isclose(_length, true_length_2_points_b, abs_tol=length_tolerance) - + # make sure it works with 3 points - + line = LineString(list_3_points) - + _length = gis_calc.great_circle_distance_along_path(line) - + assert isclose(_length, true_length_3_points, abs_tol=length_tolerance) - + # ************************************************************************* # ************************************************************************* - + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_gis_identify.py b/tests/test_gis_identify.py index 04862ba..cb960a9 100644 --- a/tests/test_gis_identify.py +++ b/tests/test_gis_identify.py @@ -19,18 +19,19 @@ import src.topupopt.data.gis.osm as gis_osm # ***************************************************************************** # ***************************************************************************** + class TestGisIdentify: - # ************************************************************************* # ************************************************************************* - + def straight_path_validator( - self, - network: nx.MultiDiGraph, - path: list, - excluded_nodes: list, - consider_reversed_edges: bool, - ignore_self_loops: bool): + self, + network: nx.MultiDiGraph, + path: list, + excluded_nodes: list, + consider_reversed_edges: bool, + ignore_self_loops: bool, + ): # find out the unique nodes set_nodes = set(path) # at least three nodes @@ -51,48 +52,28 @@ class TestGisIdentify: assert node not in path[1:-1] # intermediate nodes can only have two neighbours for node_key in path[1:-1]: - assert len( - set( - gis_iden.neighbours( - network, - node_key, - ignore_self_loops=True - ) - ) - ) == 2 + assert ( + len(set(gis_iden.neighbours(network, node_key, ignore_self_loops=True))) + == 2 + ) # end nodes need to have at least one neighbour, except in loops, # wherein they need to have two neighbours if path[0] == path[-1]: # end nodes in loops need to have at least two neighbours - assert len( - set( - gis_iden.neighbours( - network, - path[0], - ignore_self_loops=True - ) - ) - ) >= 2 + assert ( + len(set(gis_iden.neighbours(network, path[0], ignore_self_loops=True))) + >= 2 + ) else: # end nodes need to have at least one neighbour - assert len( - set( - gis_iden.neighbours( - network, - path[0], - ignore_self_loops=True - ) - ) - ) >= 1 - assert len( - set( - gis_iden.neighbours( - network, - path[-1], - ignore_self_loops=True - ) - ) - ) >= 1 + assert ( + len(set(gis_iden.neighbours(network, path[0], 
ignore_self_loops=True))) + >= 1 + ) + assert ( + len(set(gis_iden.neighbours(network, path[-1], ignore_self_loops=True))) + >= 1 + ) # if ignore_self_loops=False, intermediate nodes cannot have self-loops if not ignore_self_loops: for node in path[1:-1]: @@ -107,2521 +88,2453 @@ class TestGisIdentify: # assert not network.has_edge(node, node) # make sure it qualifies as a path assert gis_iden.is_node_path( - network, - path, - consider_reversed_edges=consider_reversed_edges - ) + network, path, consider_reversed_edges=consider_reversed_edges + ) # make sure it is a straight path assert gis_iden.is_path_straight( - network, - path, + network, + path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - - + ignore_self_loops=ignore_self_loops, + ) + # ************************************************************************* # ************************************************************************* - + def test_finding_simplifiable_paths_osmnx(self): - # network should be a OSM-nx formatted graph network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + # ********************************************************************* # ********************************************************************* - + consider_reversed_edges = False ignore_self_loops = False - + # paths paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) # verify the paths for path in paths: self.straight_path_validator( - network, - path, - excluded_nodes=[], + network, + path, + excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + consider_reversed_edges = False ignore_self_loops = True - + # paths paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) # verify the paths for path in paths: self.straight_path_validator( - network, - path, - excluded_nodes=[], + network, + path, + excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + consider_reversed_edges = True ignore_self_loops = False - + # paths paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) # verify the paths for path in paths: self.straight_path_validator( - network, - path, - excluded_nodes=[], + network, + path, + excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # 
********************************************************************* - + consider_reversed_edges = True ignore_self_loops = True - + # paths paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) # verify the paths for path in paths: self.straight_path_validator( - network, - path, - excluded_nodes=[], + network, + path, + excluded_nodes=[], consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - - # ********************************************************************* - # ********************************************************************* + ignore_self_loops=ignore_self_loops, + ) + + # ********************************************************************* + # ********************************************************************* # ************************************************************************* # ************************************************************************* - + def test_find_straight_path_empty_graph(self): - # test variations: # 1) excluded nodes # 2) self loops # 3) reversed edges - + # ********************************************************************* # ********************************************************************* - + # empty network network = nx.MultiDiGraph() - + # no reversed edges, no self loops, no excluded nodes consider_reversed_edges = False ignore_self_loops = False excluded_nodes = [] - + # test path validator with non-path error_raised = False try: assert not self.straight_path_validator( - network, - [1, 1, 1], - excluded_nodes, + network, + [1, 1, 1], + excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) except AssertionError: error_raised = True - assert error_raised - + assert error_raised + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) assert len(straight_paths) == 0 - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, ignore self loops, no excluded nodes consider_reversed_edges = False ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) assert len(straight_paths) == 0 - + # ********************************************************************* # ********************************************************************* - + # empty network network = nx.MultiDiGraph() - + # consider reversed edges, no self loops, no excluded nodes consider_reversed_edges = True ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert 
len(straight_paths) == len(true_straight_paths) assert len(straight_paths) == 0 - + # ********************************************************************* # ********************************************************************* - + # empty network network = nx.MultiDiGraph() - + # consider reversed edges, ignore self loops, no excluded nodes consider_reversed_edges = True ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) assert len(straight_paths) == 0 - + + # ********************************************************************* # ********************************************************************* - # ********************************************************************* - + # enhance test coverage - + # add single node network.add_node(0) path = gis_iden._find_path_direction_insensitive(network, [], 0, False) assert type(path) == list assert len(path) == 1 assert repr(path) == repr([0]) - + # ************************************************************************* # ************************************************************************* - + def test_find_one_edge_path(self): - # ********************************************************************* # ********************************************************************* - + # network with a single edge network = nx.MultiDiGraph() - network.add_edges_from([ - (0, 1, 0) - ]) - + network.add_edges_from([(0, 1, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] 
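# For contrast with the degenerate networks in this test: the smallest
# network with a simplifiable path is a three-node chain, whose single
# intermediate node has exactly two neighbours. This is a sketch assuming
# the behaviour encoded in straight_path_validator above; it is not one
# of the original assertions.
chain = nx.MultiDiGraph()
chain.add_edges_from([(10, 11, 0), (11, 12, 0)])
chain_paths = gis_iden.find_simplifiable_paths(
    chain,
    [],
    consider_reversed_edges=False,
    ignore_self_loops=False,
)
# expected: one straight path spanning the whole chain, i.e. [10, 11, 12]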
assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - 
) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_find_one_edge_path_w_reversed_edge(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - network.add_edges_from([ - (0, 1, 0), (1, 0, 0) - ]) - + network.add_edges_from([(0, 1, 0), (1, 0, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + 
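# The graph holds the antiparallel pair (0, 1, 0) and (1, 0, 0), i.e. a single
# two-node link in each direction. With consider_reversed_edges=False there is
# still no intermediate node to collapse, so every case in this block expects
# an empty result; a sketch of the present case (illustrative only):
#
#     g = nx.MultiDiGraph()
#     g.add_edges_from([(0, 1, 0), (1, 0, 0)])
#     paths = gis_iden.find_simplifiable_paths(
#         g, [0], consider_reversed_edges=False, ignore_self_loops=False
#     )
#     assert len(paths) == 0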
straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the 
end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* # ************************************************************************* # ************************************************************************* - + def test_find_simple_straight_path(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - network.add_edges_from([ - (0, 1, 0), (1, 2, 0) - ]) - + network.add_edges_from([(0, 1, 0), (1, 2, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # no reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, - excluded_nodes, + network, + straight_path, + excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] 
assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 
0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_find_simple_straight_path_with_antiparallel_edge(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - 
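# Topology under test below: the straight path 0 -> 1 -> 2 plus one
# antiparallel edge 2 -> 1. The expectations that follow suggest the extra
# reverse edge does not block simplification of the forward path [0, 1, 2]
# when reversed edges are not considered.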
network.add_edges_from([ - (0, 1, 0), (1, 2, 0), (2, 1, 0) - ]) - + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 1, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # no reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + 
ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + 
ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_find_straight_path_cycle(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - network.add_edges_from([ - (0, 1, 0), (1, 2, 0), (2, 0, 0), (0, 2, 0), (1, 1, 0) - ]) - + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0), (0, 2, 0), (1, 1, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 2, 0, 1]] assert len(straight_paths) == 
len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2, 0], [1, 2, 0, 1], [2, 1, 0, 2]] - assert len(straight_paths) == len(true_straight_paths)-2 + assert len(straight_paths) == len(true_straight_paths) - 2 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # no self loops, excluded the "middle" node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 2, 0, 1]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 2, 0]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) 
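# In this cycle test the graph is the triangle 0 -> 1 -> 2 -> 0 plus the
# chord 0 -> 2 and the self-loop (1, 1, 0). With node 2 excluded it can still
# act as a path endpoint but not as an intermediate node, and with
# ignore_self_loops=False node 1 cannot be collapsed either, which is
# consistent with the single expected chain [2, 0, 1] below (node 0 is the
# only intermediate node that gets collapsed).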
true_straight_paths = [[2, 0, 1]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2, 0], [1, 2, 0, 1], [2, 1, 0, 2]] - assert len(straight_paths) == len(true_straight_paths)-2 + assert len(straight_paths) == len(true_straight_paths) - 2 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 2, 0, 1], [1, 0, 2, 1]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - true_straight_paths = [[1, 2, 0, 1], [1, 0, 2, 1]] - assert len(straight_paths) == len(true_straight_paths)-1 + ignore_self_loops=ignore_self_loops, + ) + true_straight_paths = [[1, 2, 0, 1], [1, 0, 2, 1]] + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - 
ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 2, 0], [0, 2, 1]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 0, 2], [2, 0, 1]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_find_simple_straight_path_w_reversed_edge(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - network.add_edges_from([ - (0, 1, 0), (2, 1, 0) - ]) - + network.add_edges_from([(0, 1, 0), (2, 1, 0)]) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # 
********************************************************************* - + # no reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed 
edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # 
************************************************************************* - + def test_find_simple_straight_path_w_reversed_edge2(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - network.add_edges_from([ - (1, 0, 0), (1, 2, 0) - ]) - + network.add_edges_from([(1, 0, 0), (1, 2, 0)]) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # do not allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # 
********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, allow self loops, no excluded nodes ignore_self_loops = True excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the middle node ignore_self_loops = False excluded_nodes = [1] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [] assert len(straight_paths) == len(true_straight_paths) - + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the start node ignore_self_loops = False excluded_nodes = [0] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, 
consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, excluded the end node ignore_self_loops = False excluded_nodes = [2] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2], [2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_find_simplifiable_path_four_nodes(self): - # ********************************************************************* # ********************************************************************* - + # network with a two edge path network = nx.MultiDiGraph() - network.add_edges_from([ - (1, 0, 0), (1, 2, 0), (2, 3, 0) - ]) - + network.add_edges_from([(1, 0, 0), (1, 2, 0), (2, 3, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[1, 2, 3]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + 
ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2, 3], [3, 2, 1, 0]] - assert len(straight_paths) == len(true_straight_paths)-1 + assert len(straight_paths) == len(true_straight_paths) - 1 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # allow reversed edges, no self loops, no excluded nodes, both directions ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, ignore_self_loops=ignore_self_loops, - include_both_directions=True - ) + include_both_directions=True, + ) true_straight_paths = [[0, 1, 2, 3], [3, 2, 1, 0]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_find_simplifiable_path_four_node_cycle(self): - # ********************************************************************* # ********************************************************************* - + # network with a four-node cycle network = nx.MultiDiGraph() - network.add_edges_from([ - (0, 1, 0), (1, 2, 0), (2, 3, 0), (0, 3, 0) - ]) - + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 3, 0), (0, 3, 0)]) + # ********************************************************************* # ********************************************************************* - + # do not consider reversed edges consider_reversed_edges = False - + # ********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [[0, 1, 2, 3]] assert len(straight_paths) == len(true_straight_paths) for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # consider reversed edges consider_reversed_edges = True - + # 
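# -------------------------------------------------------------------------
# Sketch of the convention the "- 1" assertions above encode (assumed
# semantics): with consider_reversed_edges=True, a chain and its mirror
# image describe the same street, so only one orientation is returned
# unless include_both_directions=True is passed.
H = nx.MultiDiGraph()
H.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 3, 0)])
one_way = gis_iden.find_simplifiable_paths(
    H, [], consider_reversed_edges=True, ignore_self_loops=False
)
both_ways = gis_iden.find_simplifiable_paths(
    H,
    [],
    consider_reversed_edges=True,
    ignore_self_loops=False,
    include_both_directions=True,
)
assert len(both_ways) == 2 * len(one_way)  # each chain plus its reverse
# -------------------------------------------------------------------------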
********************************************************************* # ********************************************************************* - + # no reversed edges, no self loops, no excluded nodes ignore_self_loops = False excluded_nodes = [] - + straight_paths = gis_iden.find_simplifiable_paths( - network, + network, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) + ignore_self_loops=ignore_self_loops, + ) true_straight_paths = [ - [0, 1, 2, 3, 0], [1, 2, 3, 0, 1], [2, 3, 0, 1, 2], [3, 0, 1, 2, 3] - ] - assert len(straight_paths) == len(true_straight_paths)-3 + [0, 1, 2, 3, 0], + [1, 2, 3, 0, 1], + [2, 3, 0, 1, 2], + [3, 0, 1, 2, 3], + ] + assert len(straight_paths) == len(true_straight_paths) - 3 for straight_path in straight_paths: assert straight_path in true_straight_paths self.straight_path_validator( - network, - straight_path, + network, + straight_path, excluded_nodes, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_start_end_points_identification(self): - # create LineString object - + line = LineString([(0, 0), (1, 0), (2, 0)]) - + # tolerance - + proximity_tolerance = 1e-3 - + # start point - + point_a = Point(0, 0) - + # end point - + point_b = Point(2, 0) - + # approximately the start point - - point_c = Point(0+proximity_tolerance/2, 0) - + + point_c = Point(0 + proximity_tolerance / 2, 0) + # approximately the end point - - point_d = Point(2+proximity_tolerance/2, 0) - + + point_d = Point(2 + proximity_tolerance / 2, 0) + # close, but not quite the start point - - point_e = Point(0+2*proximity_tolerance, 0) - + + point_e = Point(0 + 2 * proximity_tolerance, 0) + # close, but not quite the end point - - point_f = Point(2+2*proximity_tolerance, 0) - + + point_f = Point(2 + 2 * proximity_tolerance, 0) + # another point on the line other than the start and end points - + point_g = Point(1, 0) - + # tests - + assert gis_iden.is_start_or_end_point_or_close( - line, - point_a, - tolerance=proximity_tolerance) - + line, point_a, tolerance=proximity_tolerance + ) + assert gis_iden.is_start_or_end_point_or_close( - line, - point_b, - tolerance=proximity_tolerance) - + line, point_b, tolerance=proximity_tolerance + ) + assert gis_iden.is_start_or_end_point_or_close( - line, - point_c, - tolerance=proximity_tolerance) - + line, point_c, tolerance=proximity_tolerance + ) + assert gis_iden.is_start_or_end_point_or_close( - line, - point_d, - tolerance=proximity_tolerance) - + line, point_d, tolerance=proximity_tolerance + ) + assert not gis_iden.is_start_or_end_point_or_close( - line, - point_e, - tolerance=proximity_tolerance) - + line, point_e, tolerance=proximity_tolerance + ) + assert not gis_iden.is_start_or_end_point_or_close( - line, - point_f, - tolerance=proximity_tolerance) - + line, point_f, tolerance=proximity_tolerance + ) + assert not gis_iden.is_start_or_end_point_or_close( - line, - point_g, - tolerance=proximity_tolerance) - + line, point_g, tolerance=proximity_tolerance + ) + # other method - - assert gis_iden.is_start_or_end_point( - line, - point_a - ) - - assert gis_iden.is_start_or_end_point( - line, - point_b 
- ) - - assert not gis_iden.is_start_or_end_point( - line, - point_c - ) - - assert not gis_iden.is_start_or_end_point( - line, - point_d - ) - - assert not gis_iden.is_start_or_end_point( - line, - point_e - ) - - assert not gis_iden.is_start_or_end_point( - line, - point_f - ) - - assert not gis_iden.is_start_or_end_point( - line, - point_g - ) - + + assert gis_iden.is_start_or_end_point(line, point_a) + + assert gis_iden.is_start_or_end_point(line, point_b) + + assert not gis_iden.is_start_or_end_point(line, point_c) + + assert not gis_iden.is_start_or_end_point(line, point_d) + + assert not gis_iden.is_start_or_end_point(line, point_e) + + assert not gis_iden.is_start_or_end_point(line, point_f) + + assert not gis_iden.is_start_or_end_point(line, point_g) + # ************************************************************************* # ************************************************************************* - + def test_find_unconnected_nodes(self): - G = nx.MultiDiGraph() - + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 1), (2, 0)]) - + G.add_nodes_from([4, 5]) - + node_keys = gis_iden.find_unconnected_nodes(G) - + true_node_keys = [4, 5] - + assert len(node_keys) == len(true_node_keys) - + for node_key in node_keys: - assert node_key in true_node_keys - + # ************************************************************************* # ************************************************************************* - + def test_identify_get_from_a_to_b(self): - # edge_keys = gis_iden.get_edges_from_a_to_b(G, 0, 1) - + G = nx.MultiDiGraph() - + G.add_edges_from( - [(0, 1, {'a': 1}), - (1, 2, {'b': 2}), - (2, 3, {'c': 3}), - (2, 3, {'d': 4}), - (1, 0, {'e': 5}), - (3, 2, {'f': 6})] - ) - + [ + (0, 1, {"a": 1}), + (1, 2, {"b": 2}), + (2, 3, {"c": 3}), + (2, 3, {"d": 4}), + (1, 0, {"e": 5}), + (3, 2, {"f": 6}), + ] + ) + # from node 0 to node 1 - + edge_keys = gis_iden.get_edges_from_a_to_b(G, 0, 1) - + true_edge_keys = [(0, 1, 0)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # from node 1 to node 0 - + edge_keys = gis_iden.get_edges_from_a_to_b(G, 1, 0) - + true_edge_keys = [(1, 0, 0)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # from node 1 to node 2 - + edge_keys = gis_iden.get_edges_from_a_to_b(G, 1, 2) - + true_edge_keys = [(1, 2, 0)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # from node 2 to node 1 - + edge_keys = gis_iden.get_edges_from_a_to_b(G, 2, 1) - + true_edge_keys = [] - + assert len(true_edge_keys) == len(edge_keys) - + # from node 2 to node 3 - + edge_keys = gis_iden.get_edges_from_a_to_b(G, 2, 3) - + true_edge_keys = [(2, 3, 0), (2, 3, 1)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # from node 3 to node 2 - + true_edge_keys = [(3, 2, 0)] - + edge_keys = gis_iden.get_edges_from_a_to_b(G, 3, 2) - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # ************************************************************************* # ************************************************************************* - + def test_identify_get_edges_involving_nodes(self): - G = nx.MultiDiGraph() - - G.add_node('noedges') - - G.add_edges_from( - [(0, 1, 0), - (1, 2, 0), - (1, 2, 1)] - ) - + + G.add_node("noedges") + + G.add_edges_from([(0, 
1, 0), (1, 2, 0), (1, 2, 1)]) + # no edges - node_key = 'noedges' + node_key = "noedges" edges = gis_iden.get_edges_involving_node(G, node_key) assert len(edges) == 0 - + # one edge node_key = 0 - true_edges = [(0,1,0)] + true_edges = [(0, 1, 0)] edges = gis_iden.get_edges_involving_node(G, node_key) assert len(edges) == len(true_edges) for edge in edges: assert edge in true_edges - + # multiple edges node_key = 1 - true_edges = [(0,1,0), (1,2,1), (1,2,0)] + true_edges = [(0, 1, 0), (1, 2, 1), (1, 2, 0)] edges = gis_iden.get_edges_involving_node(G, node_key) assert len(edges) == len(true_edges) for edge in edges: assert edge in true_edges - + # ************************************************************************* # ************************************************************************* - + def test_identify_get_edges_between_nodes(self): - G = nx.MultiDiGraph() - + G.add_edges_from( - [(0, 1, {'a': 1}), - (1, 2, {'b': 2}), - (2, 3, {'c': 3}), - (2, 3, {'d': 4}), - (1, 0, {'e': 5}), - (3, 2, {'f': 6}), - (4, 1, {'f': 6})] - ) - + [ + (0, 1, {"a": 1}), + (1, 2, {"b": 2}), + (2, 3, {"c": 3}), + (2, 3, {"d": 4}), + (1, 0, {"e": 5}), + (3, 2, {"f": 6}), + (4, 1, {"f": 6}), + ] + ) + # between two nodes that are not connected - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 2, 4) - + assert 0 == len(edge_keys) - + # between nodes 0 and 1, nominal direction - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 0, 1) - + true_edge_keys = [(0, 1, 0), (1, 0, 0)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # between nodes 0 and 1, reverse direction - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 1, 0) - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # between nodes 1 and 2, nominal direction - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 1, 2) - + true_edge_keys = [(1, 2, 0)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # between nodes 1 and 2, reverse direction - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 2, 1) - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # between nodes 2 and 3, nominal direction - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 2, 3) - + true_edge_keys = [(2, 3, 0), (2, 3, 1), (3, 2, 0)] - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # between nodes 2 and 3, reverse direction - + edge_keys = gis_iden.get_edges_between_two_nodes(G, 3, 2) - + assert len(true_edge_keys) == len(edge_keys) - + for edge_key in edge_keys: - assert edge_key in true_edge_keys - + # ************************************************************************* # ************************************************************************* - + def test_identify_edges_closest_to_nodes(self): - # network should be a OSM-nx formatted graph - + network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + # pick a certain number of nodes randomly number_nodes = 3 node_keys = list(network.nodes()) while len(node_keys) > number_nodes: - node_keys.pop(random.randint(0, len(node_keys)-1)) - + 
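# -------------------------------------------------------------------------
# Note (sketch, an assumption about intent rather than a required change):
# the while/pop loop above draws a fixed-size random subset of node keys,
# which random.sample expresses in one line using the same names:
import random

node_keys = random.sample(list(network.nodes()), k=number_nodes)
# -------------------------------------------------------------------------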
node_keys.pop(random.randint(0, len(node_keys) - 1)) + # find out which edges are closest edge_keys, projected_network = gis_iden.identify_edge_closest_to_node( - network=network, - node_keys=node_keys + network=network, node_keys=node_keys ) - + # for each node, verify that there is no closer edge - # prepare the edge geometries + # prepare the edge geometries edge_key_geos = [ - (projected_network.edges[edge_key][gis_osm.KEY_OSMNX_GEOMETRY] - if gis_osm.KEY_OSMNX_GEOMETRY in projected_network.edges[edge_key] else - LineString([ - (projected_network.nodes[edge_key[0]][gis_osm.KEY_OSMNX_X], - projected_network.nodes[edge_key[0]][gis_osm.KEY_OSMNX_Y]), - (projected_network.nodes[edge_key[1]][gis_osm.KEY_OSMNX_X], - projected_network.nodes[edge_key[1]][gis_osm.KEY_OSMNX_Y]) - ]) - ) + ( + projected_network.edges[edge_key][gis_osm.KEY_OSMNX_GEOMETRY] + if gis_osm.KEY_OSMNX_GEOMETRY in projected_network.edges[edge_key] + else LineString( + [ + ( + projected_network.nodes[edge_key[0]][gis_osm.KEY_OSMNX_X], + projected_network.nodes[edge_key[0]][gis_osm.KEY_OSMNX_Y], + ), + ( + projected_network.nodes[edge_key[1]][gis_osm.KEY_OSMNX_X], + projected_network.nodes[edge_key[1]][gis_osm.KEY_OSMNX_Y], + ), + ] + ) + ) for edge_key in edge_keys - ] - for node_index, node_key in enumerate(node_keys): + ] + for node_index, node_key in enumerate(node_keys): # prepare the node geometry the_point = Point( projected_network.nodes[node_key][gis_osm.KEY_OSMNX_X], - projected_network.nodes[node_key][gis_osm.KEY_OSMNX_Y] - ) + projected_network.nodes[node_key][gis_osm.KEY_OSMNX_Y], + ) # calculate the distances to every edge edge_point_distances = the_point.distance(edge_key_geos) # find the distance to the edge identified as the closest one shortest_distance = edge_point_distances[node_index] # the edge identified must lead to the shortest distance assert shortest_distance == min(edge_point_distances) - + # find out which edges are closest using the projected network edge_keys_proj, _ = gis_iden.identify_edge_closest_to_node( - network=projected_network, - node_keys=node_keys + network=projected_network, node_keys=node_keys ) # assert that the same edges have been returned (with the projected network) assert len(edge_keys) == len(edge_keys_proj) for edge_key1, edge_key2 in zip(edge_keys, edge_keys_proj): assert edge_key1 == edge_key2 - + # ************************************************************************* # ************************************************************************* - + def test_identify_roundabouts(self): - # ********************************************************************* # ********************************************************************* - + edge_container = [ # roundabout with 2 nodes - (0, 1, {'length': 3, 'oneway': True}), - (1, 0, {'length': 5, 'oneway': True}), + (0, 1, {"length": 3, "oneway": True}), + (1, 0, {"length": 5, "oneway": True}), # roundabout with 3 nodes - (2, 3, {'length': 8, 'oneway': True}), - (3, 4, {'length': 19, 'oneway': True}), - (4, 2, {'length': 4, 'oneway': True}), + (2, 3, {"length": 8, "oneway": True}), + (3, 4, {"length": 19, "oneway": True}), + (4, 2, {"length": 4, "oneway": True}), # roundabout with 4 nodes - (5, 6, {'length': 8, 'oneway': True}), - (6, 7, {'length': 5, 'oneway': True}), - (7, 8, {'length': 10, 'oneway': True}), - (8, 5, {'length': 7, 'oneway': True}), + (5, 6, {"length": 8, "oneway": True}), + (6, 7, {"length": 5, "oneway": True}), + (7, 8, {"length": 10, "oneway": True}), + (8, 5, {"length": 7, "oneway": True}), # roundabout 
within roundabout 2 - (2, 4, {'length': 6, 'oneway': True}), + (2, 4, {"length": 6, "oneway": True}), # roundabout overlapping with roundabouts 1 and 3 - (9, 10, {'length': 14, 'oneway': True}), - (10, 11, {'length': 57, 'oneway': True}), - (11, 9, {'length': 13, 'oneway': True}), + (9, 10, {"length": 14, "oneway": True}), + (10, 11, {"length": 57, "oneway": True}), + (11, 9, {"length": 13, "oneway": True}), # fake roundabouts # self loop - (12, 12, {'length': 0, 'oneway': True}), + (12, 12, {"length": 0, "oneway": True}), # no oneway attr - (13, 14, {'length': 9}), - (14, 15, {'length': 6, 'oneway': True}), - (15, 13, {'length': 7, 'oneway': True}), + (13, 14, {"length": 9}), + (14, 15, {"length": 6, "oneway": True}), + (15, 13, {"length": 7, "oneway": True}), # oneway = False - (16, 17, {'length': 9, 'oneway': True}), - (17, 18, {'length': 5, 'oneway': False}), - (18, 19, {'length': 3, 'oneway': True}), - (19, 16, {'length': 2, 'oneway': True}), + (16, 17, {"length": 9, "oneway": True}), + (17, 18, {"length": 5, "oneway": False}), + (18, 19, {"length": 3, "oneway": True}), + (19, 16, {"length": 2, "oneway": True}), # connect roundabouts 1 and 3 with an edge - (1, 6, {'length': 9, 'oneway': False}), - (6, 1, {'length': 24, 'oneway': False}), + (1, 6, {"length": 9, "oneway": False}), + (6, 1, {"length": 24, "oneway": False}), # create an edge between node 7 and 5 that cannot be used, - (7, 5, {'length': 6, 'oneway': False}) + (7, 5, {"length": 6, "oneway": False}), ] - + network = nx.MultiDiGraph() - + network.add_edges_from(edge_container) - + # ********************************************************************* # ********************************************************************* - + # true roundabouts: must be oneway streets forming an endless loop - + true_roundabouts = [ # roundabout with 2 nodes [0, 1], @@ -2634,16 +2547,15 @@ class TestGisIdentify: # roundabout overlapping with roundabouts 1 and 3 [9, 10, 11], ] - + for roundabout in true_roundabouts: - assert gis_iden.is_roundabout(network, path=roundabout) - + # ********************************************************************* # ********************************************************************* - + # fake roundabouts - + fake_roundabouts = [ # self loop [12, 12], @@ -2652,7 +2564,7 @@ class TestGisIdentify: # oneway = False [16, 17, 18, 19], # true roundabout with one non-existent node added - [2, 3, 'h', 4], + [2, 3, "h", 4], # path whose last node does not lead to the first through a valid edge [5, 6, 7], # true roundabout without the last node (8) @@ -2660,702 +2572,960 @@ class TestGisIdentify: # # connect 1 and 3 with an edge # [1, 6] # why? 
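# -------------------------------------------------------------------------
# Sketch of the predicate exercised in this test (assumed semantics, not
# the library implementation): a path only qualifies as a roundabout if it
# visits at least two distinct nodes and every consecutive pair, wrapping
# back to the start, is joined by an edge whose 'oneway' attribute is True.
def _looks_like_roundabout(g, path) -> bool:
    if len(set(path)) < 2:
        return False  # a self loop is not a roundabout
    pairs = zip(path, path[1:] + path[:1])  # close the loop
    return all(
        any(
            attrs.get("oneway") is True
            for attrs in (g.get_edge_data(u, v) or {}).values()
        )
        for u, v in pairs
    )
# -------------------------------------------------------------------------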
] - + for roundabout in fake_roundabouts: - assert not gis_iden.is_roundabout(network, path=roundabout) - + # ********************************************************************* # ********************************************************************* - + # errors - + # incorrect path - + error_raised = False try: gis_iden.is_roundabout(network, path=[1]) except ValueError: error_raised = True assert error_raised - + + # ************************************************************************* # ************************************************************************* - # ************************************************************************* - + def test_identify_points_extremities(self): - # ********************************************************************* # ********************************************************************* - + # redundant points - + line_coords = tuple([(1.0, 1.0), (2.0, 2.0), (3.0, 0.0)]) line = LineString(line_coords) - + close_to_start, close_to_end = gis_iden.close_to_extremities( line=line, points=[Point(1, 1), Point(2, 2), Point(3, 0)], ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([2]) - + # redundant points, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(1, 1), Point(2, 2), Point(3, 0)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([2]) abs_tols = [1e-3, 1e-3, 1e-3] true_line_distances = [0, 0, 0] true_start_distances = [0, 1.41421356, 2.23606798] true_end_distances = [2.23606798, 2.23606798, 0] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # redundant points, different order - + close_to_start, close_to_end = gis_iden.close_to_extremities( - line=line, - points=[Point(3, 0), Point(2, 2), Point(1, 1)] + line=line, points=[Point(3, 0), Point(2, 2), Point(1, 1)] ) - + assert repr(close_to_start) == repr([2]) assert repr(close_to_end) == repr([0]) - + # redundant points, different order, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(3, 0), Point(2, 2), Point(1, 1)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([2]) assert repr(close_to_end) == repr([0]) abs_tols = [1e-3, 1e-3, 1e-3] true_line_distances = [0, 0, 0] true_start_distances = [2.23606798, 1.41421356, 0] true_end_distances = 
[0, 2.23606798, 2.23606798] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # redundant points, yet another order - + close_to_start, close_to_end = gis_iden.close_to_extremities( - line=line, - points=[Point(2, 2), Point(3, 0), Point(1, 1)] + line=line, points=[Point(2, 2), Point(3, 0), Point(1, 1)] ) - + assert repr(close_to_start) == repr([2]) assert repr(close_to_end) == repr([1]) - + # redundant points, yet another order, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(2, 2), Point(3, 0), Point(1, 1)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([2]) assert repr(close_to_end) == repr([1]) abs_tols = [1e-3, 1e-3, 1e-3] true_line_distances = [0, 0, 0] true_start_distances = [1.41421356, 2.23606798, 0] true_end_distances = [2.23606798, 0, 2.23606798] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # new points, directly on the line - + close_to_start, close_to_end = gis_iden.close_to_extremities( line=line, points=[Point(1.2, 1.2), Point(3, 0), Point(1.5, 1.5)], ) - + assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([1]) - + # new points, directly on the line, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(1.2, 1.2), Point(3, 0), Point(1.5, 1.5)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([1]) abs_tols = [1e-3, 1e-3, 1e-3] true_line_distances = [0, 0, 0] true_start_distances = [0.28284271, 2.23606798, 0.70710678] true_end_distances = [2.16333077, 0, 2.12132034] - for line_d, start_d, end_d, true_line_d, 
true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # new points, extending beyond the line - + close_to_start, close_to_end = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.5), Point(3, 0), Point(4, -2)], ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([1, 2]) - + # new points, extending beyond the line, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.5), Point(3, 0), Point(4, -2)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([1, 2]) abs_tols = [1e-3, 1e-3, 1e-3] true_line_distances = [0.70710678, 0, 2.23606798] true_start_distances = [0.70710678, 2.23606798, 4.24264069] true_end_distances = [2.54950976, 0, 2.23606798] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # new points, extending beyond the line - + close_to_start, close_to_end = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.5), Point(1.0, 1.0), Point(4, -2)], ) - + assert repr(close_to_start) == repr([0, 1]) assert repr(close_to_end) == repr([2]) - + # new points, extending beyond the line, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.5), Point(1.0, 1.0), Point(4, -2)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([0, 1]) assert repr(close_to_end) == repr([2]) abs_tols = [1e-3, 1e-3, 1e-3] true_line_distances = [0.70710678, 0, 2.23606798] true_start_distances = [0.70710678, 0, 4.24264069] true_end_distances = [2.54950976, 2.23606798, 2.23606798] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - 
line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # new points, not on the line # expected result: the new points appear on the geometry - + close_to_start, close_to_end = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.75), Point(3.0, -0.5)], ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([1]) - + # new points, not on the line, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.75), Point(3.0, -0.5)], - return_distances=True + return_distances=True, ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([1]) abs_tols = [1e-3, 1e-3] true_line_distances = [0.55901699, 0.5] true_start_distances = [0.55901699, 2.5] true_end_distances = [2.61007663, 0.5] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # new points, close to multiple segments - + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]) line = LineString(line_coords) - + close_to_start, close_to_end = gis_iden.close_to_extremities( line=line, points=[Point(0.5, 0.5)], ) - + assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([]) - + # new points, close to multiple segments, with distance - - (close_to_start, - close_to_end, - line_distances, - start_distances, - end_distances) = gis_iden.close_to_extremities( - line=line, - points=[Point(0.5, 0.5)], - return_distances=True + + ( + close_to_start, + close_to_end, + line_distances, + start_distances, + end_distances, + ) = gis_iden.close_to_extremities( + line=line, points=[Point(0.5, 0.5)], return_distances=True ) - + assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([]) abs_tols = [1e-3] true_line_distances = [0.5] true_start_distances = [0.70710678] true_end_distances = [0.70710678] - for line_d, start_d, end_d, true_line_d, true_start_d, true_end_d, abs_tol in zip( - line_distances, - start_distances, - end_distances, - true_line_distances, - 
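# -------------------------------------------------------------------------
# Sketch of the geometry behind the expected values in these blocks (plain
# shapely, no project-specific code): distance to the line versus distances
# to its start and end points.
from shapely.geometry import LineString, Point

seg = LineString([(1.0, 1.0), (2.0, 2.0), (3.0, 0.0)])
p = Point(3, 0)
assert p.distance(seg) == 0.0  # the point lies on the line
d_start = p.distance(Point(seg.coords[0]))  # sqrt(2**2 + 1**2) ~ 2.23606798
assert p.distance(Point(seg.coords[-1])) == 0.0  # it is the end point
# -------------------------------------------------------------------------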
true_start_distances, - true_end_distances, - abs_tols - ): + for ( + line_d, + start_d, + end_d, + true_line_d, + true_start_d, + true_end_d, + abs_tol, + ) in zip( + line_distances, + start_distances, + end_distances, + true_line_distances, + true_start_distances, + true_end_distances, + abs_tols, + ): assert isclose(line_d, true_line_d, abs_tol=abs_tol) assert isclose(start_d, true_start_d, abs_tol=abs_tol) assert isclose(end_d, true_end_d, abs_tol=abs_tol) - + # ********************************************************************* # ********************************************************************* - + # point equidistant to start and end, favour start - + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]) line = LineString(line_coords) - + close_to_start, close_to_end = gis_iden.close_to_extremities( - line=line, - points=[Point(0.5, -0.5)], - use_start_point_equidistant=True + line=line, points=[Point(0.5, -0.5)], use_start_point_equidistant=True ) - + assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([]) - + # ********************************************************************* # ********************************************************************* - + # point equidistant to start and end, favour end - + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]) line = LineString(line_coords) - + close_to_start, close_to_end = gis_iden.close_to_extremities( - line=line, - points=[Point(0.5, -0.5)], - use_start_point_equidistant=False + line=line, points=[Point(0.5, -0.5)], use_start_point_equidistant=False ) - + assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([0]) - + # ************************************************************************* # ************************************************************************* - + def test_identify_self_loops(self): - # find one self-loop - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0, 1, 0), - (1, 2, 0), - (2, 0, 0), - (1, 1, 0) - ]) - - true_selflooping_nodes = [1] - + + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0), (1, 1, 0)]) + + true_selflooping_nodes = [1] + selflooping_nodes = list(gis_iden.find_self_loops(network)) - + assert len(selflooping_nodes) == len(true_selflooping_nodes) - + for node_key in selflooping_nodes: - assert node_key in true_selflooping_nodes - + # find two self-loops - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0, 1, 0), - (1, 2, 0), - (2, 0, 0), - (1, 1, 0), - (2, 2, 0) - ]) - + + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0), (1, 1, 0), (2, 2, 0)]) + true_selflooping_nodes = [1, 2] - + selflooping_nodes = list(gis_iden.find_self_loops(network)) - + assert len(selflooping_nodes) == len(true_selflooping_nodes) - + for node_key in selflooping_nodes: - assert node_key in true_selflooping_nodes - + # find no self-loops - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0, 1, 0), - (1, 2, 0), - (2, 0, 0) - ]) - + + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0)]) + true_selflooping_nodes = [] - + selflooping_nodes = list(gis_iden.find_self_loops(network)) - + assert len(selflooping_nodes) == len(true_selflooping_nodes) - + assert len(true_selflooping_nodes) == 0 - + # ************************************************************************* # ************************************************************************* - + def test_reversed_edges(self): - # - + network = nx.MultiDiGraph() - - network.add_nodes_from([ - (0, {gis_osm.KEY_OSMNX_X: 0, 
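# -------------------------------------------------------------------------
# Note (sketch, assuming equivalent semantics): find_self_loops, exercised
# above, mirrors the stock networkx helper, so the expected lists can be
# cross-checked with
#     list(nx.nodes_with_selfloops(network))
# -------------------------------------------------------------------------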
gis_osm.KEY_OSMNX_Y: 0}), - (1, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 1}), - (2, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 1}), - (3, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 2}) - ]) - - network.add_edges_from([ - # reversed, without geometry - (0, 1, 0, {gis_osm.KEY_OSMNX_OSMID: 1, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: False}), - (1, 0, 0, {gis_osm.KEY_OSMNX_OSMID: 1, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # reversed with geometry - (2, 3, 0, {gis_osm.KEY_OSMNX_OSMID: 2, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_GEOMETRY: LineString([(0, 1), (0, 2)]), - gis_osm.KEY_OSMNX_REVERSED: False}), - (3, 2, 0, {gis_osm.KEY_OSMNX_OSMID: 2, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_GEOMETRY: LineString([(0, 2), (0, 1)]), - gis_osm.KEY_OSMNX_REVERSED: True}), - # parallel to (0,1,0) but with different osmid - (0, 1, 1, {gis_osm.KEY_OSMNX_OSMID: 3, - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_GEOMETRY: LineString( - [(0, 0), (1, 0), (1, 1), (0, 1)] - ), - gis_osm.KEY_OSMNX_REVERSED: False}), - # parallel to (2,3,0) but with different osmid - (2, 3, 1, {gis_osm.KEY_OSMNX_OSMID: 4, - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_GEOMETRY: LineString( - [(0, 1), (1, 1), (1, 2), (0, 2)] - ), - gis_osm.KEY_OSMNX_REVERSED: False}), - # parallel to (2,3,0) but with a different geometry - (2, 3, 2, {gis_osm.KEY_OSMNX_OSMID: 2, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_GEOMETRY: LineString( - [(0, 1), (1, 1), (1, 2), (0, 2)] - ), - gis_osm.KEY_OSMNX_REVERSED: False}), - # parallel to (2,3,0) but without a geometry - (2, 3, 3, {gis_osm.KEY_OSMNX_OSMID: 2, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: False}), - # parallel to (3,2,0) but without a geometry - (3, 2, 1, {gis_osm.KEY_OSMNX_OSMID: 4, - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # parallel to (3,2,0) but with a different geometry - (3, 2, 2, {gis_osm.KEY_OSMNX_OSMID: 2, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_GEOMETRY: LineString([(0, 0), (0, 1)]), - gis_osm.KEY_OSMNX_REVERSED: True}), - # edge sharing one node with (0,1,0) and (2,3,0) - (0, 2, 0, {gis_osm.KEY_OSMNX_OSMID: 5, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_LENGTH: 4}), - # edge that is identical to (1,0,0) but has the opposite reversed attr. 
- (1, 0, 1, {gis_osm.KEY_OSMNX_OSMID: 1, - gis_osm.KEY_OSMNX_LENGTH: 1, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: False}), - # edge that is identical to (1,0,0) but has a different length - (1, 0, 2, {gis_osm.KEY_OSMNX_OSMID: 1, - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # edges with different list sizes for 'reversed' - (5, 0, 0, {gis_osm.KEY_OSMNX_OSMID: 1, - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # simplified edges with the wrong number of osmids - (6, 7, 0, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: False}), - (7, 6, 0, {gis_osm.KEY_OSMNX_OSMID: [1,2,3], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # simplified edges with different osmids - (6, 7, 1, {gis_osm.KEY_OSMNX_OSMID: [1,3], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: False}), - (7, 6, 1, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # edges without a 'reversed' attribute - (7, 6, 2, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True, - gis_osm.KEY_OSMNX_LENGTH: 3}), - # edges with different 'osmid' attribute types - (7, 6, 3, {gis_osm.KEY_OSMNX_OSMID: {1,2}, - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: True}), - # simplified edges with non-boolean types - (7, 8, 0, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: 0}), - (8, 7, 0, {gis_osm.KEY_OSMNX_OSMID: [2,1], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: 1}), - # simplified edges without True or without False - (7, 8, 1, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True]}), - (8, 7, 1, {gis_osm.KEY_OSMNX_OSMID: [2,1], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [False]}), - # simplified edges - (7, 8, 2, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True,False]}), - (8, 7, 2, {gis_osm.KEY_OSMNX_OSMID: [2,1], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True,False]}), - ]) - + + network.add_nodes_from( + [ + (0, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 0}), + (1, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 1}), + (2, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 1}), + (3, {gis_osm.KEY_OSMNX_X: 0, gis_osm.KEY_OSMNX_Y: 2}), + ] + ) + + network.add_edges_from( + [ + # reversed, without geometry + ( + 0, + 1, + 0, + { + gis_osm.KEY_OSMNX_OSMID: 1, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + ( + 1, + 0, + 0, + { + gis_osm.KEY_OSMNX_OSMID: 1, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # reversed with geometry + ( + 2, + 3, + 0, + { + gis_osm.KEY_OSMNX_OSMID: 2, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_GEOMETRY: LineString([(0, 1), (0, 2)]), + 
gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + ( + 3, + 2, + 0, + { + gis_osm.KEY_OSMNX_OSMID: 2, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_GEOMETRY: LineString([(0, 2), (0, 1)]), + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # parallel to (0,1,0) but with different osmid + ( + 0, + 1, + 1, + { + gis_osm.KEY_OSMNX_OSMID: 3, + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_GEOMETRY: LineString( + [(0, 0), (1, 0), (1, 1), (0, 1)] + ), + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + # parallel to (2,3,0) but with different osmid + ( + 2, + 3, + 1, + { + gis_osm.KEY_OSMNX_OSMID: 4, + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_GEOMETRY: LineString( + [(0, 1), (1, 1), (1, 2), (0, 2)] + ), + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + # parallel to (2,3,0) but with a different geometry + ( + 2, + 3, + 2, + { + gis_osm.KEY_OSMNX_OSMID: 2, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_GEOMETRY: LineString( + [(0, 1), (1, 1), (1, 2), (0, 2)] + ), + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + # parallel to (2,3,0) but without a geometry + ( + 2, + 3, + 3, + { + gis_osm.KEY_OSMNX_OSMID: 2, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + # parallel to (3,2,0) but without a geometry + ( + 3, + 2, + 1, + { + gis_osm.KEY_OSMNX_OSMID: 4, + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # parallel to (3,2,0) but with a different geometry + ( + 3, + 2, + 2, + { + gis_osm.KEY_OSMNX_OSMID: 2, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_GEOMETRY: LineString([(0, 0), (0, 1)]), + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # edge sharing one node with (0,1,0) and (2,3,0) + ( + 0, + 2, + 0, + { + gis_osm.KEY_OSMNX_OSMID: 5, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_LENGTH: 4, + }, + ), + # edge that is identical to (1,0,0) but has the opposite reversed attr. 
+ ( + 1, + 0, + 1, + { + gis_osm.KEY_OSMNX_OSMID: 1, + gis_osm.KEY_OSMNX_LENGTH: 1, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + # edge that is identical to (1,0,0) but has a different length + ( + 1, + 0, + 2, + { + gis_osm.KEY_OSMNX_OSMID: 1, + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # edges with different list sizes for 'reversed' + ( + 5, + 0, + 0, + { + gis_osm.KEY_OSMNX_OSMID: 1, + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # simplified edges with the wrong number of osmids + ( + 6, + 7, + 0, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + ( + 7, + 6, + 0, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2, 3], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # simplified edges with different osmids + ( + 6, + 7, + 1, + { + gis_osm.KEY_OSMNX_OSMID: [1, 3], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: False, + }, + ), + ( + 7, + 6, + 1, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # edges without a 'reversed' attribute + ( + 7, + 6, + 2, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + gis_osm.KEY_OSMNX_LENGTH: 3, + }, + ), + # edges with different 'osmid' attribute types + ( + 7, + 6, + 3, + { + gis_osm.KEY_OSMNX_OSMID: {1, 2}, + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: True, + }, + ), + # simplified edges with non-boolean types + ( + 7, + 8, + 0, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: 0, + }, + ), + ( + 8, + 7, + 0, + { + gis_osm.KEY_OSMNX_OSMID: [2, 1], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: 1, + }, + ), + # simplified edges without True or without False + ( + 7, + 8, + 1, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True], + }, + ), + ( + 8, + 7, + 1, + { + gis_osm.KEY_OSMNX_OSMID: [2, 1], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [False], + }, + ), + # simplified edges + ( + 7, + 8, + 2, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + }, + ), + ( + 8, + 7, + 2, + { + gis_osm.KEY_OSMNX_OSMID: [2, 1], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + }, + ), + ] + ) + # test reversed edges without geometry assert gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 0), edge_b=(1, 0, 0)) + network, edge_a=(0, 1, 0), edge_b=(1, 0, 0) + ) assert gis_iden.edges_are_in_reverse( - network, edge_a=(1, 0, 0), edge_b=(0, 1, 0)) - + network, edge_a=(1, 0, 0), edge_b=(0, 1, 0) + ) + # test reversed edges with geometry assert gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 0), edge_b=(3, 2, 0)) + network, edge_a=(2, 3, 0), edge_b=(3, 2, 0) + ) assert gis_iden.edges_are_in_reverse( - network, edge_a=(3, 2, 0), edge_b=(2, 3, 0)) 
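# -------------------------------------------------------------------------
# Sketch of the geometry criterion these assertions suggest (an assumption,
# not the library code): two LineStrings only count as reverses of one
# another when their coordinate sequences mirror exactly.
from shapely.geometry import LineString

def _mirrored(geo_a: LineString, geo_b: LineString) -> bool:
    return list(geo_a.coords) == list(geo_b.coords)[::-1]

# e.g. LineString([(0, 1), (0, 2)]) mirrors LineString([(0, 2), (0, 1)]),
# which is why edges (2, 3, 0) and (3, 2, 0) are reported as reversed.
# -------------------------------------------------------------------------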
- + network, edge_a=(3, 2, 0), edge_b=(2, 3, 0) + ) + # test non-reversed edges between the same nodes assert not gis_iden.edges_are_in_reverse( - network, edge_a=(1, 0, 0), edge_b=(0, 1, 1)) + network, edge_a=(1, 0, 0), edge_b=(0, 1, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 1), edge_b=(1, 0, 0)) - + network, edge_a=(0, 1, 1), edge_b=(1, 0, 0) + ) + assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 0), edge_b=(0, 1, 1)) + network, edge_a=(0, 1, 0), edge_b=(0, 1, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 1), edge_b=(0, 1, 0)) - + network, edge_a=(0, 1, 1), edge_b=(0, 1, 0) + ) + # test non-reversed edges between the same nodes assert not gis_iden.edges_are_in_reverse( - network, edge_a=(3, 2, 0), edge_b=(2, 3, 1)) + network, edge_a=(3, 2, 0), edge_b=(2, 3, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 1), edge_b=(3, 2, 0)) - + network, edge_a=(2, 3, 1), edge_b=(3, 2, 0) + ) + assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 0), edge_b=(2, 3, 1)) + network, edge_a=(2, 3, 0), edge_b=(2, 3, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 1), edge_b=(2, 3, 0)) - + network, edge_a=(2, 3, 1), edge_b=(2, 3, 0) + ) + # test edges that are not parallel/anti-parallel assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 0), edge_b=(0, 2, 0)) + network, edge_a=(0, 1, 0), edge_b=(0, 2, 0) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 2, 0), edge_b=(0, 1, 0)) + network, edge_a=(0, 2, 0), edge_b=(0, 1, 0) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 0), edge_b=(2, 3, 0)) + network, edge_a=(0, 1, 0), edge_b=(2, 3, 0) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 0), edge_b=(0, 1, 0)) - + network, edge_a=(2, 3, 0), edge_b=(0, 1, 0) + ) + # test edges with the same osmid but whose 'reversed' attributes are the same assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 0), edge_b=(1, 0, 1)) + network, edge_a=(0, 1, 0), edge_b=(1, 0, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(1, 0, 1), edge_b=(0, 1, 0)) - + network, edge_a=(1, 0, 1), edge_b=(0, 1, 0) + ) + # test edges that have the same osmid but somehow have different lengths assert not gis_iden.edges_are_in_reverse( - network, edge_a=(0, 1, 0), edge_b=(1, 0, 2)) - + network, edge_a=(0, 1, 0), edge_b=(1, 0, 2) + ) + # test edges that have different geometries assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 2), edge_b=(3, 2, 0)) + network, edge_a=(2, 3, 2), edge_b=(3, 2, 0) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(3, 2, 0), edge_b=(2, 3, 2)) - + network, edge_a=(3, 2, 0), edge_b=(2, 3, 2) + ) + # # old: # # test one edge with a simple geometry against one without a geometry (False case: no match) # assert not gis_iden.edges_are_in_reverse( @@ -3367,99 +3537,112 @@ class TestGisIdentify: # network, edge_a=(2, 3, 3), edge_b=(3, 2, 0)) # assert gis_iden.edges_are_in_reverse( # network, edge_a=(3, 2, 0), edge_b=(2, 3, 3)) - + # new: geometries should exist in both or not at all # test one edge with a simple geometry against one without a geometry (False case: no match) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 3), edge_b=(3, 2, 2)) + network, edge_a=(2, 3, 3), edge_b=(3, 2, 2) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(3, 2, 2), edge_b=(2, 3, 3)) + network, edge_a=(3, 2, 2), edge_b=(2, 3, 3) + ) # 
test one edge with a simple geometry against one without a geometry (True case: match) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 3), edge_b=(3, 2, 0)) + network, edge_a=(2, 3, 3), edge_b=(3, 2, 0) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(3, 2, 0), edge_b=(2, 3, 3)) - + network, edge_a=(3, 2, 0), edge_b=(2, 3, 3) + ) + # test one edge with a simplified geometry against one without a geometry (False) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(2, 3, 1), edge_b=(3, 2, 1)) + network, edge_a=(2, 3, 1), edge_b=(3, 2, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(3, 2, 1), edge_b=(2, 3, 1)) - + network, edge_a=(3, 2, 1), edge_b=(2, 3, 1) + ) + # test nonexistent edges error_raised = False try: - gis_iden.edges_are_in_reverse( - network, edge_a=(5, 0, 0), edge_b=(0, 5, 0)) + gis_iden.edges_are_in_reverse(network, edge_a=(5, 0, 0), edge_b=(0, 5, 0)) except ValueError: error_raised = True assert error_raised - + # test simplified edges with the wrong number of osmids assert not gis_iden.edges_are_in_reverse( - network, edge_a=(6, 7, 0), edge_b=(7, 6, 0)) + network, edge_a=(6, 7, 0), edge_b=(7, 6, 0) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(7, 6, 0), edge_b=(6, 7, 0)) + network, edge_a=(7, 6, 0), edge_b=(6, 7, 0) + ) # test simplified edges with different osmids assert not gis_iden.edges_are_in_reverse( - network, edge_a=(6, 7, 1), edge_b=(7, 6, 1)) + network, edge_a=(6, 7, 1), edge_b=(7, 6, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(7, 6, 1), edge_b=(6, 7, 1)) + network, edge_a=(7, 6, 1), edge_b=(6, 7, 1) + ) # test simplified edges assert gis_iden.edges_are_in_reverse( - network, edge_a=(6, 7, 0), edge_b=(7, 6, 1)) + network, edge_a=(6, 7, 0), edge_b=(7, 6, 1) + ) assert gis_iden.edges_are_in_reverse( - network, edge_a=(7, 6, 1), edge_b=(6, 7, 0)) - + network, edge_a=(7, 6, 1), edge_b=(6, 7, 0) + ) + # test edges with a 'reversed' attribute assert gis_iden.edges_are_in_reverse( - network, edge_a=(6, 7, 0), edge_b=(7, 6, 2)) + network, edge_a=(6, 7, 0), edge_b=(7, 6, 2) + ) assert gis_iden.edges_are_in_reverse( - network, edge_a=(7, 6, 2), edge_b=(6, 7, 0)) - + network, edge_a=(7, 6, 2), edge_b=(6, 7, 0) + ) + # test simplified edges without True or without False assert not gis_iden.edges_are_in_reverse( - network, edge_a=(7,8,1), edge_b=(8,7,1)) + network, edge_a=(7, 8, 1), edge_b=(8, 7, 1) + ) assert not gis_iden.edges_are_in_reverse( - network, edge_a=(8,7,1), edge_b=(7,8,1)) + network, edge_a=(8, 7, 1), edge_b=(7, 8, 1) + ) # test simplified edges assert gis_iden.edges_are_in_reverse( - network, edge_a=(7,8,2), edge_b=(8,7,2)) + network, edge_a=(7, 8, 2), edge_b=(8, 7, 2) + ) assert gis_iden.edges_are_in_reverse( - network, edge_a=(8,7,2), edge_b=(7,8,2)) - + network, edge_a=(8, 7, 2), edge_b=(7, 8, 2) + ) + # errors - + # test edges with different 'osmid' attribute types error_raised = False try: assert not gis_iden.edges_are_in_reverse( - network, - edge_a=(6, 7, 0), - edge_b=(7, 6, 3) - ) + network, edge_a=(6, 7, 0), edge_b=(7, 6, 3) + ) except ValueError: error_raised = True assert error_raised - + # test edges with different 'osmid' attribute types error_raised = False try: assert not gis_iden.edges_are_in_reverse( - network, - edge_a=(7, 6, 3), - edge_b=(6, 7, 0) - ) + network, edge_a=(7, 6, 3), edge_b=(6, 7, 0) + ) except ValueError: error_raised = True assert error_raised - + # test simplified edges with non-boolean types error_raised = False try: 
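# -------------------------------------------------------------------------
# Note (sketch): the recurring error_raised/try/except pattern in this
# class is equivalent to pytest's context manager; assuming pytest is
# available, the nonexistent-edge check above could read:
#     import pytest
#     with pytest.raises(ValueError):
#         gis_iden.edges_are_in_reverse(
#             network, edge_a=(5, 0, 0), edge_b=(0, 5, 0)
#         )
# -------------------------------------------------------------------------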
assert not gis_iden.edges_are_in_reverse( - network, edge_a=(7,8,0), edge_b=(8,7,0) - ) + network, edge_a=(7, 8, 0), edge_b=(8, 7, 0) + ) except ValueError: error_raised = True assert error_raised @@ -3467,121 +3650,175 @@ class TestGisIdentify: error_raised = False try: assert not gis_iden.edges_are_in_reverse( - network, edge_a=(8,7,0), edge_b=(7,8,0) - ) + network, edge_a=(8, 7, 0), edge_b=(7, 8, 0) + ) except ValueError: error_raised = True assert error_raised # test simplified edges with non-boolean types assert not gis_iden.edges_are_in_reverse( - network, edge_a=(6,7,0), edge_b=(8,7,0) - ) + network, edge_a=(6, 7, 0), edge_b=(8, 7, 0) + ) # test simplified edges with non-boolean types, reverse assert not gis_iden.edges_are_in_reverse( - network, edge_a=(8,7,0), edge_b=(6,7,0) - ) + network, edge_a=(8, 7, 0), edge_b=(6, 7, 0) + ) # test simplified edges with non-boolean types assert not gis_iden.edges_are_in_reverse( - network, edge_a=(6,7,0), edge_b=(7,8,0) - ) + network, edge_a=(6, 7, 0), edge_b=(7, 8, 0) + ) # test simplified edges with non-boolean types, reverse assert not gis_iden.edges_are_in_reverse( - network, edge_a=(7,8,0), edge_b=(6,7,0) - ) - + network, edge_a=(7, 8, 0), edge_b=(6, 7, 0) + ) + # ************************************************************************* # ************************************************************************* - + def test_osmnx_compliance(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) - + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) + for edge_key in network.edges(keys=True): assert gis_iden.is_edge_osmnx_compliant(network, edge_key) - + # try a non-existent edge - edge_key = ('a','b','c') + edge_key = ("a", "b", "c") assert not network.has_edge(*edge_key) - + error_raised = False try: assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) except ValueError: error_raised = True assert error_raised - + # add uncompliant edges - - network.add_edges_from([ - # edge with non-integer osmids - ('a', 'b', 0, {gis_osm.KEY_OSMNX_OSMID: 'hello', - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True,False]}), - # edge with non-integer osmids in a list - ('a', 'b', 1, {gis_osm.KEY_OSMNX_OSMID: [1,'a'], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True,False]}), - # edge with non-numeric lengths - ('a', 'b', 2, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 'three', - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True,False]}), - # edge with non-LineString geometries - ('a', 'b', 3, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: [True,False], - gis_osm.KEY_OSMNX_GEOMETRY: Point((1,0))}), - # edge with non-boolean reversed attr in a list - ('a', 'b', 4, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_ONEWAY: False, - gis_osm.KEY_OSMNX_REVERSED: ['True',False]}), - # edge with non-boolean oneway - ('a', 'b', 5, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_REVERSED: [True,False], - gis_osm.KEY_OSMNX_ONEWAY: 'True'}), - # edge with non-boolean oneway in a list - ('a', 'b', 6, 
{gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_REVERSED: [True,False], - gis_osm.KEY_OSMNX_ONEWAY: [False,'True']}), - # edge without all the essential attributes - ('a', 'b', 7, {gis_osm.KEY_OSMNX_OSMID: [1,2], - gis_osm.KEY_OSMNX_REVERSED: [True,False], - gis_osm.KEY_OSMNX_ONEWAY: False}), - - ]) - + + network.add_edges_from( + [ + # edge with non-integer osmids + ( + "a", + "b", + 0, + { + gis_osm.KEY_OSMNX_OSMID: "hello", + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + }, + ), + # edge with non-integer osmids in a list + ( + "a", + "b", + 1, + { + gis_osm.KEY_OSMNX_OSMID: [1, "a"], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + }, + ), + # edge with non-numeric lengths + ( + "a", + "b", + 2, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: "three", + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + }, + ), + # edge with non-LineString geometries + ( + "a", + "b", + 3, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + gis_osm.KEY_OSMNX_GEOMETRY: Point((1, 0)), + }, + ), + # edge with non-boolean reversed attr in a list + ( + "a", + "b", + 4, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_ONEWAY: False, + gis_osm.KEY_OSMNX_REVERSED: ["True", False], + }, + ), + # edge with non-boolean oneway + ( + "a", + "b", + 5, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + gis_osm.KEY_OSMNX_ONEWAY: "True", + }, + ), + # edge with non-boolean oneway in a list + ( + "a", + "b", + 6, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_REVERSED: [True, False], + gis_osm.KEY_OSMNX_ONEWAY: [False, "True"], + }, + ), + # edge without all the essential attributes + ( + "a", + "b", + 7, + { + gis_osm.KEY_OSMNX_OSMID: [1, 2], + gis_osm.KEY_OSMNX_REVERSED: [True, False], + gis_osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) + # edge with non-integer osmids - edge_key = ('a', 'b', 0) + edge_key = ("a", "b", 0) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 1) + edge_key = ("a", "b", 1) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 2) + edge_key = ("a", "b", 2) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 3) + edge_key = ("a", "b", 3) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 4) + edge_key = ("a", "b", 4) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 5) + edge_key = ("a", "b", 5) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 6) + edge_key = ("a", "b", 6) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - edge_key = ('a', 'b', 7) + edge_key = ("a", "b", 7) assert not gis_iden.is_edge_osmnx_compliant(network, edge_key) - + # error_raised = False # try: # edge_key = ('a', 'b', 'c') @@ -3589,53 +3826,53 @@ class TestGisIdentify: # except ValueError: # error_raised = True # assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_reversed_edges_osmnx(self): - # get the 
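# Illustrative aside: a rough sketch of the attribute rules the non-compliant
# edges above violate, assuming (from cases 0 to 7) that a compliant edge
# needs integer osmids, a numeric length and boolean oneway/reversed flags;
# the geometry check (case 3) is omitted for brevity. This is not the
# library's implementation and _looks_osmnx_compliant is hypothetical.
def _looks_osmnx_compliant(attrs: dict) -> bool:
    try:
        osmid, length = attrs["osmid"], attrs["length"]
        oneway, reversed_ = attrs["oneway"], attrs["reversed"]
    except KeyError:
        return False  # an essential attribute is missing (case 7)
    osmids = osmid if isinstance(osmid, list) else [osmid]
    if not all(isinstance(i, int) and not isinstance(i, bool) for i in osmids):
        return False  # non-integer osmids (cases 0 and 1)
    if isinstance(length, bool) or not isinstance(length, (int, float)):
        return False  # non-numeric lengths (case 2)
    flags = (oneway if isinstance(oneway, list) else [oneway]) + (
        reversed_ if isinstance(reversed_, list) else [reversed_]
    )
    return all(isinstance(f, bool) for f in flags)  # cases 4 to 6

assert not _looks_osmnx_compliant(
    {"osmid": "hello", "length": 3, "oneway": False, "reversed": [True, False]}
)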
network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) - + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) + # create edge to trigger the negative case with a different length # (317812803, 317812802, 2) edge_k = network.add_edge( - 317812803, + 317812803, 317812802, - **{'osmid': 28913471, - 'name': 'Tingstedet', - 'highway': 'residential', - 'maxspeed': '50', - 'oneway': False, - 'reversed': True, - 'length': 27.601+1} - ) + **{ + "osmid": 28913471, + "name": "Tingstedet", + "highway": "residential", + "maxspeed": "50", + "oneway": False, + "reversed": True, + "length": 27.601 + 1, + } + ) assert edge_k == 2 # create edge tp trigger the negative case with a different geometry # (317812802, 317812803, 2) edge_k = network.add_edge( 317812802, - 317812803, - **{'osmid': 28913483, - 'name': 'Tingstedet', - 'highway': 'residential', - 'maxspeed': '50', - 'oneway': False, - 'reversed': False, - 'length': 99.155, - 'geometry': LineString([(1,3),(2,4)]) - } - ) + 317812803, + **{ + "osmid": 28913483, + "name": "Tingstedet", + "highway": "residential", + "maxspeed": "50", + "oneway": False, + "reversed": False, + "length": 99.155, + "geometry": LineString([(1, 3), (2, 4)]), + } + ) assert edge_k == 2 - + # find edges that have matching edges in the reverse direction for edge_key in network.edges(keys=True): edge_dict = network.get_edge_data(*edge_key) @@ -3645,62 +3882,50 @@ class TestGisIdentify: if network.has_edge(u=edge_key[1], v=edge_key[0]): # there is an edge in the opposite sense for other_edge_key in gis_iden.get_edges_from_a_to_b( - network=network, - node_start=edge_key[1], - node_end=edge_key[0] - ): + network=network, node_start=edge_key[1], node_end=edge_key[0] + ): # check if the edges are the same but in reverse if gis_iden.edges_are_in_reverse( - network, - edge_a=edge_key, - edge_b=other_edge_key - ): + network, edge_a=edge_key, edge_b=other_edge_key + ): # the edges are the same but in reverse: - # - all attributes have to be the same or lists with - # the same content, as in a set, except for + # - all attributes have to be the same or lists with + # the same content, as in a set, except for # the geometry and reversed attributes fw_dict = network.get_edge_data(*edge_key) rv_dict = network.get_edge_data(*other_edge_key) - + # should have the same attributes assert set(fw_dict.keys()) == set(rv_dict.keys()) # the attributes must be identical except if they are # lists or if they geometries or reversed arguments for attr_key, attr_value in fw_dict.items(): - if type(attr_value) == list: # the dict values are lists, their equivalent # sets must match (they cannot be geometries) - assert ( - set(attr_value) == set(rv_dict[attr_key]) - ) + assert set(attr_value) == set(rv_dict[attr_key]) elif attr_key == gis_osm.KEY_OSMNX_GEOMETRY: # the dict values are geometries, they must be # the reverse of one another - assert ( - tuple(attr_value.coords) == - tuple(rv_dict[attr_key].reverse().coords) - ) + assert tuple(attr_value.coords) == tuple( + rv_dict[attr_key].reverse().coords + ) elif attr_key == gis_osm.KEY_OSMNX_REVERSED: # the dict values are the reversed attributes, # they must be opposites (since type(..)!=list) - assert ( - not attr_value == rv_dict[attr_key] - ) + assert not attr_value == rv_dict[attr_key] elif attr_key == 
gis_osm.KEY_OSMNX_LENGTH: # the dict values are lengths, # they must be approximately equal - assert isclose( - attr_value, - rv_dict[attr_key], - abs_tol=1e-3 - ) - else: + assert isclose( + attr_value, rv_dict[attr_key], abs_tol=1e-3 + ) + else: # the dict values are not lists, nor geometries # nor reversed attributes: they must match assert attr_value == rv_dict[attr_key] - - else: # the edges are not the same in reverse + + else: # the edges are not the same in reverse # at least one of their attributes must be different or # incompatible fw_dict = network.get_edge_data(*edge_key) @@ -3712,90 +3937,77 @@ class TestGisIdentify: # the attributes must be identical except if they are # lists or if they geometries or reversed arguments for attr_key, attr_value in fw_dict.items(): - if type(attr_value) == list: # the dict values are lists, their equivalent # sets must match (they cannot be geometries) - assert ( - set(attr_value) == set( - rv_dict[attr_key]) - ) + assert set(attr_value) == set(rv_dict[attr_key]) elif attr_key == gis_osm.KEY_OSMNX_GEOMETRY: # the dict values are geometries, they must be # the reverse of one another - assert ( - tuple(attr_value.coords) == - tuple( - rv_dict[attr_key].reverse().coords - ) - ) + assert tuple(attr_value.coords) == tuple( + rv_dict[attr_key].reverse().coords + ) elif attr_key == gis_osm.KEY_OSMNX_REVERSED: # the dict values are the reversed attributes, # they must be opposites (since type(..)!=list) - assert ( - not attr_value == rv_dict[attr_key] - ) + assert not attr_value == rv_dict[attr_key] elif attr_key == gis_osm.KEY_OSMNX_LENGTH: # the dict values are lengths, # they must be approximately equal - assert isclose( - attr_value, - rv_dict[attr_key], - abs_tol=1e-3 - ) - else: + assert isclose( + attr_value, rv_dict[attr_key], abs_tol=1e-3 + ) + else: # the dict values are not lists, nor geometries # nor reversed attributes: they must match assert attr_value == rv_dict[attr_key] except Exception: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_edge_geometry_consistency(self): - network = nx.MultiDiGraph() - + node_key0 = 0 - node_key0_dict = { - gis_osm.KEY_OSMNX_X: 55, - gis_osm.KEY_OSMNX_Y: 25 - } + node_key0_dict = {gis_osm.KEY_OSMNX_X: 55, gis_osm.KEY_OSMNX_Y: 25} node_key1 = 1 - node_key1_dict = { - gis_osm.KEY_OSMNX_X: 55.001, - gis_osm.KEY_OSMNX_Y: 25.001 - } - + node_key1_dict = {gis_osm.KEY_OSMNX_X: 55.001, gis_osm.KEY_OSMNX_Y: 25.001} + network.add_node(node_key0, **node_key0_dict) network.add_node(node_key1, **node_key1_dict) # create a line between node 0 and node 1 - geo_line = LineString([ - (node_key0_dict[gis_osm.KEY_OSMNX_X], - node_key0_dict[gis_osm.KEY_OSMNX_Y]), - (node_key1_dict[gis_osm.KEY_OSMNX_X], - node_key1_dict[gis_osm.KEY_OSMNX_Y]) - ]) + geo_line = LineString( + [ + ( + node_key0_dict[gis_osm.KEY_OSMNX_X], + node_key0_dict[gis_osm.KEY_OSMNX_Y], + ), + ( + node_key1_dict[gis_osm.KEY_OSMNX_X], + node_key1_dict[gis_osm.KEY_OSMNX_Y], + ), + ] + ) # the same line, reversed geo_line_reversed = geo_line.reverse() - + k_no_geo = network.add_edge(0, 1, **{gis_osm.KEY_OSMNX_LENGTH: 3}) - + k_normal = network.add_edge( - 0, - 1, - **{gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_GEOMETRY: geo_line} - ) - + 0, 1, **{gis_osm.KEY_OSMNX_LENGTH: 3, gis_osm.KEY_OSMNX_GEOMETRY: geo_line} + ) + k_reversed =
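# Illustrative aside: the attribute-by-attribute comparison above follows a
# small rule table; this compact restatement is assumed from the assertions
# and _attrs_match_in_reverse is a hypothetical helper name.
from math import isclose

def _attrs_match_in_reverse(key: str, fw, rv) -> bool:
    if isinstance(fw, list):
        return set(fw) == set(rv)  # lists compare as sets
    if key == "geometry":
        return tuple(fw.coords) == tuple(rv.reverse().coords)  # mirrored lines
    if key == "reversed":
        return fw != rv  # the flags must be opposites
    if key == "length":
        return isclose(fw, rv, abs_tol=1e-3)  # lengths are nearly equal
    return fw == rv  # everything else must match exactly

assert _attrs_match_in_reverse("reversed", True, False)
assert _attrs_match_in_reverse("length", 27.601, 27.6011)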
network.add_edge( - 0, - 1, - **{gis_osm.KEY_OSMNX_LENGTH: 3, - gis_osm.KEY_OSMNX_GEOMETRY: geo_line_reversed} - ) + 0, + 1, + **{ + gis_osm.KEY_OSMNX_LENGTH: 3, + gis_osm.KEY_OSMNX_GEOMETRY: geo_line_reversed, + } + ) # edge without geometry should be consistent edge_key = (node_key0, node_key1, k_no_geo) assert gis_iden.is_edge_consistent_with_geometry(network, edge_key) @@ -3805,718 +4017,729 @@ class TestGisIdentify: # edge with reversed geometry should not be consistent edge_key = (node_key0, node_key1, k_reversed) assert not gis_iden.is_edge_consistent_with_geometry(network, edge_key) - + # trigger no edge found error error_raised = False try: - edge_key = (node_key0, node_key1, k_no_geo-1) + edge_key = (node_key0, node_key1, k_no_geo - 1) gis_iden.is_edge_consistent_with_geometry(network, edge_key) except ValueError: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_edge_geometry_consistency_osmnx(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) - + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) + for edge_key in network.edges(keys=True): - - assert gis_iden.is_edge_consistent_with_geometry( - network, edge_key) - + assert gis_iden.is_edge_consistent_with_geometry(network, edge_key) + edge_key = (115831, 1104936963, 0) assert gis_iden.is_edge_consistent_with_geometry(network, edge_key) edge_key = (1104936963, 115831, 0) assert gis_iden.is_edge_consistent_with_geometry(network, edge_key) - + # ************************************************************************* # ************************************************************************* - + def test_valid_node_paths(self): - # ********************************************************************* # ********************************************************************* - + # create network - + network = nx.MultiDiGraph() - + # define and add edges - + list_edges = [ - (0,1),(1,2),(2,3), # path 1 - (4,3),(5,4),(6,5), # path 2 - (6,7),(7,8),(8,9), # path 3 - (2,2), # self loop on path 1 - (4,4), # self loop on path 2 - (9,9), # self loop on path 3 - (10,8) # extra lone neighbour for node 8 on - ] - + (0, 1), + (1, 2), + (2, 3), # path 1 + (4, 3), + (5, 4), + (6, 5), # path 2 + (6, 7), + (7, 8), + (8, 9), # path 3 + (2, 2), # self loop on path 1 + (4, 4), # self loop on path 2 + (9, 9), # self loop on path 3 + (10, 8), # extra lone neighbour for node 8 on + ] + network.add_edges_from(list_edges) - + # ********************************************************************* # ********************************************************************* - - valid_node_paths = [ - [0,1,2,3], - [6,5,4,3], - [6,7,8,9], - [10,8,9] - ] - + + valid_node_paths = [[0, 1, 2, 3], [6, 5, 4, 3], [6, 7, 8, 9], [10, 8, 9]] + invalid_node_paths = [ - [], # empty list - [1], # single node path - [-1,0,1,2,3], # nodes do not belong to the network - [3,2,1,0], # path 1 reversed - [3,4,5,6], # path 2 reversed - [9,8,7,6], # path 3 reversed - [6,7,8,10] # node 10 is connected to node 8 but not the other way arou. 
- ] - + [], # empty list + [1], # single node path + [-1, 0, 1, 2, 3], # nodes do not belong to the network + [3, 2, 1, 0], # path 1 reversed + [3, 4, 5, 6], # path 2 reversed + [9, 8, 7, 6], # path 3 reversed + [6, 7, 8, 10], # node 10 is connected to node 8 but not the other way arou. + ] + # make sure valid node paths are valid for path in valid_node_paths: assert gis_iden.is_node_path(network, path) # make sure invalid node paths are invalid for path in invalid_node_paths: assert not gis_iden.is_node_path(network, path) - + # ********************************************************************* # ********************************************************************* - + consider_reversed_edges = True - + valid_node_paths = [ - [0,1,2,3], - [6,5,4,3], - [6,7,8,9], - [10,8,9], - [3,2,1,0], # path 1 reversed - [3,4,5,6], # path 2 reversed - [9,8,7,6], # path 3 reversed - [6,7,8,10] # node 10 is connected to node 8 but not the other way arou. - ] - + [0, 1, 2, 3], + [6, 5, 4, 3], + [6, 7, 8, 9], + [10, 8, 9], + [3, 2, 1, 0], # path 1 reversed + [3, 4, 5, 6], # path 2 reversed + [9, 8, 7, 6], # path 3 reversed + [6, 7, 8, 10], # node 10 is connected to node 8 but not the other way arou. + ] + invalid_node_paths = [ - [], # empty list - [1], # single node path - [-1,0,1,2,3], # nodes do not belong to the network - ] - + [], # empty list + [1], # single node path + [-1, 0, 1, 2, 3], # nodes do not belong to the network + ] + # make sure valid node paths are valid for path in valid_node_paths: assert gis_iden.is_node_path(network, path, consider_reversed_edges) # make sure invalid node paths are invalid for path in invalid_node_paths: - assert not gis_iden.is_node_path( - network, - path, - consider_reversed_edges - ) - + assert not gis_iden.is_node_path(network, path, consider_reversed_edges) + # ************************************************************************* # ************************************************************************* - + def test_valid_edge_paths(self): - # ********************************************************************* # ********************************************************************* - + # create network - + network = nx.MultiDiGraph() - + # define and add edges - + list_edges = [ - (0,1),(1,2),(2,3), # path 1 - (4,3),(5,4),(6,5), # path 2 - (6,7),(7,8),(8,9), # path 3 - (2,2), # self loop on path 1 - (4,4), # self loop on path 2 - (9,9), # self loop on path 3 - (10,8) # extra lone neighbour for node 8 on - ] - + (0, 1), + (1, 2), + (2, 3), # path 1 + (4, 3), + (5, 4), + (6, 5), # path 2 + (6, 7), + (7, 8), + (8, 9), # path 3 + (2, 2), # self loop on path 1 + (4, 4), # self loop on path 2 + (9, 9), # self loop on path 3 + (10, 8), # extra lone neighbour for node 8 on + ] + network.add_edges_from(list_edges) - + # ********************************************************************* # ********************************************************************* - + valid_edge_paths = [ - [(0,1),(1,2),(2,3)], - [(6,5),(5,4),(4,3)], - [(6,7),(7,8),(8,9)], - [(10,8),(8,9)], - [(0,1,0),(1,2,0),(2,3,0)], # with keys - [(6,5,0),(5,4,0),(4,3,0)], # with keys - [(6,7,0),(7,8,0),(8,9,0)], # with keys - [(10,8,0),(8,9,0)] # with keys - ] - + [(0, 1), (1, 2), (2, 3)], + [(6, 5), (5, 4), (4, 3)], + [(6, 7), (7, 8), (8, 9)], + [(10, 8), (8, 9)], + [(0, 1, 0), (1, 2, 0), (2, 3, 0)], # with keys + [(6, 5, 0), (5, 4, 0), (4, 3, 0)], # with keys + [(6, 7, 0), (7, 8, 0), (8, 9, 0)], # with keys + [(10, 8, 0), (8, 9, 0)], # with keys + ] + invalid_edge_paths = [ - 
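# Illustrative aside: a sketch of the node-path notion exercised above,
# assuming (from the valid and invalid cases) that consecutive nodes must be
# joined by an edge, optionally in either direction when reversed edges are
# considered. _is_node_path_sketch is a hypothetical stand-in for
# gis_iden.is_node_path.
import networkx as nx

def _is_node_path_sketch(g: nx.MultiDiGraph, path: list, reversible: bool = False) -> bool:
    if len(path) < 2 or not all(g.has_node(n) for n in path):
        return False  # empty lists, single nodes and unknown nodes all fail
    return all(
        g.has_edge(u, v) or (reversible and g.has_edge(v, u))
        for u, v in zip(path[:-1], path[1:])
    )

_g = nx.MultiDiGraph([(0, 1), (1, 2)])
assert _is_node_path_sketch(_g, [0, 1, 2])
assert not _is_node_path_sketch(_g, [2, 1, 0])
assert _is_node_path_sketch(_g, [2, 1, 0], reversible=True)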
[(0,1,1),(1,2,0),(2,3,3)], # with incorrect keys - [(6,5,2),(5,4,0),(4,3,2)], # with incorrect keys - [(6,7,3),(7,8,0),(8,9,1)], # with incorrect keys - [(10,8,0),(8,9,2)], # with incorrect keys - [], # empty list - [(-1,0),(0,1),(1,2),(2,3)], # nodes do not belong to the network - [(3,2),(2,1),(1,0)], # path 1 reversed - [(3,4),(4,5),(5,6)], # path 2 reversed - [(9,8),(8,7),(7,6)], # path 3 reversed - [(6,7),(7,8),(8,10)], # node 10 is connected to node 8 but not the other way - [(),(7,8),(8)], # unknown format - #[(6,5,0),(5,4),(4,3,0)], # inconsistent edge key format - [(6,5),(9,9),(5,4),(4,3)] # no sequence due to (9,9) - ] - + [(0, 1, 1), (1, 2, 0), (2, 3, 3)], # with incorrect keys + [(6, 5, 2), (5, 4, 0), (4, 3, 2)], # with incorrect keys + [(6, 7, 3), (7, 8, 0), (8, 9, 1)], # with incorrect keys + [(10, 8, 0), (8, 9, 2)], # with incorrect keys + [], # empty list + [(-1, 0), (0, 1), (1, 2), (2, 3)], # nodes do not belong to the network + [(3, 2), (2, 1), (1, 0)], # path 1 reversed + [(3, 4), (4, 5), (5, 6)], # path 2 reversed + [(9, 8), (8, 7), (7, 6)], # path 3 reversed + [ + (6, 7), + (7, 8), + (8, 10), + ], # node 10 is connected to node 8 but not the other way + [(), (7, 8), (8)], # unknown format + # [(6,5,0),(5,4),(4,3,0)], # inconsistent edge key format + [(6, 5), (9, 9), (5, 4), (4, 3)], # no sequence due to (9,9) + ] + for path in valid_edge_paths: - assert gis_iden.is_edge_path(network, path) - + # make sure invalid node paths are invalid - + for path in invalid_edge_paths: - assert not gis_iden.is_edge_path(network, path) - + # ********************************************************************* # ********************************************************************* - + # inconsistent edge key format - + # allowed - + assert gis_iden.is_edge_path( - network, - path=[(6,5,0),(5,4),(4,3,0)], - allow_multiple_formats=True - ) - + network, path=[(6, 5, 0), (5, 4), (4, 3, 0)], allow_multiple_formats=True + ) + # not allowed - + error_triggered = False try: # inconsistent edge key format gis_iden.is_edge_path( - network, - path=[(6,5,0),(5,4),(4,3,0)], - allow_multiple_formats=False - ) + network, + path=[(6, 5, 0), (5, 4), (4, 3, 0)], + allow_multiple_formats=False, + ) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* # ********************************************************************* - + ignore_edge_direction = True - + valid_edge_paths = [ - [(0,1),(1,2),(2,3)], - [(6,5),(5,4),(4,3)], - [(6,7),(7,8),(8,9)], - [(10,8),(8,9)], - [(0,1,0),(1,2,0),(2,3,0)], # with keys - [(6,5,0),(5,4,0),(4,3,0)], # with keys - [(6,7,0),(7,8,0),(8,9,0)], # with keys - [(10,8,0),(8,9,0)], # with keys - [(2,3),(1,2),(0,1)], # path 1 reversed - [(4,3),(5,4),(6,5)], # path 2 reversed - [(8,9),(7,8),(6,7)], # path 3 reversed - [(6,7),(7,8),(10,8)] # node 10 is connected to node 8 but not the other way - ] - + [(0, 1), (1, 2), (2, 3)], + [(6, 5), (5, 4), (4, 3)], + [(6, 7), (7, 8), (8, 9)], + [(10, 8), (8, 9)], + [(0, 1, 0), (1, 2, 0), (2, 3, 0)], # with keys + [(6, 5, 0), (5, 4, 0), (4, 3, 0)], # with keys + [(6, 7, 0), (7, 8, 0), (8, 9, 0)], # with keys + [(10, 8, 0), (8, 9, 0)], # with keys + [(2, 3), (1, 2), (0, 1)], # path 1 reversed + [(4, 3), (5, 4), (6, 5)], # path 2 reversed + [(8, 9), (7, 8), (6, 7)], # path 3 reversed + [ + (6, 7), + (7, 8), + (10, 8), + ], # node 10 is connected to node 8 but not the other way + ] + invalid_edge_paths = [ - [(0,1,1),(1,2,0),(2,3,3)], # with incorrect keys - 
[(6,5,2),(5,4,0),(4,3,2)], # with incorrect keys - [(6,7,3),(7,8,0),(8,9,1)], # with incorrect keys - [(10,8,0),(8,9,2)], # with incorrect keys - [], # empty list - [(-1,0),(0,1),(1,2),(2,3)], # nodes do not belong to the network arou. - #[(10,8,0),(8,9)], # different key formats - #[(6,5),(5,4,0),(4,3)], # different key formats - [(),(5,4,0),(4,3)], # unknown key formats - [(6,5),(9,9),(5,4,0),(4,3)] # no sequence due to (9,9) - ] - + [(0, 1, 1), (1, 2, 0), (2, 3, 3)], # with incorrect keys + [(6, 5, 2), (5, 4, 0), (4, 3, 2)], # with incorrect keys + [(6, 7, 3), (7, 8, 0), (8, 9, 1)], # with incorrect keys + [(10, 8, 0), (8, 9, 2)], # with incorrect keys + [], # empty list + [ + (-1, 0), + (0, 1), + (1, 2), + (2, 3), + ], # nodes do not belong to the network arou. + # [(10,8,0),(8,9)], # different key formats + # [(6,5),(5,4,0),(4,3)], # different key formats + [(), (5, 4, 0), (4, 3)], # unknown key formats + [(6, 5), (9, 9), (5, 4, 0), (4, 3)], # no sequence due to (9,9) + ] + for path in valid_edge_paths: - assert gis_iden.is_edge_path( - network, - path, - ignore_edge_direction=ignore_edge_direction) - + network, path, ignore_edge_direction=ignore_edge_direction + ) + # make sure invalid node paths are invalid - + for path in invalid_edge_paths: - assert not gis_iden.is_edge_path( - network, - path, - ignore_edge_direction=ignore_edge_direction) - + network, path, ignore_edge_direction=ignore_edge_direction + ) + # ********************************************************************* # ********************************************************************* - + # inconsistent edge key format - + # allowed - + assert gis_iden.is_edge_path( - network, - path=[(10,8,0),(8,9)], - allow_multiple_formats=True - ) - + network, path=[(10, 8, 0), (8, 9)], allow_multiple_formats=True + ) + # not allowed - + error_triggered = False try: # inconsistent edge key format gis_iden.is_edge_path( - network, - path=[(10,8,0),(8,9)], - allow_multiple_formats=False - ) + network, path=[(10, 8, 0), (8, 9)], allow_multiple_formats=False + ) except ValueError: error_triggered = True assert error_triggered - + # inconsistent edge key format - + # allowed - + assert gis_iden.is_edge_path( - network, - path=[(6,5),(5,4,0),(4,3)], - allow_multiple_formats=True - ) - + network, path=[(6, 5), (5, 4, 0), (4, 3)], allow_multiple_formats=True + ) + # not allowed - + error_triggered = False try: # inconsistent edge key format gis_iden.is_edge_path( - network, - path=[(6,5),(5,4,0),(4,3)], - allow_multiple_formats=False - ) + network, path=[(6, 5), (5, 4, 0), (4, 3)], allow_multiple_formats=False + ) except ValueError: error_triggered = True assert error_triggered - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_straight_paths_reversed_edges_self_loops(self): - # create network - + network = nx.MultiDiGraph() - + # define and add edges list_edges = [ - (0,1),(1,2),(2,3), # path 1 - (4,3),(5,4),(6,5), # path 2 - (6,7),(7,8),(8,9), # path 3 - (2,2), # self loop on path 1 - (4,4), # self loop on path 2 - (9,9), # self loop on path 3 - (10,8) # extra lone neighbour for node 8 on - ] + (0, 1), + (1, 2), + (2, 3), # path 1 + (4, 3), + (5, 4), + (6, 5), # path 2 + (6, 7), + (7, 8), + (8, 9), # path 3 + (2, 2), # self loop on path 1 + (4, 4), # self loop on path 2 + 
(9, 9), # self loop on path 3 + (10, 8), # extra lone neighbour for node 8 on + ] network.add_edges_from(list_edges) - + # reversed edges are okay, self loops too - + ignore_self_loops = True consider_reversed_edges = True - + # valid node paths valid_straight_node_paths = [ - [0,1,2], - [1,2,3], - [0,1,2,3], - [5,4,3], - [6,5,4], - [6,5,4,3], - [0,1], # just two nodes - [0,1,2,3,4], # node 4 is connected using an edge in the opposite dir. - [6,5,4,3,2], # node 2 is connected using an edge in the opposite dir. - [3,4,5,6], # the path is reversed - ] - + [0, 1, 2], + [1, 2, 3], + [0, 1, 2, 3], + [5, 4, 3], + [6, 5, 4], + [6, 5, 4, 3], + [0, 1], # just two nodes + [0, 1, 2, 3, 4], # node 4 is connected using an edge in the opposite dir. + [6, 5, 4, 3, 2], # node 2 is connected using an edge in the opposite dir. + [3, 4, 5, 6], # the path is reversed + ] + # invalid node paths invalid_straight_node_paths = [ - [6,7,8,9], # node 8 has three neighbours (7, 9 and 10) - [0,4,1], # node 4 is not a neighbour of nodes 0 and 1 - [11,3,4,5,6] # there is no node 11 - ] - + [6, 7, 8, 9], # node 8 has three neighbours (7, 9 and 10) + [0, 4, 1], # node 4 is not a neighbour of nodes 0 and 1 + [11, 3, 4, 5, 6], # there is no node 11 + ] + # make sure valid node paths are valid for path in valid_straight_node_paths: assert gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # make sure invalid node paths are invalid for path in invalid_straight_node_paths: assert not gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_straight_paths_no_reversed_edges_self_loops(self): - # create network - + network = nx.MultiDiGraph() - + # define and add edges list_edges = [ - (0,1),(1,2),(2,3), # path 1 - (4,3),(5,4),(6,5), # path 2 - (6,7),(7,8),(8,9), # path 3 - (2,2), # self loop on path 1 - (4,4), # self loop on path 2 - (9,9), # self loop on path 3 - (10,8) # extra lone neighbour for node 8 on - ] + (0, 1), + (1, 2), + (2, 3), # path 1 + (4, 3), + (5, 4), + (6, 5), # path 2 + (6, 7), + (7, 8), + (8, 9), # path 3 + (2, 2), # self loop on path 1 + (4, 4), # self loop on path 2 + (9, 9), # self loop on path 3 + (10, 8), # extra lone neighbour for node 8 on + ] network.add_edges_from(list_edges) - + # no reversed edges, self loops are okay - + ignore_self_loops = True consider_reversed_edges = False - + # valid node paths - + valid_straight_node_paths = [ - [0,1,2], - [1,2,3], - [0,1,2,3], - [5,4,3], - [6,5,4], - [6,5,4,3], - [0,1], # just two nodes - ] - + [0, 1, 2], + [1, 2, 3], + [0, 1, 2, 3], + [5, 4, 3], + [6, 5, 4], + [6, 5, 4, 3], + [0, 1], # just two nodes + ] + # invalid node paths - + invalid_straight_node_paths = [ - [2,1,0], # reversed path - [3,2,1], # reversed path - [3,2,1,0], # reversed path - [3,4,5], # reversed path - [4,5,6], # reversed path - [3,4,5,6], # reversed path - [0,1,2,3,4,5,6,7,8], # path with reversed elements - [8,7,6,5,4,3,2,1,0], # path with reversed element - [0,1,2,3,4], # path with reversed elements - 
[6,5,4,3,2], # the last edge is reversed - [6,7,8,9], # node 8 has three neighbours (7, 9 and 10) - [0,4,1], # node 4 is not a neighbour of nodes 0 and 1 - [11,3,4,5,6], # there is no node 11 - [0,1,2,3,4,5,6,7,8,9], # node 8 has three neighbours (7, 9 and 10) - [9,8,7,6,5,4,3,2,1,0] # node 8 has three neighbours (7, 9 and 10) - ] - - # make sure valid node paths are valid - for path in valid_straight_node_paths: + [2, 1, 0], # reversed path + [3, 2, 1], # reversed path + [3, 2, 1, 0], # reversed path + [3, 4, 5], # reversed path + [4, 5, 6], # reversed path + [3, 4, 5, 6], # reversed path + [0, 1, 2, 3, 4, 5, 6, 7, 8], # path with reversed elements + [8, 7, 6, 5, 4, 3, 2, 1, 0], # path with reversed element + [0, 1, 2, 3, 4], # path with reversed elements + [6, 5, 4, 3, 2], # the last edge is reversed + [6, 7, 8, 9], # node 8 has three neighbours (7, 9 and 10) + [0, 4, 1], # node 4 is not a neighbour of nodes 0 and 1 + [11, 3, 4, 5, 6], # there is no node 11 + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], # node 8 has three neighbours (7, 9 and 10) + [9, 8, 7, 6, 5, 4, 3, 2, 1, 0], # node 8 has three neighbours (7, 9 and 10) + ] + + # make sure valid node paths are valid + for path in valid_straight_node_paths: assert gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - - # make sure invalid node paths are invalid + ignore_self_loops=ignore_self_loops, + ) + + # make sure invalid node paths are invalid for path in invalid_straight_node_paths: assert not gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_straight_paths_no_reversed_edges_no_self_loops(self): - # create network - + network = nx.MultiDiGraph() - + # define and add edges list_edges = [ - (0,1),(1,2),(2,3), # path 1 - (4,3),(5,4),(6,5), # path 2 - (6,7),(7,8),(8,9), # path 3 - (2,2), # self loop on path 1 - (4,4), # self loop on path 2 - (9,9), # self loop on path 3 - (10,8) # extra lone neighbour for node 8 on - ] + (0, 1), + (1, 2), + (2, 3), # path 1 + (4, 3), + (5, 4), + (6, 5), # path 2 + (6, 7), + (7, 8), + (8, 9), # path 3 + (2, 2), # self loop on path 1 + (4, 4), # self loop on path 2 + (9, 9), # self loop on path 3 + (10, 8), # extra lone neighbour for node 8 on + ] network.add_edges_from(list_edges) - + # no reversed edges, self loops are not okay - + ignore_self_loops = False consider_reversed_edges = False - + # (0,1),(1,2),(2,3), # path 1 # (4,3),(5,4),(6,5), # path 2 # (6,7),(7,8),(8,9), # path 3 # (2,2), # self loop on path 1 # (4,4), # self loop on path 2 # (9,9), # self loop on path 3 - + # valid node paths - - valid_straight_node_paths = [ - [6,5,4], - [6,7,8], - [0,1,2] - ] - + + valid_straight_node_paths = [[6, 5, 4], [6, 7, 8], [0, 1, 2]] + # invalid node paths - + invalid_straight_node_paths = [ - [1,2,3], - [0,1,2,3], - [5,4,3], - [6,5,4,3], - [4,5,6], - [8,7,6], - [2,1,0] - ] - - # make sure valid node paths are valid - for path in valid_straight_node_paths: + [1, 2, 3], + [0, 1, 2, 3], + [5, 4, 3], + [6, 5, 4, 3], + [4, 5, 6], + [8, 7, 6], + [2, 1, 0], + ] + + # make sure valid node 
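# Illustrative aside: the straightness criterion implied by these cases is
# that every intermediate node on the path has exactly two distinct
# neighbours, with self-loops optionally ignored. A hypothetical check for a
# single intermediate node:
import networkx as nx

def _intermediate_node_ok(g: nx.MultiDiGraph, node, ignore_self_loops: bool = True) -> bool:
    neighbours = set(g.predecessors(node)) | set(g.successors(node))
    if ignore_self_loops:
        neighbours.discard(node)  # drop the node itself, i.e. its self-loops
    return len(neighbours) == 2

_g = nx.MultiDiGraph([(0, 1), (1, 2), (1, 1)])
assert _intermediate_node_ok(_g, 1)  # the self-loop is ignored
assert not _intermediate_node_ok(_g, 1, ignore_self_loops=False)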
paths are valid + for path in valid_straight_node_paths: assert gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - - # make sure invalid node paths are invalid + ignore_self_loops=ignore_self_loops, + ) + + # make sure invalid node paths are invalid for path in invalid_straight_node_paths: assert not gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_straight_paths_reversed_edges_no_self_loops(self): - # create network - + network = nx.MultiDiGraph() - + # define and add edges list_edges = [ - (0,1),(1,2),(2,3), # path 1 - (4,3),(5,4),(6,5), # path 2 - (6,7),(7,8),(8,9), # path 3 - (2,2), # self loop on path 1 - (4,4), # self loop on path 2 - (9,9), # self loop on path 3 - (10,8) # extra lone neighbour for node 8 on - ] + (0, 1), + (1, 2), + (2, 3), # path 1 + (4, 3), + (5, 4), + (6, 5), # path 2 + (6, 7), + (7, 8), + (8, 9), # path 3 + (2, 2), # self loop on path 1 + (4, 4), # self loop on path 2 + (9, 9), # self loop on path 3 + (10, 8), # extra lone neighbour for node 8 on + ] network.add_edges_from(list_edges) - + # reversed edges are okay, self loops are not - + ignore_self_loops = False consider_reversed_edges = True - + # valid node paths - + valid_straight_node_paths = [ - [4,5,6], - [6,5,4], - [6,7,8], - [8,7,6], - [0,1,2], - [2,1,0] - ] - + [4, 5, 6], + [6, 5, 4], + [6, 7, 8], + [8, 7, 6], + [0, 1, 2], + [2, 1, 0], + ] + # invalid node paths - - invalid_straight_node_paths = [ - [1,2,3], - [0,1,2,3], - [5,4,3], - [6,5,4,3] - ] - - # make sure valid node paths are valid - for path in valid_straight_node_paths: + + invalid_straight_node_paths = [[1, 2, 3], [0, 1, 2, 3], [5, 4, 3], [6, 5, 4, 3]] + + # make sure valid node paths are valid + for path in valid_straight_node_paths: assert gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - - # make sure invalid node paths are invalid + ignore_self_loops=ignore_self_loops, + ) + + # make sure invalid node paths are invalid for path in invalid_straight_node_paths: assert not gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_straight_path_parallel_antiparallel_edges(self): - # create network - + network = nx.MultiDiGraph() - + # define and add edges list_edges = [ - (0,1,0),(1,2,0),(2,3,0), # path 1 - (4,5,0),(5,6,0),(6,7,0), # path 2 - (8,9,0),(9,10,0),(10,11,0), # path 3 + (0, 1, 0), + (1, 2, 0), + (2, 3, 0), # path 1 + (4, 5, 0), + (5, 6, 0), + (6, 7, 0), # path 2 + (8, 9, 0), + (9, 10, 0), + (10, 11, 0), # path 3 # extra edges - (0,1,0), # path 1 - (5,4,0), # path 
2 - (8,9,0),(11,10,0) # path 3 - ] + (0, 1, 0), # path 1 + (5, 4, 0), # path 2 + (8, 9, 0), + (11, 10, 0), # path 3 + ] network.add_edges_from(list_edges) - + # reversed edges are okay, self loops are not - + ignore_self_loops = True consider_reversed_edges = True - + # valid node paths - - valid_straight_node_paths = [ - [0,1,2,3], - [4,5,6,7], - [8,9,10,11] - ] - - # make sure valid node paths are valid - for path in valid_straight_node_paths: + + valid_straight_node_paths = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + # make sure valid node paths are valid + for path in valid_straight_node_paths: assert gis_iden.is_path_straight( - network, + network, path, consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops) - + ignore_self_loops=ignore_self_loops, + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_nearest_node_keys(self): - # ********************************************************************* - + # create a network network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" # network.graph['crs'] = 'init' # add edges and nodes - number_edges = random.randint(3,10) + number_edges = random.randint(3, 10) edge_keys = [ - (random.randint(0,number_edges),random.randint(0,number_edges)) - for edge_index in range(number_edges)] + (random.randint(0, number_edges), random.randint(0, number_edges)) + for edge_index in range(number_edges) + ] network.add_edges_from(edge_keys) # add attributes to the nodes used in the edges for node_key in network.nodes(): _xy = (random.random(), random.random()) - network.add_node(node_key, x=_xy[0], y=_xy[0]) + network.add_node(node_key, x=_xy[0], y=_xy[0]) # add new (unconnected) nodes - number_new_nodes = random.randint(3,5) + number_new_nodes = random.randint(3, 5) unconnected_node_keys = [] for node_index in range(number_new_nodes): new_node_key = uuid.uuid4() _xy = (random.random(), random.random()) network.add_node(new_node_key, x=_xy[0], y=_xy[0]) unconnected_node_keys.append(new_node_key) - + # ********************************************************************* - + # find the nearest nodes using the osmnx method nearest_node_keys = gis_iden.nearest_nodes( network, - [network.nodes[node_key]['x'] - for node_key in unconnected_node_keys], - [network.nodes[node_key]['y'] - for node_key in unconnected_node_keys] - ) - + [network.nodes[node_key]["x"] for node_key in unconnected_node_keys], + [network.nodes[node_key]["y"] for node_key in unconnected_node_keys], + ) + # assert that the test is meaningful assert len(nearest_node_keys) != 0 assert len(nearest_node_keys) == len(unconnected_node_keys) - + # assert that the nodes are the same for i, node_key in enumerate(unconnected_node_keys): assert node_key == nearest_node_keys[i] - + # ********************************************************************* - + # find the nodes nearest to select nodes excluding themselves nearest_node_keys = gis_iden.nearest_nodes_other_than_themselves( - network, - unconnected_node_keys) - + network, unconnected_node_keys + ) + # assert that the test is meaningful assert len(nearest_node_keys) != 0 assert len(nearest_node_keys) == len(unconnected_node_keys) all_node_keys = list(network.nodes()) list_all_geos = [] for node_key in 
all_node_keys: - list_all_geos.append(Point( - (network.nodes[node_key]['x'],network.nodes[node_key]['y']) - ) - ) + list_all_geos.append( + Point((network.nodes[node_key]["x"], network.nodes[node_key]["y"])) + ) all_node_geos = { - node_key: list_all_geos[i] - for i, node_key in enumerate(all_node_keys) - } + node_key: list_all_geos[i] for i, node_key in enumerate(all_node_keys) + } # for each node for i, node_key in enumerate(unconnected_node_keys): # assert that they are not the same @@ -4524,77 +4747,65 @@ class TestGisIdentify: # verify that the distance between is the lowest among all unconnected_node_geo = all_node_geos[node_key] all_distances = [ - unconnected_node_geo.distance( - all_node_geos[other_node_key] - ) + unconnected_node_geo.distance(all_node_geos[other_node_key]) for other_node_key in all_node_keys if other_node_key != node_key - ] + ] actual_distance = unconnected_node_geo.distance( all_node_geos[nearest_node_keys[i]] - ) - assert isclose( - min(all_distances), - actual_distance, - abs_tol=1) - + ) + assert isclose(min(all_distances), actual_distance, abs_tol=1) + # ************************************************************************* # ************************************************************************* - + def test_finding_roundabouts(self): - # network should be a OSM-nx formatted graph network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) + truncate_by_edge=True, + ) # find all roundabouts roundabouts = gis_iden.find_roundabouts(network) # confirm they are roundabouts for roundabout in roundabouts: assert gis_iden.is_roundabout(network, roundabout) - + # find roundabouts with constraints roundabouts = gis_iden.find_roundabouts( network, maximum_perimeter=200, minimum_perimeter=25, maximum_number_nodes=6, - minimum_number_nodes=4 - ) + minimum_number_nodes=4, + ) # confirm they are roundabouts for roundabout in roundabouts: assert gis_iden.is_roundabout(network, roundabout) - + # ************************************************************************* # ************************************************************************* - + def test_finding_reversed_edges(self): - # network should be a OSM-nx formatted graph network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) # find edges in reverse edges_in_rev = gis_iden.find_edges_in_reverse(network) # confirm for edge_key, reversed_edge_keys in edges_in_rev.items(): for _edge_key in reversed_edge_keys: - assert gis_iden.edges_are_in_reverse( - network, - edge_key, - _edge_key - ) - + assert gis_iden.edges_are_in_reverse(network, edge_key, _edge_key) + # ************************************************************************* # ************************************************************************* - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_gis_modify.py b/tests/test_gis_modify.py index a6b7181..f9d9e39 
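# Illustrative aside: a usage sketch for the nearest-node lookup exercised in
# the test above. In plain OSMnx the equivalent call is
# ox.distance.nearest_nodes(G, X, Y), with X as longitudes and Y as
# latitudes; the coordinates below are illustrative only.
import networkx as nx
import osmnx as ox

_g = nx.MultiDiGraph(crs="EPSG:4326")
_g.add_node(0, x=12.00, y=56.00)
_g.add_node(1, x=12.01, y=56.01)
nearest = ox.distance.nearest_nodes(_g, X=[12.001], Y=[56.001])
assert nearest[0] == 0  # node 0 is the closest to the query point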
100644 --- a/tests/test_gis_modify.py +++ b/tests/test_gis_modify.py @@ -12,6 +12,7 @@ from shapely.geometry import Point, LineString from shapely import length import networkx as nx import osmnx as ox + # from osmnx.utils_graph import get_undirected # local, internal @@ -25,67 +26,66 @@ import src.topupopt.problems.esipp.utils as prob_utils # ***************************************************************************** # ***************************************************************************** + class TestGisModify: - # ************************************************************************* # ************************************************************************* - + def test_create_reverse_edges(self): - network = nx.MultiDiGraph() - + node_key0 = 0 - node_key0_dict = { - osm.KEY_OSMNX_X: 55, - osm.KEY_OSMNX_Y: 25 - } + node_key0_dict = {osm.KEY_OSMNX_X: 55, osm.KEY_OSMNX_Y: 25} node_key1 = 1 - node_key1_dict = { - osm.KEY_OSMNX_X: 55.001, - osm.KEY_OSMNX_Y: 25.001 - } - + node_key1_dict = {osm.KEY_OSMNX_X: 55.001, osm.KEY_OSMNX_Y: 25.001} + network.add_node(node_key0, **node_key0_dict) network.add_node(node_key1, **node_key1_dict) # create a line between node 0 and node 1 - geo_line = LineString([ - (node_key0_dict[osm.KEY_OSMNX_X], - node_key0_dict[osm.KEY_OSMNX_Y]), - (node_key1_dict[osm.KEY_OSMNX_X], - node_key1_dict[osm.KEY_OSMNX_Y]) - ]) + geo_line = LineString( + [ + (node_key0_dict[osm.KEY_OSMNX_X], node_key0_dict[osm.KEY_OSMNX_Y]), + (node_key1_dict[osm.KEY_OSMNX_X], node_key1_dict[osm.KEY_OSMNX_Y]), + ] + ) # the same line, reversed geo_line_reversed = geo_line.reverse() - + k_no_geo = network.add_edge( - 0, - 1, - **{osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_ONEWAY: False} - ) - + 0, + 1, + **{ + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_ONEWAY: False, + } + ) + k_normal = network.add_edge( - 0, - 1, - **{osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_ONEWAY: False, - osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_REVERSED: [True, False], - osm.KEY_OSMNX_GEOMETRY: geo_line} - ) - + 0, + 1, + **{ + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_ONEWAY: False, + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_REVERSED: [True, False], + osm.KEY_OSMNX_GEOMETRY: geo_line, + } + ) + k_reversed = network.add_edge( - 0, - 1, - **{osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_ONEWAY: False, - osm.KEY_OSMNX_REVERSED: True, - osm.KEY_OSMNX_GEOMETRY: geo_line_reversed} - ) - + 0, + 1, + **{ + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_ONEWAY: False, + osm.KEY_OSMNX_REVERSED: True, + osm.KEY_OSMNX_GEOMETRY: geo_line_reversed, + } + ) + # edge without geometry should be consistent edge_key = (node_key0, node_key1, k_no_geo) assert gis_iden.is_edge_osmnx_compliant(network, edge_key) @@ -107,30 +107,32 @@ class TestGisModify: for new_edge in new_edges: assert gis_iden.is_edge_osmnx_compliant(network, new_edge) network.remove_edges_from(new_edges) - + assert network.number_of_edges() == 3 new_edges = gis_mod.create_reverse_edges(network) assert len(new_edges) == 3 - + # trigger no edge found error error_raised = False try: - edge_key = (node_key0, node_key1, k_no_geo-1) + edge_key = (node_key0, node_key1, k_no_geo - 1) gis_mod.create_reverse_edges(network, [edge_key]) except ValueError: error_raised = True assert error_raised - + # trigger valuerror due to incorrect reversed type k_error = network.add_edge( - 0, - 1, - **{osm.KEY_OSMNX_LENGTH: 3, - 
osm.KEY_OSMNX_REVERSED: {True, False}, - osm.KEY_OSMNX_ONEWAY: False, - osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_GEOMETRY: geo_line} - ) + 0, + 1, + **{ + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_REVERSED: {True, False}, + osm.KEY_OSMNX_ONEWAY: False, + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_GEOMETRY: geo_line, + } + ) error_raised = False try: edge_key = (node_key0, node_key1, k_error) @@ -138,130 +140,140 @@ class TestGisModify: except ValueError: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_replace_paths_osmnx(self): - # get the network _net = ox.graph_from_point( (55.71654, 9.11728), - network_type='drive', + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + # define the settings ignore_self_loops = False consider_reversed_edges = True - + # find paths paths = gis_iden.find_simplifiable_paths( - _net, + _net, excluded_nodes=[], ignore_self_loops=ignore_self_loops, - consider_reversed_edges=consider_reversed_edges - ) - + consider_reversed_edges=consider_reversed_edges, + ) + # verify the paths for path in paths: gis_iden.is_path_straight( - _net, - path, - consider_reversed_edges=consider_reversed_edges, - ignore_self_loops=ignore_self_loops - ) - + _net, + path, + consider_reversed_edges=consider_reversed_edges, + ignore_self_loops=ignore_self_loops, + ) + # modify an edge in one of the paths to have list attributes - _edge_key = tuple(gis_iden.get_edges_from_a_to_b( - _net, - paths[0][0], # first path, first node - paths[0][1] # first paht, second node - ))[0] - _net.add_edge( - *_edge_key, - **{osm.KEY_OSMNX_ONEWAY: [ - _net.edges[_edge_key][osm.KEY_OSMNX_ONEWAY] - ], - osm.KEY_OSMNX_REVERSED: [ - _net.edges[_edge_key][osm.KEY_OSMNX_REVERSED] - ] - } + _edge_key = tuple( + gis_iden.get_edges_from_a_to_b( + _net, + paths[0][0], # first path, first node + paths[0][1], # first path, second node ) + )[0] + _net.add_edge( + *_edge_key, + **{ + osm.KEY_OSMNX_ONEWAY: [_net.edges[_edge_key][osm.KEY_OSMNX_ONEWAY]], + osm.KEY_OSMNX_REVERSED: [_net.edges[_edge_key][osm.KEY_OSMNX_REVERSED]], + } + ) # measure the distances - true_path_lengths = [ - gis_calc.node_path_length(_net, path) for path in paths - ] + true_path_lengths = [gis_calc.node_path_length(_net, path) for path in paths] # replace the paths - new_path_edges = [ - gis_mod.replace_path(_net, path) for path in paths - ] + new_path_edges = [gis_mod.replace_path(_net, path) for path in paths] # compare for edge_key, true_length in zip(new_path_edges, true_path_lengths): assert isclose( _net.edges[edge_key][gis_iden.osm.KEY_OSMNX_LENGTH], true_length, - abs_tol=1e-3 # 23.400000000000034 - ) - + abs_tol=1e-3, # 23.400000000000034 + ) + # ************************************************************************* # ************************************************************************* - + def test_replace_nonsimplifiable_path(self): - _net = nx.MultiDiGraph() - + path = [0, 1] - + error_raised = False try: gis_mod.replace_path(_net, path) except ValueError: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def example_replace_paths(self, project_the_graph: bool = False): - #
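# Illustrative aside: what creating a reverse edge amounts to in the test
# above, stated as a hypothetical helper (not gis_mod's implementation):
# copy the attributes, negate the boolean reversed flag and mirror the
# geometry when one is present.
from shapely.geometry import LineString

def _reverse_edge_attrs(attrs: dict) -> dict:
    rev = dict(attrs)
    rev["reversed"] = not attrs["reversed"]  # the boolean flag flips
    if "geometry" in attrs:
        rev["geometry"] = attrs["geometry"].reverse()  # mirrored LineString
    return rev

_fw = {
    "length": 3,
    "reversed": False,
    "osmid": 1,
    "oneway": False,
    "geometry": LineString([(12.00, 56.00), (12.00, 56.01)]),
}
_rv = _reverse_edge_attrs(_fw)
assert _rv["reversed"] and tuple(_rv["geometry"].coords)[0] == (12.00, 56.01)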
********************************************************************* - + # smallest possible path - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" network.graph["simplified"] = False # add nodes - network.add_nodes_from([ - (0,{osm.KEY_OSMNX_Y:56.00, osm.KEY_OSMNX_X: 12.00}), - (1,{osm.KEY_OSMNX_Y:56.01, osm.KEY_OSMNX_X: 12.00}), - (2,{osm.KEY_OSMNX_Y:56.02, osm.KEY_OSMNX_X: 12.01}), - ]) + network.add_nodes_from( + [ + (0, {osm.KEY_OSMNX_Y: 56.00, osm.KEY_OSMNX_X: 12.00}), + (1, {osm.KEY_OSMNX_Y: 56.01, osm.KEY_OSMNX_X: 12.00}), + (2, {osm.KEY_OSMNX_Y: 56.02, osm.KEY_OSMNX_X: 12.01}), + ] + ) # add edges - network.add_edges_from([ - (0,1,0,{osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (1,2,0,{osm.KEY_OSMNX_OSMID: 2, - osm.KEY_OSMNX_LENGTH: 4, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}) - ]) - lengths = gis_calc.edge_lengths(network, edge_keys=[(0,1,0),(1,2,0)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_LENGTH] = lengths[(0,1,0)] - network.edges[(1,2,0)][osm.KEY_OSMNX_LENGTH] = lengths[(1,2,0)] + network.add_edges_from( + [ + ( + 0, + 1, + 0, + { + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 1, + 2, + 0, + { + osm.KEY_OSMNX_OSMID: 2, + osm.KEY_OSMNX_LENGTH: 4, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) + lengths = gis_calc.edge_lengths(network, edge_keys=[(0, 1, 0), (1, 2, 0)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(0, 1, 0)] + network.edges[(1, 2, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(1, 2, 0)] number_edges = network.number_of_edges() if project_the_graph: network = ox.project_graph(G=network) - path = [0,1,2] + path = [0, 1, 2] new_edge_key = gis_mod.replace_path(network, path=path) # a new edge should exist assert network.has_edge(*new_edge_key) # assert that two edges are removed and one is created - assert network.number_of_edges() - number_edges == 1 - 2 + assert network.number_of_edges() - number_edges == 1 - 2 # intermediate nodes should not exist any longer for node in path[1:-1]: assert not network.has_node(node) @@ -269,73 +281,95 @@ class TestGisModify: assert isclose( sum(lengths.values()), network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], - abs_tol=1e-3 - ) + abs_tol=1e-3, + ) # the new edge needs to have a geometry because it is not simple assert osm.KEY_OSMNX_GEOMETRY in network.edges[new_edge_key] # the geometry must have 3 points - assert len( - tuple( - network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - ) == 3 - new_edge_key_lengths = gis_calc.edge_lengths( - network, - edge_keys=[new_edge_key] - ) + assert ( + len(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) == 3 + ) + new_edge_key_lengths = gis_calc.edge_lengths(network, edge_keys=[new_edge_key]) # the geometry's length needs to match that of the edge's assert isclose( network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], new_edge_key_lengths[new_edge_key], - abs_tol=1 if not project_the_graph else 3.861 # 3.8601244551728087 - ) + abs_tol=1 if not project_the_graph else 3.861, # 3.8601244551728087 + ) # print('hallo1') # print(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) # print(network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH]) # print(new_edge_key_lengths[new_edge_key]) - + # ********************************************************************* - + # small path with 3 
edges - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" network.graph["simplified"] = False # add nodes - network.add_nodes_from([ - (0,{osm.KEY_OSMNX_Y:56.00, osm.KEY_OSMNX_X: 12.00}), - (1,{osm.KEY_OSMNX_Y:56.01, osm.KEY_OSMNX_X: 12.00}), - (2,{osm.KEY_OSMNX_Y:56.02, osm.KEY_OSMNX_X: 12.01}), - (3,{osm.KEY_OSMNX_Y:56.04, osm.KEY_OSMNX_X: 12.02}), - ]) + network.add_nodes_from( + [ + (0, {osm.KEY_OSMNX_Y: 56.00, osm.KEY_OSMNX_X: 12.00}), + (1, {osm.KEY_OSMNX_Y: 56.01, osm.KEY_OSMNX_X: 12.00}), + (2, {osm.KEY_OSMNX_Y: 56.02, osm.KEY_OSMNX_X: 12.01}), + (3, {osm.KEY_OSMNX_Y: 56.04, osm.KEY_OSMNX_X: 12.02}), + ] + ) # add edges - network.add_edges_from([ - (0,1,0,{osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (1,2,0,{osm.KEY_OSMNX_OSMID: 2, - osm.KEY_OSMNX_LENGTH: 4, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (2,3,0,{osm.KEY_OSMNX_OSMID: 3, - osm.KEY_OSMNX_LENGTH: 5, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}) - ]) - lengths = gis_calc.edge_lengths(network, edge_keys=[(0,1,0),(1,2,0),(2,3,0)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_LENGTH] = lengths[(0,1,0)] - network.edges[(1,2,0)][osm.KEY_OSMNX_LENGTH] = lengths[(1,2,0)] - network.edges[(2,3,0)][osm.KEY_OSMNX_LENGTH] = lengths[(2,3,0)] + network.add_edges_from( + [ + ( + 0, + 1, + 0, + { + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 1, + 2, + 0, + { + osm.KEY_OSMNX_OSMID: 2, + osm.KEY_OSMNX_LENGTH: 4, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 2, + 3, + 0, + { + osm.KEY_OSMNX_OSMID: 3, + osm.KEY_OSMNX_LENGTH: 5, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) + lengths = gis_calc.edge_lengths( + network, edge_keys=[(0, 1, 0), (1, 2, 0), (2, 3, 0)] + ) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(0, 1, 0)] + network.edges[(1, 2, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(1, 2, 0)] + network.edges[(2, 3, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(2, 3, 0)] number_edges = network.number_of_edges() if project_the_graph: network = ox.project_graph(G=network) - path = [0,1,2,3] + path = [0, 1, 2, 3] new_edge_key = gis_mod.replace_path(network, path=path) # a new edge should exist assert network.has_edge(*new_edge_key) # assert that two edges are removed and one is created - assert network.number_of_edges() - number_edges == 1 - 3 + assert network.number_of_edges() - number_edges == 1 - 3 # intermediate nodes should not exist any longer for node in path[1:-1]: assert not network.has_node(node) @@ -343,72 +377,85 @@ class TestGisModify: assert isclose( sum(lengths.values()), network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], - abs_tol=1e-3 - ) + abs_tol=1e-3, + ) # the new edge needs to have a geometry because it is not simple assert osm.KEY_OSMNX_GEOMETRY in network.edges[new_edge_key] # the geometry must have 4 points - assert len( - tuple( - network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - ) == 4 - new_edge_key_lengths = gis_calc.edge_lengths( - network, - edge_keys=[new_edge_key] - ) + assert ( + len(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) == 4 + ) + new_edge_key_lengths = gis_calc.edge_lengths(network, edge_keys=[new_edge_key]) # the geometry's length needs to match that of the edge's assert isclose( network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], 
new_edge_key_lengths[new_edge_key], - abs_tol=1 if not project_the_graph else 7.33 # 7.327521377403173 - ) + abs_tol=1 if not project_the_graph else 7.33, # 7.327521377403173 + ) # print('hallo2') # print(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) # print(network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH]) # print(new_edge_key_lengths[new_edge_key]) - + # # ********************************************************************* - + # smallest possible path, but featuring simplified geometries already - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" network.graph["simplified"] = True # add nodes - network.add_nodes_from([ - (0,{osm.KEY_OSMNX_Y:56.00, osm.KEY_OSMNX_X: 12.00}), - (1,{osm.KEY_OSMNX_Y:56.01, osm.KEY_OSMNX_X: 12.00}), - (2,{osm.KEY_OSMNX_Y:56.02, osm.KEY_OSMNX_X: 12.01}), - ]) + network.add_nodes_from( + [ + (0, {osm.KEY_OSMNX_Y: 56.00, osm.KEY_OSMNX_X: 12.00}), + (1, {osm.KEY_OSMNX_Y: 56.01, osm.KEY_OSMNX_X: 12.00}), + (2, {osm.KEY_OSMNX_Y: 56.02, osm.KEY_OSMNX_X: 12.01}), + ] + ) # add edges - network.add_edges_from([ - (0,1,0,{osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_LENGTH: 5, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (1,2,0,{osm.KEY_OSMNX_OSMID: 2, - osm.KEY_OSMNX_LENGTH: 10, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}) - ]) + network.add_edges_from( + [ + ( + 0, + 1, + 0, + { + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_LENGTH: 5, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 1, + 2, + 0, + { + osm.KEY_OSMNX_OSMID: 2, + osm.KEY_OSMNX_LENGTH: 10, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) # build create geometries - edge01_geo = LineString([(12.00,56.00),(11.99,56.00),(12.00,56.01)]) - edge12_geo = LineString([(12.00,56.01),(12.02,56.00),(12.01,56.02)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo - network.edges[(1,2,0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo - lengths = gis_calc.edge_lengths(network, edge_keys=[(0,1,0),(1,2,0)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_LENGTH] = lengths[(0,1,0)] - network.edges[(1,2,0)][osm.KEY_OSMNX_LENGTH] = lengths[(1,2,0)] + edge01_geo = LineString([(12.00, 56.00), (11.99, 56.00), (12.00, 56.01)]) + edge12_geo = LineString([(12.00, 56.01), (12.02, 56.00), (12.01, 56.02)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo + network.edges[(1, 2, 0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo + lengths = gis_calc.edge_lengths(network, edge_keys=[(0, 1, 0), (1, 2, 0)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(0, 1, 0)] + network.edges[(1, 2, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(1, 2, 0)] number_edges = network.number_of_edges() if project_the_graph: network = ox.project_graph(G=network) - path = [0,1,2] + path = [0, 1, 2] new_edge_key = gis_mod.replace_path(network, path=path) # a new edge should exist assert network.has_edge(*new_edge_key) # assert that two edges are removed and one is created - assert network.number_of_edges() - number_edges == 1 - 2 + assert network.number_of_edges() - number_edges == 1 - 2 # intermediate nodes should not exist any longer for node in path[1:-1]: assert not network.has_node(node) @@ -416,70 +463,83 @@ class TestGisModify: assert isclose( sum(lengths.values()), network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], - abs_tol=1e-3 #1 if not project_the_graph else 1e-3 - ) + abs_tol=1e-3, # 1 if not project_the_graph else 1e-3 + ) # the new edge needs to have a geometry because it 
is not simple assert osm.KEY_OSMNX_GEOMETRY in network.edges[new_edge_key] # the geometry must have 5 points - assert len( - tuple( - network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - ) == 5 - new_edge_key_lengths = gis_calc.edge_lengths( - network, - edge_keys=[new_edge_key] - ) + assert ( + len(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) == 5 + ) + new_edge_key_lengths = gis_calc.edge_lengths(network, edge_keys=[new_edge_key]) # the geometry's length needs to match that of the edge's assert isclose( network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], new_edge_key_lengths[new_edge_key], - abs_tol=1 if not project_the_graph else 12.2 # -12.178460200064364 - ) - + abs_tol=1 if not project_the_graph else 12.2, # -12.178460200064364 + ) + # ********************************************************************* - + # smallest possible path, but featuring reversed geometries already (#1) - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" network.graph["simplified"] = True # add nodes - network.add_nodes_from([ - (0,{osm.KEY_OSMNX_Y:56.00, osm.KEY_OSMNX_X: 12.00}), - (1,{osm.KEY_OSMNX_Y:56.01, osm.KEY_OSMNX_X: 12.00}), - (2,{osm.KEY_OSMNX_Y:56.02, osm.KEY_OSMNX_X: 12.01}), - ]) + network.add_nodes_from( + [ + (0, {osm.KEY_OSMNX_Y: 56.00, osm.KEY_OSMNX_X: 12.00}), + (1, {osm.KEY_OSMNX_Y: 56.01, osm.KEY_OSMNX_X: 12.00}), + (2, {osm.KEY_OSMNX_Y: 56.02, osm.KEY_OSMNX_X: 12.01}), + ] + ) # add edges - network.add_edges_from([ - (0,1,0,{osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (1,2,0,{osm.KEY_OSMNX_OSMID: 2, - osm.KEY_OSMNX_LENGTH: 4, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}) - ]) + network.add_edges_from( + [ + ( + 0, + 1, + 0, + { + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 1, + 2, + 0, + { + osm.KEY_OSMNX_OSMID: 2, + osm.KEY_OSMNX_LENGTH: 4, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) # build create geometries - edge01_geo = LineString([(12.00,56.00),(11.99,56.00),(12.00,56.01)]) - edge12_geo = LineString([(12.01,56.02),(12.02,56.00),(12.00,56.01)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo - network.edges[(1,2,0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo - network.edges[(0,1,0)][osm.KEY_OSMNX_REVERSED] = False - network.edges[(1,2,0)][osm.KEY_OSMNX_REVERSED] = True - lengths = gis_calc.edge_lengths(network, edge_keys=[(0,1,0),(1,2,0)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_LENGTH] = lengths[(0,1,0)] - network.edges[(1,2,0)][osm.KEY_OSMNX_LENGTH] = lengths[(1,2,0)] + edge01_geo = LineString([(12.00, 56.00), (11.99, 56.00), (12.00, 56.01)]) + edge12_geo = LineString([(12.01, 56.02), (12.02, 56.00), (12.00, 56.01)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo + network.edges[(1, 2, 0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo + network.edges[(0, 1, 0)][osm.KEY_OSMNX_REVERSED] = False + network.edges[(1, 2, 0)][osm.KEY_OSMNX_REVERSED] = True + lengths = gis_calc.edge_lengths(network, edge_keys=[(0, 1, 0), (1, 2, 0)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(0, 1, 0)] + network.edges[(1, 2, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(1, 2, 0)] number_edges = network.number_of_edges() if project_the_graph: network = ox.project_graph(G=network) - path = [0,1,2] + path = [0, 1, 2] new_edge_key = gis_mod.replace_path(network, path=path) # a new edge 
should exist assert network.has_edge(*new_edge_key) # assert that two edges are removed and one is created - assert network.number_of_edges() - number_edges == 1 - 2 + assert network.number_of_edges() - number_edges == 1 - 2 # intermediate nodes should not exist any longer for node in path[1:-1]: assert not network.has_node(node) @@ -487,73 +547,86 @@ class TestGisModify: assert isclose( sum(lengths.values()), network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], - abs_tol=1e-3 - ) + abs_tol=1e-3, + ) # the new edge needs to have a geometry because it is not simple assert osm.KEY_OSMNX_GEOMETRY in network.edges[new_edge_key] - assert len( - tuple( - network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - ) == 5 - new_edge_key_lengths = gis_calc.edge_lengths( - network, - edge_keys=[new_edge_key] - ) + assert ( + len(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) == 5 + ) + new_edge_key_lengths = gis_calc.edge_lengths(network, edge_keys=[new_edge_key]) # the geometry's length needs to match that of the edge's assert isclose( network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], new_edge_key_lengths[new_edge_key], - abs_tol=1 if not project_the_graph else 12.2 # -12.178460200064364 - ) + abs_tol=1 if not project_the_graph else 12.2, # -12.178460200064364 + ) # print('hallo4') # print(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) # print(network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH]) # print(new_edge_key_lengths[new_edge_key]) - + # ********************************************************************* - + # smallest possible path, but featuring reversed geometries already (#2) - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" network.graph["simplified"] = True # add nodes - network.add_nodes_from([ - (0,{osm.KEY_OSMNX_Y:56.00, osm.KEY_OSMNX_X: 12.00}), - (1,{osm.KEY_OSMNX_Y:56.01, osm.KEY_OSMNX_X: 12.00}), - (2,{osm.KEY_OSMNX_Y:56.02, osm.KEY_OSMNX_X: 12.01}), - ]) + network.add_nodes_from( + [ + (0, {osm.KEY_OSMNX_Y: 56.00, osm.KEY_OSMNX_X: 12.00}), + (1, {osm.KEY_OSMNX_Y: 56.01, osm.KEY_OSMNX_X: 12.00}), + (2, {osm.KEY_OSMNX_Y: 56.02, osm.KEY_OSMNX_X: 12.01}), + ] + ) # add edges - network.add_edges_from([ - (0,1,0,{osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_LENGTH: 3, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (1,2,0,{osm.KEY_OSMNX_OSMID: 2, - osm.KEY_OSMNX_LENGTH: 4, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}) - ]) + network.add_edges_from( + [ + ( + 0, + 1, + 0, + { + osm.KEY_OSMNX_OSMID: 1, + osm.KEY_OSMNX_LENGTH: 3, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 1, + 2, + 0, + { + osm.KEY_OSMNX_OSMID: 2, + osm.KEY_OSMNX_LENGTH: 4, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) # build create geometries - edge01_geo = LineString([(12.00,56.01),(11.99,56.00),(12.00,56.00)]) - edge12_geo = LineString([(12.00,56.01),(12.02,56.00),(12.01,56.02)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo - network.edges[(1,2,0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo - network.edges[(0,1,0)][osm.KEY_OSMNX_REVERSED] = True - network.edges[(1,2,0)][osm.KEY_OSMNX_REVERSED] = False - lengths = gis_calc.edge_lengths(network, edge_keys=[(0,1,0),(1,2,0)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_LENGTH] = lengths[(0,1,0)] - network.edges[(1,2,0)][osm.KEY_OSMNX_LENGTH] = lengths[(1,2,0)] + edge01_geo = LineString([(12.00, 56.01), (11.99, 56.00), (12.00, 56.00)]) + edge12_geo = 
LineString([(12.00, 56.01), (12.02, 56.00), (12.01, 56.02)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo + network.edges[(1, 2, 0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo + network.edges[(0, 1, 0)][osm.KEY_OSMNX_REVERSED] = True + network.edges[(1, 2, 0)][osm.KEY_OSMNX_REVERSED] = False + lengths = gis_calc.edge_lengths(network, edge_keys=[(0, 1, 0), (1, 2, 0)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(0, 1, 0)] + network.edges[(1, 2, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(1, 2, 0)] number_edges = network.number_of_edges() if project_the_graph: network = ox.project_graph(G=network) - path = [0,1,2] + path = [0, 1, 2] new_edge_key = gis_mod.replace_path(network, path=path) # a new edge should exist assert network.has_edge(*new_edge_key) # assert that two edges are removed and one is created - assert network.number_of_edges() - number_edges == 1 - 2 + assert network.number_of_edges() - number_edges == 1 - 2 # intermediate nodes should not exist any longer for node in path[1:-1]: assert not network.has_node(node) @@ -561,82 +634,104 @@ class TestGisModify: assert isclose( sum(lengths.values()), network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], - abs_tol=1e-3 - ) + abs_tol=1e-3, + ) # the new edge needs to have a geometry because it is not simple assert osm.KEY_OSMNX_GEOMETRY in network.edges[new_edge_key] - assert len( - tuple( - network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - ) == 5 - new_edge_key_lengths = gis_calc.edge_lengths( - network, - edge_keys=[new_edge_key] - ) + assert ( + len(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) == 5 + ) + new_edge_key_lengths = gis_calc.edge_lengths(network, edge_keys=[new_edge_key]) # the geometry's length needs to match that of the edge's assert isclose( network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], new_edge_key_lengths[new_edge_key], - abs_tol=1 if not project_the_graph else 12.2 # -12.178460200064364 - ) + abs_tol=1 if not project_the_graph else 12.2, # -12.178460200064364 + ) # print('hallo5') # print(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) # print(network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH]) # print(new_edge_key_lengths[new_edge_key]) - + # ********************************************************************* - + # small path with 3 edges, but featuring reversed geometries already - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" + network.graph["crs"] = "EPSG:4326" network.graph["simplified"] = True # add nodes - network.add_nodes_from([ - (0,{osm.KEY_OSMNX_Y:56.00, osm.KEY_OSMNX_X: 12.00}), - (1,{osm.KEY_OSMNX_Y:56.01, osm.KEY_OSMNX_X: 12.00}), - (2,{osm.KEY_OSMNX_Y:56.02, osm.KEY_OSMNX_X: 12.01}), - (3,{osm.KEY_OSMNX_Y:56.04, osm.KEY_OSMNX_X: 12.02}), - ]) + network.add_nodes_from( + [ + (0, {osm.KEY_OSMNX_Y: 56.00, osm.KEY_OSMNX_X: 12.00}), + (1, {osm.KEY_OSMNX_Y: 56.01, osm.KEY_OSMNX_X: 12.00}), + (2, {osm.KEY_OSMNX_Y: 56.02, osm.KEY_OSMNX_X: 12.01}), + (3, {osm.KEY_OSMNX_Y: 56.04, osm.KEY_OSMNX_X: 12.02}), + ] + ) # add edges - network.add_edges_from([ - (0,1,0,{osm.KEY_OSMNX_OSMID: 1, - osm.KEY_OSMNX_LENGTH: 6, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (1,2,0,{osm.KEY_OSMNX_OSMID: 2, - osm.KEY_OSMNX_LENGTH: 7, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}), - (2,3,0,{osm.KEY_OSMNX_OSMID: 3, - osm.KEY_OSMNX_LENGTH: 8, - osm.KEY_OSMNX_REVERSED: False, - osm.KEY_OSMNX_ONEWAY: False}) - ]) + network.add_edges_from( + [ + ( + 0, + 1, + 0, + { + osm.KEY_OSMNX_OSMID: 1, + 
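                        # note: osmid, length, reversed and oneway are the
                        # attributes these tests give every edge, mirroring the
                        # attribute set osmnx normally provides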
osm.KEY_OSMNX_LENGTH: 6, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 1, + 2, + 0, + { + osm.KEY_OSMNX_OSMID: 2, + osm.KEY_OSMNX_LENGTH: 7, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ( + 2, + 3, + 0, + { + osm.KEY_OSMNX_OSMID: 3, + osm.KEY_OSMNX_LENGTH: 8, + osm.KEY_OSMNX_REVERSED: False, + osm.KEY_OSMNX_ONEWAY: False, + }, + ), + ] + ) # build create geometries - edge01_geo = LineString([(12.00,56.00),(11.99,56.02),(12.00,56.01)]) - edge12_geo = LineString([(12.00,56.01),(12.02,56.00),(12.01,56.02)]) - edge23_geo = LineString([(12.01,56.02),(12.05,56.10),(12.02,56.04)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo - network.edges[(1,2,0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo - network.edges[(2,3,0)][osm.KEY_OSMNX_GEOMETRY] = edge23_geo - network.edges[(0,1,0)][osm.KEY_OSMNX_REVERSED] = False - network.edges[(1,2,0)][osm.KEY_OSMNX_REVERSED] = True - network.edges[(2,3,0)][osm.KEY_OSMNX_REVERSED] = False - lengths = gis_calc.edge_lengths(network, edge_keys=[(0,1,0),(1,2,0),(2,3,0)]) - network.edges[(0,1,0)][osm.KEY_OSMNX_LENGTH] = lengths[(0,1,0)] - network.edges[(1,2,0)][osm.KEY_OSMNX_LENGTH] = lengths[(1,2,0)] - network.edges[(2,3,0)][osm.KEY_OSMNX_LENGTH] = lengths[(2,3,0)] + edge01_geo = LineString([(12.00, 56.00), (11.99, 56.02), (12.00, 56.01)]) + edge12_geo = LineString([(12.00, 56.01), (12.02, 56.00), (12.01, 56.02)]) + edge23_geo = LineString([(12.01, 56.02), (12.05, 56.10), (12.02, 56.04)]) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_GEOMETRY] = edge01_geo + network.edges[(1, 2, 0)][osm.KEY_OSMNX_GEOMETRY] = edge12_geo + network.edges[(2, 3, 0)][osm.KEY_OSMNX_GEOMETRY] = edge23_geo + network.edges[(0, 1, 0)][osm.KEY_OSMNX_REVERSED] = False + network.edges[(1, 2, 0)][osm.KEY_OSMNX_REVERSED] = True + network.edges[(2, 3, 0)][osm.KEY_OSMNX_REVERSED] = False + lengths = gis_calc.edge_lengths( + network, edge_keys=[(0, 1, 0), (1, 2, 0), (2, 3, 0)] + ) + network.edges[(0, 1, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(0, 1, 0)] + network.edges[(1, 2, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(1, 2, 0)] + network.edges[(2, 3, 0)][osm.KEY_OSMNX_LENGTH] = lengths[(2, 3, 0)] number_edges = network.number_of_edges() if project_the_graph: network = ox.project_graph(G=network) - path = [0,1,2,3] + path = [0, 1, 2, 3] new_edge_key = gis_mod.replace_path(network, path=path) # a new edge should exist assert network.has_edge(*new_edge_key) # assert that three edges are removed and one is created - assert network.number_of_edges() - number_edges == 1 - 3 + assert network.number_of_edges() - number_edges == 1 - 3 # intermediate nodes should not exist any longer for node in path[1:-1]: assert not network.has_node(node) @@ -644,673 +739,652 @@ class TestGisModify: assert isclose( sum(lengths.values()), network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], - abs_tol=1e-3 - ) + abs_tol=1e-3, + ) # the new edge needs to have a geometry because it is not simple assert osm.KEY_OSMNX_GEOMETRY in network.edges[new_edge_key] - assert len( - tuple( - network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - ) == 7 - new_edge_key_lengths = gis_calc.edge_lengths( - network, - edge_keys=[new_edge_key] - ) + assert ( + len(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) == 7 + ) + new_edge_key_lengths = gis_calc.edge_lengths(network, edge_keys=[new_edge_key]) # the geometry's length needs to match that of the edge's assert isclose( network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH], new_edge_key_lengths[new_edge_key], -
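            # a much looser tolerance is needed in the projected case because
            # projecting the graph distorts the geometry's computed length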
abs_tol=1 if not project_the_graph else 37.77 # -37.76434146326574 - ) + abs_tol=1 if not project_the_graph else 37.77, # -37.76434146326574 + ) # print('hallo6') # print(tuple(network.edges[new_edge_key][osm.KEY_OSMNX_GEOMETRY].coords)) # print(network.edges[new_edge_key][osm.KEY_OSMNX_LENGTH]) # print(new_edge_key_lengths[new_edge_key]) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_replace_paths_unprojected(self): - self.example_replace_paths(project_the_graph=False) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_replace_paths_projected(self): - self.example_replace_paths(project_the_graph=True) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_remove_reversed_edges(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + _net = network.copy() - + reversed_attribute = False removed_edges = gis_mod.remove_reversed_edges( - network=_net, - reversed_attr=reversed_attribute - ) + network=_net, reversed_attr=reversed_attribute + ) # verify that all edges have the correct attribute for edge_key in removed_edges: # confirm that there is at least one edge in reverse reverse_edge_found = False for other_edge_key in gis_iden.get_edges_from_a_to_b( - network, - edge_key[1], - edge_key[0]): + network, edge_key[1], edge_key[0] + ): if gis_iden.edges_are_in_reverse( - network, - edge_key, - other_edge_key) and _net.has_edge(*other_edge_key): + network, edge_key, other_edge_key + ) and _net.has_edge(*other_edge_key): # other_edge_key is a reversed edge and is still on _net reverse_edge_found = True break assert reverse_edge_found - + # ********************************************************************* - + # repeat for True argument _net = network.copy() reversed_attribute = True removed_edges = gis_mod.remove_reversed_edges( - network=_net, - reversed_attr=reversed_attribute - ) + network=_net, reversed_attr=reversed_attribute + ) # verify that all edges have the correct attribute for edge_key in removed_edges: # confirm that there is at least one edge in reverse reverse_edge_found = False for other_edge_key in gis_iden.get_edges_from_a_to_b( - network, - edge_key[1], - edge_key[0]): + network, edge_key[1], edge_key[0] + ): if gis_iden.edges_are_in_reverse( - network, - edge_key, - other_edge_key) and _net.has_edge(*other_edge_key): + network, edge_key, other_edge_key + ) and _net.has_edge(*other_edge_key): # other_edge_key is a reversed edge and is still on _net reverse_edge_found = True break assert reverse_edge_found - + # 
************************************************************************* # ************************************************************************* - + def test_remove_self_loops(self): - # find one self-loop network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0), - (1,2,0), - (2,0,0), - (1,1,0) - ]) + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0), (1, 1, 0)]) true_selflooping_nodes = [1] selflooping_nodes = gis_mod.remove_self_loops(network) assert len(selflooping_nodes) == len(true_selflooping_nodes) for node_key in selflooping_nodes: assert node_key in true_selflooping_nodes - + # find two self-loops network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0), - (1,2,0), - (2,0,0), - (1,1,0), - (2,2,0) - ]) - true_selflooping_nodes = [1,2] + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0), (1, 1, 0), (2, 2, 0)]) + true_selflooping_nodes = [1, 2] selflooping_nodes = gis_mod.remove_self_loops(network) assert len(selflooping_nodes) == len(true_selflooping_nodes) - for node_key in selflooping_nodes: - assert node_key in true_selflooping_nodes - + for node_key in selflooping_nodes: + assert node_key in true_selflooping_nodes + # find no self-loops network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0), - (1,2,0), - (2,0,0) - ]) + network.add_edges_from([(0, 1, 0), (1, 2, 0), (2, 0, 0)]) selflooping_nodes = gis_mod.remove_self_loops(network) assert len(selflooping_nodes) == 0 - + # ************************************************************************* # ************************************************************************* - + def test_remove_longer_edges(self): - # simple example network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0,{'length': 3}), - (1,2,0,{'length': 4}), - (2,0,0,{'length': 5}), - # additional edges - (0,1,1,{'length': 4}), - (1,2,1,{'length': 5}), - (2,0,1,{'length': 6}), - ]) + network.add_edges_from( + [ + (0, 1, 0, {"length": 3}), + (1, 2, 0, {"length": 4}), + (2, 0, 0, {"length": 5}), + # additional edges + (0, 1, 1, {"length": 4}), + (1, 2, 1, {"length": 5}), + (2, 0, 1, {"length": 6}), + ] + ) initial_number_edges = network.number_of_edges() - true_edges_removed = [(0,1,1),(1,2,1),(2,0,1)] + true_edges_removed = [(0, 1, 1), (1, 2, 1), (2, 0, 1)] edges_removed = gis_mod.remove_longer_parallel_edges(network) assert len(edges_removed) == len(true_edges_removed) for edge_key in edges_removed: assert edge_key in true_edges_removed assert network.number_of_edges() == initial_number_edges - len(edges_removed) - + # example with more than one alternative network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0,{'length': 3}), - (1,2,0,{'length': 4}), - (2,0,0,{'length': 5}), - # additional edges - (0,1,1,{'length': 4}), - (0,1,2,{'length': 5}), - (0,1,3,{'length': 6}), - ]) + network.add_edges_from( + [ + (0, 1, 0, {"length": 3}), + (1, 2, 0, {"length": 4}), + (2, 0, 0, {"length": 5}), + # additional edges + (0, 1, 1, {"length": 4}), + (0, 1, 2, {"length": 5}), + (0, 1, 3, {"length": 6}), + ] + ) initial_number_edges = network.number_of_edges() - true_edges_removed = [(0,1,1),(0,1,2),(0,1,3)] + true_edges_removed = [(0, 1, 1), (0, 1, 2), (0, 1, 3)] edges_removed = gis_mod.remove_longer_parallel_edges(network) assert len(edges_removed) == len(true_edges_removed) for edge_key in edges_removed: assert edge_key in true_edges_removed assert network.number_of_edges() == initial_number_edges - len(edges_removed) - + # example with opposite edges (that won't be removed) network = nx.MultiDiGraph() - 
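        # the (1,0,0), (2,1,0) and (0,2,0) edges below run in the opposite
        # direction to existing edges; remove_longer_parallel_edges only treats
        # them as parallel when its second argument is True, as in the
        # "(that will be removed)" example further below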
network.add_edges_from([ - (0,1,0,{'length': 3}), - (1,2,0,{'length': 4}), - (2,0,0,{'length': 5}), - # additional edges - (0,1,1,{'length': 4}), - (1,2,1,{'length': 5}), - (2,0,1,{'length': 6}), - # oppposite edges - (1,0,0,{'length': 7}), - (2,1,0,{'length': 8}), - (0,2,0,{'length': 9}), - ]) + network.add_edges_from( + [ + (0, 1, 0, {"length": 3}), + (1, 2, 0, {"length": 4}), + (2, 0, 0, {"length": 5}), + # additional edges + (0, 1, 1, {"length": 4}), + (1, 2, 1, {"length": 5}), + (2, 0, 1, {"length": 6}), + # opposite edges + (1, 0, 0, {"length": 7}), + (2, 1, 0, {"length": 8}), + (0, 2, 0, {"length": 9}), + ] + ) initial_number_edges = network.number_of_edges() - true_edges_removed = [(0,1,1),(1,2,1),(2,0,1)] + true_edges_removed = [(0, 1, 1), (1, 2, 1), (2, 0, 1)] edges_removed = gis_mod.remove_longer_parallel_edges(network) assert len(edges_removed) == len(true_edges_removed) for edge_key in edges_removed: assert edge_key in true_edges_removed assert network.number_of_edges() == initial_number_edges - len(edges_removed) - + # example with opposite edges (that will be removed) - + network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0,{'length': 3}), - (1,2,0,{'length': 4}), - (2,0,0,{'length': 5}), - # additional edges - (0,1,1,{'length': 4}), - (1,2,1,{'length': 5}), - (2,0,1,{'length': 6}), - # oppposite edges - (1,0,0,{'length': 7}), - (2,1,0,{'length': 8}), - (0,2,0,{'length': 9}), - ]) + network.add_edges_from( + [ + (0, 1, 0, {"length": 3}), + (1, 2, 0, {"length": 4}), + (2, 0, 0, {"length": 5}), + # additional edges + (0, 1, 1, {"length": 4}), + (1, 2, 1, {"length": 5}), + (2, 0, 1, {"length": 6}), + # opposite edges + (1, 0, 0, {"length": 7}), + (2, 1, 0, {"length": 8}), + (0, 2, 0, {"length": 9}), + ] + ) initial_number_edges = network.number_of_edges() true_edges_removed = [ - (0,1,1),(1,2,1),(2,0,1),(1,0,0),(2,1,0),(0,2,0) - ] + (0, 1, 1), + (1, 2, 1), + (2, 0, 1), + (1, 0, 0), + (2, 1, 0), + (0, 2, 0), + ] edges_removed = gis_mod.remove_longer_parallel_edges(network, True) assert len(edges_removed) == len(true_edges_removed) for edge_key in edges_removed: assert edge_key in true_edges_removed assert network.number_of_edges() == initial_number_edges - len(edges_removed) - + # test using non-integers as node keys network = nx.MultiDiGraph() - network.add_edges_from([ - (0,'a',0,{'length': 3}), - ('a','b',0,{'length': 4}), - ('b',0,0,{'length': 5}), - # additional edges - (0,'a',1,{'length': 4}), - ('a','b',1,{'length': 5}), - ('b',0,1,{'length': 6}), - # oppposite edges - ('a',0,0,{'length': 7}), - ('b','a',0,{'length': 8}), - (0,'b',0,{'length': 9}), - ]) + network.add_edges_from( + [ + (0, "a", 0, {"length": 3}), + ("a", "b", 0, {"length": 4}), + ("b", 0, 0, {"length": 5}), + # additional edges + (0, "a", 1, {"length": 4}), + ("a", "b", 1, {"length": 5}), + ("b", 0, 1, {"length": 6}), + # opposite edges + ("a", 0, 0, {"length": 7}), + ("b", "a", 0, {"length": 8}), + (0, "b", 0, {"length": 9}), + ] + ) initial_number_edges = network.number_of_edges() true_edges_removed = [ - (0,'a',1),('a','b',1),('b',0,1),('a',0,0),('b','a',0),(0,'b',0) - ] + (0, "a", 1), + ("a", "b", 1), + ("b", 0, 1), + ("a", 0, 0), + ("b", "a", 0), + (0, "b", 0), + ] edges_removed = gis_mod.remove_longer_parallel_edges(network, True) assert len(edges_removed) == len(true_edges_removed) - for edge_key in edges_removed: + for edge_key in edges_removed: assert edge_key in true_edges_removed assert network.number_of_edges() == initial_number_edges - len(edges_removed) - + # 
************************************************************************* # ************************************************************************* - + def test_remove_dead_ends(self): - # ********************************************************************* - + # example without dead ends network = nx.MultiDiGraph() nodes_removed = gis_mod.remove_dead_ends(network) assert len(nodes_removed) == 0 - + # ********************************************************************* - + # minimal example network = nx.MultiDiGraph() - network.add_edges_from([ - (0,1,0), (1,2,0), (2,0,0), - # removable edges - (3,0,0), - (4,1,0), - (5,2,0) - ]) + network.add_edges_from( + [ + (0, 1, 0), + (1, 2, 0), + (2, 0, 0), + # removable edges + (3, 0, 0), + (4, 1, 0), + (5, 2, 0), + ] + ) nodes_removed = gis_mod.remove_dead_ends(network) - true_nodes_removed = [3,4,5] + true_nodes_removed = [3, 4, 5] for node_key in true_nodes_removed: assert node_key in nodes_removed assert len(nodes_removed) == len(true_nodes_removed) - + # ********************************************************************* - + # example requiring multiple iterations - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0,1,0), (1,2,0), (2,0,0), - # removable edges - (3,0,0), - (4,1,0), - (5,2,0), - (3,6,0), - (4,7,0), - (5,8,0) - ]) - + + network.add_edges_from( + [ + (0, 1, 0), + (1, 2, 0), + (2, 0, 0), + # removable edges + (3, 0, 0), + (4, 1, 0), + (5, 2, 0), + (3, 6, 0), + (4, 7, 0), + (5, 8, 0), + ] + ) + nodes_removed = gis_mod.remove_dead_ends(network, max_iterations=2) - - true_nodes_removed = [3,4,5,6,7,8] - + + true_nodes_removed = [3, 4, 5, 6, 7, 8] + for node_key in true_nodes_removed: - assert node_key in nodes_removed - + assert len(nodes_removed) == len(true_nodes_removed) - + # ********************************************************************* - + # example with self-loops - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0,1,0), (1,2,0), (2,0,0), - # removable edges - (3,0,0), - (4,1,0), - (5,2,0), - (3,6,0), - (4,7,0), - (5,8,0), - (8,8,0), - (4,4,0) - ]) - + + network.add_edges_from( + [ + (0, 1, 0), + (1, 2, 0), + (2, 0, 0), + # removable edges + (3, 0, 0), + (4, 1, 0), + (5, 2, 0), + (3, 6, 0), + (4, 7, 0), + (5, 8, 0), + (8, 8, 0), + (4, 4, 0), + ] + ) + nodes_removed = gis_mod.remove_dead_ends(network, max_iterations=2) - - true_nodes_removed = [3,4,5,6,7,8] - + + true_nodes_removed = [3, 4, 5, 6, 7, 8] + for node_key in true_nodes_removed: - assert node_key in nodes_removed - + assert len(nodes_removed) == len(true_nodes_removed) - + # ********************************************************************* - + # example with excluded nodes - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0,1,0), (1,2,0), (2,0,0), - # removable edges - (3,0,0),(3,6,0), # 3 and 6 can be removed - (4,1,0),(4,7,0), # only 7 can be removed - (5,2,0),(5,8,0) # 8 cannot be removed, nor can 5 - ]) - + + network.add_edges_from( + [ + (0, 1, 0), + (1, 2, 0), + (2, 0, 0), + # removable edges + (3, 0, 0), + (3, 6, 0), # 3 and 6 can be removed + (4, 1, 0), + (4, 7, 0), # only 7 can be removed + (5, 2, 0), + (5, 8, 0), # 8 cannot be removed, nor can 5 + ] + ) + nodes_removed = gis_mod.remove_dead_ends( - network, - keepers=[4, 8], - max_iterations=2 - ) - - true_nodes_removed = [3,6,7] - + network, keepers=[4, 8], max_iterations=2 + ) + + true_nodes_removed = [3, 6, 7] + for node_key in true_nodes_removed: - assert node_key in nodes_removed - + assert len(nodes_removed) == len(true_nodes_removed) - + # 
********************************************************************* - + # example with forward and reverse edges - + network = nx.MultiDiGraph() - - network.add_edges_from([ - (0,1,0), (1,2,0), (2,0,0), - # removable edges - (3,0,0),(0,3,0),(3,6,0), # branch has forward and reverse edges - (4,1,0),(1,4,0),(4,7,0), # branch has forward and reverse edges - (5,2,0),(5,8,0) # branch not affected - - ]) - + + network.add_edges_from( + [ + (0, 1, 0), + (1, 2, 0), + (2, 0, 0), + # removable edges + (3, 0, 0), + (0, 3, 0), + (3, 6, 0), # branch has forward and reverse edges + (4, 1, 0), + (1, 4, 0), + (4, 7, 0), # branch has forward and reverse edges + (5, 2, 0), + (5, 8, 0), # branch not affected + ] + ) + nodes_removed = gis_mod.remove_dead_ends(network, max_iterations=2) - - true_nodes_removed = [3,4,5,6,7,8] - + + true_nodes_removed = [3, 4, 5, 6, 7, 8] + for node_key in true_nodes_removed: - assert node_key in nodes_removed - + assert len(nodes_removed) == len(true_nodes_removed) - - + # ************************************************************************* # ************************************************************************* - + def test_remove_dead_ends_osmnx(self): - # test removing dead ends without restrictions - + # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + _net = network.copy() - + max_iterations = 5 - + nodes_removed = gis_mod.remove_dead_ends( - _net, - keepers=[], - max_iterations=max_iterations) - + _net, keepers=[], max_iterations=max_iterations + ) + # TODO: perform checks - + # ************************************************************************* # ************************************************************************* - + def example_connect_points_to_edges_osmnx( - self, - _net: nx.MultiDiGraph, - use_two_edges = False, - store_unsimplified_geo: bool = False, - project_network: bool = False): - + self, + _net: nx.MultiDiGraph, + use_two_edges=False, + store_unsimplified_geo: bool = False, + project_network: bool = False, + ): # test using projected graph - + # _net = network.copy() all_node_keys = tuple(_net.nodes()) all_edge_keys = tuple(_net.edges(keys=True)) - number_edges = len(all_edge_keys) + number_edges = len(all_edge_keys) number_nodes = len(all_node_keys) - + # create three random nodes number_new_points = 3 number_subgroup_points = ( - int(number_nodes/10) if number_nodes >= 20 else 2 - ) # should be at least 2 points + int(number_nodes / 10) if number_nodes >= 20 else 2 + ) # should be at least 2 points node_keys = [] for i in range(number_new_points): # define the sampling points sampling_point_node_keys = [ - all_node_keys[random.randint(0,number_nodes-1)] + all_node_keys[random.randint(0, number_nodes - 1)] for j in range(number_subgroup_points) - ] + ] # define the x and y coordinates - x = mean( - _net.nodes[node_key]['x'] - for node_key in sampling_point_node_keys - ) - y = mean( - _net.nodes[node_key]['y'] - for node_key in sampling_point_node_keys - ) + x = mean(_net.nodes[node_key]["x"] for node_key in sampling_point_node_keys) + y = mean(_net.nodes[node_key]["y"] for node_key in sampling_point_node_keys) node_key = prob_utils.unused_node_key(_net) # add the node _net.add_node(node_key, x=x, y=y) - # store the key + # store the key node_keys.append(node_key) - + # pick random edges to which to connect each new 
point edge_keys = [ - all_edge_keys[random.randint(0,number_edges-1)] + all_edge_keys[random.randint(0, number_edges - 1)] for i in range(number_new_points) - ] + ] # record paths and distances all_paths_ab = { edge_key: list( nx.all_simple_edge_paths( - _net, - edge_key[0], + _net, + edge_key[0], edge_key[1], - cutoff=2+len(node_keys) # only one extra node - ) + cutoff=2 + len(node_keys), # only one extra node ) + ) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length(_net, path) - for path in all_paths_ab[edge_key] - ] + gis_calc.edge_path_length(_net, path) for path in all_paths_ab[edge_key] + ] for edge_key in edge_keys - } + } initial_number_edges = _net.number_of_edges() - + if project_network: _net = ox.project_graph(G=_net) - + # connect them mod_net, _, _, _ = gis_mod.connect_nodes_to_edges( - _net, - node_keys, - edge_keys, + _net, + node_keys, + edge_keys, store_unsimplified_geometries=store_unsimplified_geo, - use_one_edge_per_direction=use_two_edges - ) - + use_one_edge_per_direction=use_two_edges, + ) + # check the changes: all self.check_split_recreate_connect( - network=mod_net, - edge_keys=edge_keys, - node_keys=node_keys, - all_paths_ab=all_paths_ab, + network=mod_net, + edge_keys=edge_keys, + node_keys=node_keys, + all_paths_ab=all_paths_ab, original_path_lengths_ab=original_path_lengths_ab, - abs_tol=0.925 # 0.29327665321937957, 0.9249539991553775 - ) - + abs_tol=0.925, # 0.29327665321937957, 0.9249539991553775 + ) + # there should be at least one extra edge per node - + assert mod_net.number_of_edges() >= initial_number_edges + len(node_keys) - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_connect_points_to_edges_osmnx_default(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) # find one edge and create a reversed version w/ inconsistent geometry # to cover a few more lines of tests in check_split_recreate_connect for edge_key in network.edges(keys=True): break edge_dict = network.get_edge_data(*edge_key) - network.add_edge( - edge_key[1], - edge_key[0], - **edge_dict - ) + network.add_edge(edge_key[1], edge_key[0], **edge_dict) # try method self.example_connect_points_to_edges_osmnx( _net=network, use_two_edges=False, store_unsimplified_geo=False, - project_network=False - ) - + project_network=False, + ) + # ************************************************************************* # ************************************************************************* - + def test_connect_points_to_edges_osmnx_2edges(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) # find one edge and create a reversed version w/ inconsistent geometry # to 
cover a few more lines of tests in check_split_recreate_connect for edge_key in network.edges(keys=True): break edge_dict = network.get_edge_data(*edge_key) - network.add_edge( - edge_key[1], - edge_key[0], - **edge_dict - ) + network.add_edge(edge_key[1], edge_key[0], **edge_dict) # try method self.example_connect_points_to_edges_osmnx( _net=network, use_two_edges=True, store_unsimplified_geo=False, - project_network=False - ) - + project_network=False, + ) + # ************************************************************************* # ************************************************************************* - + def test_connect_points_to_edges_osmnx_unsimplified(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) # try method self.example_connect_points_to_edges_osmnx( _net=network, use_two_edges=False, store_unsimplified_geo=True, - project_network=False - ) - + project_network=False, + ) + # ************************************************************************* # ************************************************************************* - + def test_connect_points_to_edges_osmnx_projected(self): - # get the network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', - custom_filter=( - '["highway"~"residential|tertiary|unclassified|service"]' - ), - truncate_by_edge=True - ) + (55.71654, 9.11728), + network_type="drive", + custom_filter=('["highway"~"residential|tertiary|unclassified|service"]'), + truncate_by_edge=True, + ) # try method self.example_connect_points_to_edges_osmnx( _net=network, use_two_edges=False, store_unsimplified_geo=False, - project_network=True - ) - + project_network=True, + ) + # ************************************************************************* # ************************************************************************* - + def check_split_recreate_connect( - self, - network, - edge_keys, - node_keys, - all_paths_ab, - original_path_lengths_ab, - abs_tol: float = 2e-3): - + self, + network, + edge_keys, + node_keys, + all_paths_ab, + original_path_lengths_ab, + abs_tol: float = 2e-3, + ): new_paths_ab = { edge_key: list( nx.all_simple_edge_paths( - network, - edge_key[0], + network, + edge_key[0], edge_key[1], - cutoff=2+len(node_keys) # keep it as short as possible - ) + cutoff=2 + len(node_keys), # keep it as short as possible ) + ) for edge_key in edge_keys - } + } # how to check? 
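        # rationale: connecting nodes only splits edges at projection points,
        # so every pre-existing simple path between an edge's endpoints should
        # survive with (roughly) the same total length, and any new path should
        # retrace the original edge through the added intermediate nodes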
# for each edge and node pair for edge_key, node_key in zip(edge_keys, node_keys): @@ -1318,7 +1392,7 @@ class TestGisModify: assert nx.has_path(network, edge_key[0], edge_key[1]) # the nodes must be connected to the start node assert nx.has_path(network, edge_key[0], node_key) - # length from beginning to end must be roughly the same + # length from beginning to end must be roughly the same for new_path in new_paths_ab[edge_key]: # exclude new paths with self-loops no_self_loops = True @@ -1329,31 +1403,25 @@ class TestGisModify: if not no_self_loops: # there are self loops, skip path continue - if new_path in all_paths_ab[edge_key]: + if new_path in all_paths_ab[edge_key]: # old path: it must have the same length (the edge is unchanged) - assert isclose( - gis_calc.edge_path_length( - network, - new_path - ), - original_path_lengths_ab[edge_key][ + assert isclose( + gis_calc.edge_path_length(network, new_path), + original_path_lengths_ab[edge_key][ all_paths_ab[edge_key].index(new_path) - ], - abs_tol=abs_tol - ) + ], + abs_tol=abs_tol, + ) else: # new path:must have the same length (with intermediate points) - assert isclose( - gis_calc.edge_path_length( - network, - new_path - ), - original_path_lengths_ab[edge_key][ + assert isclose( + gis_calc.edge_path_length(network, new_path), + original_path_lengths_ab[edge_key][ all_paths_ab[edge_key].index([edge_key]) - ], - abs_tol=abs_tol - ) - + ], + abs_tol=abs_tol, + ) + # each edge with a geometry must have the correct start and end points for edge_key in network.edges(keys=True): # if gis_mod.KEY_OSMNX_GEOMETRY @@ -1363,609 +1431,515 @@ class TestGisModify: start_y = network.nodes[edge_key[0]][osm.KEY_OSMNX_Y] end_x = network.nodes[edge_key[1]][osm.KEY_OSMNX_X] end_y = network.nodes[edge_key[1]][osm.KEY_OSMNX_Y] - coords = tuple( - network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY].coords - ) - + coords = tuple(network.edges[edge_key][osm.KEY_OSMNX_GEOMETRY].coords) + if gis_iden.is_edge_consistent_with_geometry(network, edge_key): # no reversed attr or not reversed assert start_x == coords[0][0] assert start_y == coords[0][1] assert end_x == coords[-1][0] assert end_y == coords[-1][1] - else: + else: # reversed attr and reversed assert start_x == coords[-1][0] assert start_y == coords[-1][1] assert end_x == coords[0][0] assert end_y == coords[0][1] - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_connect_points_to_edges(self): - # single edge, one intermediate point - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) network.add_node(2, x=0.4, y=0.6) network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), undirected=False - ) + ) node_keys = [2] edge_keys = [edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { 
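            # lengths are stored in the same order as the paths in all_paths_ab,
            # so each path's recomputed length can later be compared by index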
edge_key: [ gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } + } initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge must no longer exist - assert not network.has_edge(*edge_keys[0]) + assert not network.has_edge(*edge_keys[0]) # edge balance: one edge less, two replacements, plus one per node - assert network.number_of_edges() - initial_number_edges == 2-1+1 - # make sure the edge was correctly recreated + assert network.number_of_edges() - initial_number_edges == 2 - 1 + 1 + # make sure the edge was correctly recreated self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) + abs_tol=1e-3, + ) # make sure the number of edges is the expected one - + # ********************************************************************* - + # single edge, one point (start) - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) network.add_node(2, x=0.3, y=0) network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), undirected=False - ) + ) node_keys = [2] edge_keys = [edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length(network, path) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } + } initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge must no longer exist - assert network.has_edge(*edge_keys[0]) + assert network.has_edge(*edge_keys[0]) # edge balance: one extra edge assert network.number_of_edges() - initial_number_edges == 1 self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, - original_path_lengths_ab, - abs_tol=1e-3) - - # ********************************************************************* - + network, + edge_keys, + node_keys, + all_paths_ab, + original_path_lengths_ab, + abs_tol=1e-3, + ) + + # ********************************************************************* + # single edge, one point (end) - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) network.add_node(2, x=0.6, y=1) network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), 
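            # the length attribute is derived from the geometry itself, via the
            # great-circle distance along the line, so the two stay consistent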
undirected=False - ) + ) node_keys = [2] edge_keys = [edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } + } initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge should still exist - assert network.has_edge(*edge_keys[0]) + assert network.has_edge(*edge_keys[0]) # edge balance: one extra - assert network.number_of_edges() - initial_number_edges == 1 + assert network.number_of_edges() - initial_number_edges == 1 self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) - + abs_tol=1e-3, + ) + # ********************************************************************* - + # single edge, one point of each kind - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) - network.add_node(2, x=0.3, y=0) # start + network.add_node(2, x=0.3, y=0) # start network.add_node(3, x=0.5, y=0.5) - network.add_node(4, x=0.7, y=1) # end + network.add_node(4, x=0.7, y=1) # end network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), undirected=False - ) - node_keys = [2,3,4] - edge_keys = [edge_key,edge_key,edge_key] + ) + node_keys = [2, 3, 4] + edge_keys = [edge_key, edge_key, edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } + } initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge must no longer exist - assert not network.has_edge(*edge_keys[0]) + assert not network.has_edge(*edge_keys[0]) # edge balance: one edge less, two replacements, plus one per node - assert network.number_of_edges() - initial_number_edges == 2-1+3 + assert network.number_of_edges() - initial_number_edges == 2 - 1 + 3 self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) - + abs_tol=1e-3, + ) + # ********************************************************************* - + # test multiple nodes closer to the same edge - + network = nx.MultiDiGraph() - network.graph['crs'] 
= "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) - network.add_node(2, x=-0.3, y=0.2) + network.add_node(2, x=-0.3, y=0.2) network.add_node(3, x=0.5, y=0.5) - network.add_node(4, x=-0.7, y=0.8) + network.add_node(4, x=-0.7, y=0.8) network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), undirected=False - ) - node_keys = [2,3,4] - edge_keys = [edge_key,edge_key,edge_key] + ) + node_keys = [2, 3, 4] + edge_keys = [edge_key, edge_key, edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } - + } + initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge must no longer exist - assert not network.has_edge(*edge_keys[0]) + assert not network.has_edge(*edge_keys[0]) # edge balance: one edge less, four replacements, plus one per node - assert network.number_of_edges() - initial_number_edges == 4-1+3 + assert network.number_of_edges() - initial_number_edges == 4 - 1 + 3 self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) - + abs_tol=1e-3, + ) + # ********************************************************************* - + # test edge geometries with three points - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,0.5),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 0.5), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) network.add_node(2, x=0.1, y=0.25) # closer to the first segment (split) - network.add_node(3, x=-0.1, y=0.75) # closer to the second segment (split) - network.add_node(4, x=0, y=-0.1) # closer to the start point (no split) - network.add_node(5, x=0.1, y=0.5) # closer to the middle point (split) - network.add_node(6, x=0, y=1.1) # closer to the end point (no split) + network.add_node(3, x=-0.1, y=0.75) # closer to the second segment (split) + network.add_node(4, x=0, y=-0.1) # closer to the start point (no split) + network.add_node(5, x=0.1, y=0.5) # closer to the middle point (split) + network.add_node(6, x=0, y=1.1) # closer to the end point (no split) network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), undirected=False - ) - node_keys = [2,3,4,5,6] - edge_keys = [edge_key,edge_key,edge_key,edge_key,edge_key] + ) + node_keys = [2, 3, 4, 5, 6] + edge_keys = [edge_key, edge_key, edge_key, edge_key, edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - 
edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } - + } + initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge must no longer exist - assert not network.has_edge(*edge_keys[0]) - # make sure everything adds up + assert not network.has_edge(*edge_keys[0]) + # make sure everything adds up self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) + abs_tol=1e-3, + ) # edge balance: one edge less, four replacements, plus one per node - assert network.number_of_edges() - initial_number_edges == 4-1+5 - + assert network.number_of_edges() - initial_number_edges == 4 - 1 + 5 + # ********************************************************************* - + # test nodes closer to the same point on an edge - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,0.5),(0,1)]) - edge_key = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 0.5), (0, 1)]) + edge_key = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) network.add_node(2, x=-0.1, y=0.5) # closer to the middle point (split) - network.add_node(5, x=0.1, y=0.5) # closer to the middle point (split) + network.add_node(5, x=0.1, y=0.5) # closer to the middle point (split) network.add_edge( - *edge_key, - geometry=line, - length=gis_calc.great_circle_distance_along_path(line), + *edge_key, + geometry=line, + length=gis_calc.great_circle_distance_along_path(line), undirected=False - ) - node_keys = [2,5] - edge_keys = [edge_key,edge_key] + ) + node_keys = [2, 5] + edge_keys = [edge_key, edge_key] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } - + } + initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge must no longer exist - assert not network.has_edge(*edge_keys[0]) - # make sure everything adds up + assert not network.has_edge(*edge_keys[0]) + # make sure everything adds up self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) + abs_tol=1e-3, + ) # edge balance: one edge less, two replacements, plus one per node - assert network.number_of_edges() - initial_number_edges == 2-1+2 - + assert network.number_of_edges() - initial_number_edges == 2 - 1 + 2 + # ********************************************************************* - + # test connecting points to 
two different edges - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line1 = LineString([(0,0),(0,0.5),(0,1)]) - edge_key1 = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line1 = LineString([(0, 0), (0, 0.5), (0, 1)]) + edge_key1 = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) - line2 = LineString([(1,0),(1,0.5),(1,1)]) - edge_key2 = (2,3,0) + line2 = LineString([(1, 0), (1, 0.5), (1, 1)]) + edge_key2 = (2, 3, 0) network.add_node(2, x=1, y=0) network.add_node(3, x=1, y=1) # unconnected nodes network.add_node(4, x=-0.5, y=0.5) # closer to the first edge - network.add_node(5, x=1.5, y=0.5) # closer to the second edge + network.add_node(5, x=1.5, y=0.5) # closer to the second edge network.add_edge( - *edge_key1, - geometry=line1, - length=gis_calc.great_circle_distance_along_path(line1), + *edge_key1, + geometry=line1, + length=gis_calc.great_circle_distance_along_path(line1), undirected=False - ) + ) network.add_edge( - *edge_key2, - geometry=line2, - length=gis_calc.great_circle_distance_along_path(line2), + *edge_key2, + geometry=line2, + length=gis_calc.great_circle_distance_along_path(line2), undirected=False - ) - node_keys = [4,5] - edge_keys = [edge_key1,edge_key2] + ) + node_keys = [4, 5] + edge_keys = [edge_key1, edge_key2] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } - + } + initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edges should no longer exist - assert not network.has_edge(*edge_keys[0]) - assert not network.has_edge(*edge_keys[1]) - # make sure everything adds up + assert not network.has_edge(*edge_keys[0]) + assert not network.has_edge(*edge_keys[1]) + # make sure everything adds up self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) + abs_tol=1e-3, + ) # edge balance: two fewer edges, four extra, plus one per node - assert network.number_of_edges() - initial_number_edges == 4-2+2 - + assert network.number_of_edges() - initial_number_edges == 4 - 2 + 2 + # ********************************************************************* - + # test connecting a point that is already connected - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line1 = LineString([(0,0),(0,0.5),(0,1)]) - edge_key1 = (0,1,0) + network.graph["crs"] = "EPSG:4326" + line1 = LineString([(0, 0), (0, 0.5), (0, 1)]) + edge_key1 = (0, 1, 0) network.add_node(0, x=0, y=0) network.add_node(1, x=0, y=1) network.add_edge( - *edge_key1, - geometry=line1, - length=gis_calc.great_circle_distance_along_path(line1), + *edge_key1, + geometry=line1, + length=gis_calc.great_circle_distance_along_path(line1), undirected=False - ) + ) node_keys = [1] edge_keys = [edge_key1] all_paths_ab = { - edge_key: list( - nx.all_simple_edge_paths( - network, - edge_key[0], - edge_key[1] - ) - ) + edge_key: list(nx.all_simple_edge_paths(network, edge_key[0], 
edge_key[1])) for edge_key in edge_keys - } + } original_path_lengths_ab = { edge_key: [ - gis_calc.edge_path_length( - network, - path - ) + gis_calc.edge_path_length(network, path) for path in all_paths_ab[edge_key] - ] + ] for edge_key in edge_keys - } - + } + initial_number_edges = network.number_of_edges() network, _, _, _ = gis_mod.connect_nodes_to_edges( - network=network, - node_keys=node_keys, - edge_keys=edge_keys - ) - + network=network, node_keys=node_keys, edge_keys=edge_keys + ) + # the original edge still exists - assert network.has_edge(*edge_keys[0]) - # make sure everything adds up + assert network.has_edge(*edge_keys[0]) + # make sure everything adds up self.check_split_recreate_connect( - network, - edge_keys, - node_keys, - all_paths_ab, + network, + edge_keys, + node_keys, + all_paths_ab, original_path_lengths_ab, - abs_tol=1e-3) + abs_tol=1e-3, + ) # edge balance: unchanged, since the node was already connected assert network.number_of_edges() - initial_number_edges == 0 - + # ************************************************************************* # ************************************************************************* - + def test_recreate_edges_01(self): - # create network - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - + network.graph["crs"] = "EPSG:4326" + # create and add simple edge to the network - - line = LineString([(0,0),(0,1)]) - - edge_key = (0,1,0) - - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) - + + line = LineString([(0, 0), (0, 1)]) + + edge_key = (0, 1, 0) + + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) + # recreate the edge using an intermediate point - + connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,0.5)] - } - ) - + network=network, points={edge_key: [Point(0, 0.5)]} + ) + # verify that the edge was recreated assert len(recreated_edges) == 1 assert edge_key in recreated_edges @@ -1974,66 +1948,55 @@ class TestGisModify: assert network.has_node(connection_node_keys_per_edge[edge_key][0]) # verify that the replacing edges exist assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][0], - v=edge_key[1], - ) + v=edge_key[1], + ) # make sure the geometries make sense assert isclose( length( network.edges[ (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - ]['geometry'] - ) - + - length( + ]["geometry"] + ) + + length( network.edges[ (connection_node_keys_per_edge[edge_key][0], edge_key[1], 0) - ]['geometry'] - ), - length(network.edges[edge_key]['geometry']), - abs_tol=1e-3 - ) + ]["geometry"] + ), + length(network.edges[edge_key]["geometry"]), + abs_tol=1e-3, + ) # verify the geometries edge_01 = (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) edge_02 = (connection_node_keys_per_edge[edge_key][0], edge_key[1], 0) - assert tuple( - network.edges[edge_01]['geometry'].coords) == ((0,0),(0,0.5)) - assert tuple( - network.edges[edge_02]['geometry'].coords) == ((0,0.5),(0,1)) - + assert tuple(network.edges[edge_01]["geometry"].coords) == ((0, 0), (0, 0.5)) + assert tuple(network.edges[edge_02]["geometry"].coords) == ((0, 0.5), (0, 1)) + # ************************************************************************* #
************************************************************************* - + def test_recreate_edges_02(self): - # test using multiple segments and points - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - + network.graph["crs"] = "EPSG:4326" + # create and add simple edge to the network - line = LineString([(0,0),(0,0.5),(0,1)]) - edge_key = (0,1,0) - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) - + line = LineString([(0, 0), (0, 0.5), (0, 1)]) + edge_key = (0, 1, 0) + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) + # recreate the edge using intermediate points - + connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,0.2),Point(0,0.8)] - } - ) - + network=network, points={edge_key: [Point(0, 0.2), Point(0, 0.8)]} + ) + # verify if the edge was recreated assert len(recreated_edges) == 1 assert edge_key in recreated_edges @@ -2043,88 +2006,82 @@ class TestGisModify: assert network.has_node(connection_node_keys_per_edge[edge_key][1]) # verify that the replacing edges exist assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][0], v=connection_node_keys_per_edge[edge_key][1], - ) + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][1], - v=edge_key[1], - ) + v=edge_key[1], + ) # make sure the geometries make sense assert isclose( length( network.edges[ (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - ]['geometry'] - ) - + - length( + ]["geometry"] + ) + + length( network.edges[ - (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][1], - 0) - ]['geometry'] - ) - + - length( + ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][1], + 0, + ) + ]["geometry"] + ) + + length( network.edges[ (connection_node_keys_per_edge[edge_key][1], edge_key[1], 0) - ]['geometry'] - ), - length(network.edges[edge_key]['geometry']), - abs_tol=1e-3 - ) - + ]["geometry"] + ), + length(network.edges[edge_key]["geometry"]), + abs_tol=1e-3, + ) + # verify the geometries edge_01 = (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - edge_02 = (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][1], - 0) + edge_02 = ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][1], + 0, + ) edge_03 = (connection_node_keys_per_edge[edge_key][1], edge_key[1], 0) - assert tuple( - network.edges[edge_01]['geometry'].coords) == ((0,0),(0,0.2)) - assert tuple( - network.edges[edge_02]['geometry'].coords) == ( - (0,0.2),(0,0.5),(0,0.8)) - assert tuple( - network.edges[edge_03]['geometry'].coords) == ((0,0.8),(0,1)) - - # ********************************************************************* - + assert tuple(network.edges[edge_01]["geometry"].coords) == ((0, 0), (0, 0.2)) + assert tuple(network.edges[edge_02]["geometry"].coords) == ( + (0, 0.2), + (0, 0.5), + (0, 0.8), + ) + assert tuple(network.edges[edge_03]["geometry"].coords) == ((0, 0.8), (0, 1)) + + # ********************************************************************* + # ************************************************************************* # 
************************************************************************* - + def test_recreate_edges_03(self): - # test using equidistant points - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - + network.graph["crs"] = "EPSG:4326" + # create and add simple edge to the network - - line = LineString([(0,0),(0,0.5),(0,1)]) - edge_key = (0,1,0) - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) - + + line = LineString([(0, 0), (0, 0.5), (0, 1)]) + edge_key = (0, 1, 0) + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) + # recreate the edge using an intermediate point - + connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,0.25),Point(0,0.75)] - } - ) - + network=network, points={edge_key: [Point(0, 0.25), Point(0, 0.75)]} + ) + # verify if the edge was recreated assert len(recreated_edges) == 1 assert edge_key in recreated_edges @@ -2134,85 +2091,78 @@ class TestGisModify: assert network.has_node(connection_node_keys_per_edge[edge_key][1]) # verify that the replacing edges exist assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][0], v=connection_node_keys_per_edge[edge_key][1], - ) + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][1], - v=edge_key[1], - ) + v=edge_key[1], + ) # make sure the geometries make sense assert isclose( length( network.edges[ (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - ]['geometry'] - ) - + - length( + ]["geometry"] + ) + + length( network.edges[ - (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][1], - 0) - ]['geometry'] - ) - + - length( + ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][1], + 0, + ) + ]["geometry"] + ) + + length( network.edges[ (connection_node_keys_per_edge[edge_key][1], edge_key[1], 0) - ]['geometry'] - ), - length(network.edges[edge_key]['geometry']), - abs_tol=1e-3 - ) - - + ]["geometry"] + ), + length(network.edges[edge_key]["geometry"]), + abs_tol=1e-3, + ) + # verify the geometries edge_01 = (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - edge_02 = (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][1], - 0) + edge_02 = ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][1], + 0, + ) edge_03 = (connection_node_keys_per_edge[edge_key][1], edge_key[1], 0) - assert tuple( - network.edges[edge_01]['geometry'].coords) == ((0,0),(0,0.25)) - assert tuple( - network.edges[edge_02]['geometry'].coords) == ( - (0,0.25),(0,0.5),(0,0.75)) - assert tuple( - network.edges[edge_03]['geometry'].coords) == ((0,0.75),(0,1)) - - # ********************************************************************* - + assert tuple(network.edges[edge_01]["geometry"].coords) == ((0, 0), (0, 0.25)) + assert tuple(network.edges[edge_02]["geometry"].coords) == ( + (0, 0.25), + (0, 0.5), + (0, 0.75), + ) + assert tuple(network.edges[edge_03]["geometry"].coords) == ((0, 0.75), (0, 1)) + + # ********************************************************************* + # ************************************************************************* # 
************************************************************************* - + def test_recreate_edges_04(self): - # test using start points - - network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) + + network = nx.MultiDiGraph() + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,0)] - } - ) - + network=network, points={edge_key: [Point(0, 0)]} + ) + # verify that the edge was not recreated - assert len(recreated_edges) == 0 # it should not be recreated + assert len(recreated_edges) == 0  # it should not be recreated assert edge_key not in recreated_edges # verify that the connection nodes exist assert len(connection_node_keys_per_edge[edge_key]) == 1 @@ -2220,42 +2170,34 @@ class TestGisModify: assert edge_key[0] == connection_node_keys_per_edge[edge_key][0] # verify that there are no replacing edges assert not network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][0], - v=edge_key[1], - ) - + v=edge_key[1], + ) + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_recreate_edges_05(self): - # test using end points - - network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) + + network = nx.MultiDiGraph() + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,1)] - } - ) - + network=network, points={edge_key: [Point(0, 1)]} + ) + # verify that the edge was not recreated - assert len(recreated_edges) == 0 # it should not be recreated + assert len(recreated_edges) == 0  # it should not be recreated assert edge_key not in recreated_edges # verify that the connection nodes exist assert len(connection_node_keys_per_edge[edge_key]) == 1 @@ -2263,42 +2205,34 @@ class TestGisModify: assert edge_key[1] == connection_node_keys_per_edge[edge_key][0] # verify that there are no replacing edges assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert not network.has_edge( u=connection_node_keys_per_edge[edge_key][0], - v=edge_key[1], - ) - + v=edge_key[1], + ) + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_recreate_edges_06(self): - # test using multiple start points
- - network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) + + network = nx.MultiDiGraph() + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,0),Point(0,0)] - } - ) - + network=network, points={edge_key: [Point(0, 0), Point(0, 0)]} + ) + # verify that the edge was not recreated - assert len(recreated_edges) == 0 # it should not be recreated + assert len(recreated_edges) == 0  # it should not be recreated assert edge_key not in recreated_edges # verify that the connection nodes exist assert len(connection_node_keys_per_edge[edge_key]) == 2 @@ -2310,50 +2244,41 @@ class TestGisModify: assert edge_key[0] == connection_node_keys_per_edge[edge_key][1] # verify that there are no replacing edges assert not network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert not network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][1] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][1] + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][0], - v=edge_key[1], - ) + v=edge_key[1], + ) assert network.has_edge( u=connection_node_keys_per_edge[edge_key][1], - v=edge_key[1], - ) - + v=edge_key[1], + ) + # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_recreate_edges_07(self): - # test using multiple end points - - network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - line = LineString([(0,0),(0,1)]) - edge_key = (0,1,0) - network.add_edge( - *edge_key, - geometry=line - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) + + network = nx.MultiDiGraph() + network.graph["crs"] = "EPSG:4326" + line = LineString([(0, 0), (0, 1)]) + edge_key = (0, 1, 0) + network.add_edge(*edge_key, geometry=line) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( - network=network, - points={ - edge_key: [Point(0,1),Point(0,1)] - } - ) - + network=network, points={edge_key: [Point(0, 1), Point(0, 1)]} + ) + # verify that the edge was not recreated - assert len(recreated_edges) == 0 # it should not be recreated + assert len(recreated_edges) == 0  # it should not be recreated assert edge_key not in recreated_edges # verify that the connection nodes exist assert len(connection_node_keys_per_edge[edge_key]) == 2 @@ -2363,59 +2288,51 @@ class TestGisModify: assert edge_key[1] == connection_node_keys_per_edge[edge_key][0] assert edge_key[1] == connection_node_keys_per_edge[edge_key][1] # verify that there are no replacing edges assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][1] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][1] + ) assert not network.has_edge(
u=connection_node_keys_per_edge[edge_key][0], - v=edge_key[1], - ) + v=edge_key[1], + ) assert not network.has_edge( u=connection_node_keys_per_edge[edge_key][1], - v=edge_key[1], - ) - + v=edge_key[1], + ) + # ********************************************************************* - + def test_recreate_edges_08(self): - # test using geometries that do not match the edge declaration - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - + network.graph["crs"] = "EPSG:4326" + # create and add simple edge to the network - - line = LineString([(0,1),(0,0.5),(0,0)]) - - edge_key = (0,1,0) + + line = LineString([(0, 1), (0, 0.5), (0, 0)]) + + edge_key = (0, 1, 0) edge_dict = { - 'geometry':line, - 'reversed':False, - 'oneway':True, - 'osmid': 3, - 'length': length(line) - } - network.add_edge( - *edge_key, - **edge_dict - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) - + "geometry": line, + "reversed": False, + "oneway": True, + "osmid": 3, + "length": length(line), + } + network.add_edge(*edge_key, **edge_dict) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) + # recreate the edge using intermediate points - + connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( network=network, - points={ - edge_key: [Point(0,0),Point(0,0.2),Point(0,0.8),Point(0,1)] - } - ) - + points={edge_key: [Point(0, 0), Point(0, 0.2), Point(0, 0.8), Point(0, 1)]}, + ) + # verify if the edge was recreated assert len(recreated_edges) == 1 assert edge_key in recreated_edges @@ -2432,112 +2349,124 @@ class TestGisModify: assert edge_key[1] == connection_node_keys_per_edge[edge_key][3] # the start node connects to the second point assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][1] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][1] + ) # the second point connects to the third assert network.has_edge( - u=connection_node_keys_per_edge[edge_key][1], - v=connection_node_keys_per_edge[edge_key][2] - ) + u=connection_node_keys_per_edge[edge_key][1], + v=connection_node_keys_per_edge[edge_key][2], + ) # the third point connects to the end node assert network.has_edge( - u=connection_node_keys_per_edge[edge_key][2], - v=edge_key[1] - ) + u=connection_node_keys_per_edge[edge_key][2], v=edge_key[1] + ) # make sure the geometries make sense assert network.edges[ (edge_key[0], connection_node_keys_per_edge[edge_key][1], 0) - ]['geometry'] == LineString([(0,0),(0,0.2)]) + ]["geometry"] == LineString([(0, 0), (0, 0.2)]) assert network.edges[ - (connection_node_keys_per_edge[edge_key][1], - connection_node_keys_per_edge[edge_key][2], - 0) - ]['geometry'] == LineString([(0,0.2),(0,0.5),(0,0.8)]) + ( + connection_node_keys_per_edge[edge_key][1], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + ]["geometry"] == LineString([(0, 0.2), (0, 0.5), (0, 0.8)]) assert network.edges[ (connection_node_keys_per_edge[edge_key][2], edge_key[1], 0) - ]['geometry'] == LineString([(0,0.8),(0,1)]) + ]["geometry"] == LineString([(0, 0.8), (0, 1)]) # make sure the lengths add up - assert isclose( - network.edges[ - (edge_key[0], connection_node_keys_per_edge[edge_key][1], 0) - ]['length'], - gis_calc.great_circle_distance_along_path(LineString([(0,0),(0,0.2)])), - abs_tol=2e-3 - ) - assert isclose( + assert isclose( + network.edges[(edge_key[0], connection_node_keys_per_edge[edge_key][1], 0)][ + "length" + ], + gis_calc.great_circle_distance_along_path(LineString([(0, 0), (0, 0.2)])), + abs_tol=2e-3, + ) + assert isclose( network.edges[ 
- (connection_node_keys_per_edge[edge_key][1], - connection_node_keys_per_edge[edge_key][2], - 0)]['length'], + ( + connection_node_keys_per_edge[edge_key][1], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + ]["length"], gis_calc.great_circle_distance_along_path( - LineString([(0,0.2),(0,0.5),(0,0.8)]) - ), - abs_tol=2e-3 - ) - assert isclose( - network.edges[ - (connection_node_keys_per_edge[edge_key][2], edge_key[1], 0)]['length'], - gis_calc.great_circle_distance_along_path( - LineString([(0,0.8),(0,1)]) - ), - abs_tol=2e-3 - ) + LineString([(0, 0.2), (0, 0.5), (0, 0.8)]) + ), + abs_tol=2e-3, + ) + assert isclose( + network.edges[(connection_node_keys_per_edge[edge_key][2], edge_key[1], 0)][ + "length" + ], + gis_calc.great_circle_distance_along_path(LineString([(0, 0.8), (0, 1)])), + abs_tol=2e-3, + ) # make sure the edges have the original 'reversed' value for key in osm.KEYS_OSMNX_EDGES_ESSENTIAL: if key == osm.KEY_OSMNX_GEOMETRY or key == osm.KEY_OSMNX_LENGTH: continue - assert network.edges[ - (edge_key[0], connection_node_keys_per_edge[edge_key][1], 0) - ][key] == edge_dict[key] - assert network.edges[ - (connection_node_keys_per_edge[edge_key][1], - connection_node_keys_per_edge[edge_key][2], - 0)][key] == edge_dict[key] - assert network.edges[ - (connection_node_keys_per_edge[edge_key][2], edge_key[1], 0) - ][key] == edge_dict[key] - + assert ( + network.edges[ + (edge_key[0], connection_node_keys_per_edge[edge_key][1], 0) + ][key] + == edge_dict[key] + ) + assert ( + network.edges[ + ( + connection_node_keys_per_edge[edge_key][1], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + ][key] + == edge_dict[key] + ) + assert ( + network.edges[ + (connection_node_keys_per_edge[edge_key][2], edge_key[1], 0) + ][key] + == edge_dict[key] + ) + # verify the geometries edge_01 = (edge_key[0], connection_node_keys_per_edge[edge_key][1], 0) - edge_02 = (connection_node_keys_per_edge[edge_key][1], - connection_node_keys_per_edge[edge_key][2], - 0) + edge_02 = ( + connection_node_keys_per_edge[edge_key][1], + connection_node_keys_per_edge[edge_key][2], + 0, + ) edge_03 = (connection_node_keys_per_edge[edge_key][2], edge_key[1], 0) - assert tuple( - network.edges[edge_01]['geometry'].coords) == ((0,0),(0,0.2)) - assert tuple( - network.edges[edge_02]['geometry'].coords) == ( - (0,0.2),(0,0.5),(0,0.8)) - assert tuple( - network.edges[edge_03]['geometry'].coords) == ((0,0.8),(0,1)) - # ********************************************************************* - + assert tuple(network.edges[edge_01]["geometry"].coords) == ((0, 0), (0, 0.2)) + assert tuple(network.edges[edge_02]["geometry"].coords) == ( + (0, 0.2), + (0, 0.5), + (0, 0.8), + ) + assert tuple(network.edges[edge_03]["geometry"].coords) == ((0, 0.8), (0, 1)) + # ********************************************************************* + def test_recreate_edges_09(self): - # test using reversed geometries and multiple start points - + network = nx.MultiDiGraph() - network.graph['crs'] = "EPSG:4326" - + network.graph["crs"] = "EPSG:4326" + # create and add simple edge to the network - - line = LineString([(0,1),(0,0)]) - - edge_key = (0,1,0) + + line = LineString([(0, 1), (0, 0)]) + + edge_key = (0, 1, 0) edge_dict = { - 'geometry':line, - 'reversed':True, - 'oneway':True, - 'osmid': 3, - 'length': length(line) - } - network.add_edge( - *edge_key, - **edge_dict - ) - network.add_node(0,x=0,y=0) - network.add_node(1,x=0,y=1) + "geometry": line, + "reversed": True, + "oneway": True, + "osmid": 3, + "length": length(line), + } + 
network.add_edge(*edge_key, **edge_dict) + network.add_node(0, x=0, y=0) + network.add_node(1, x=0, y=1) # edge_key = (0,1,1) # network.add_edge( # *edge_key, @@ -2548,22 +2477,22 @@ class TestGisModify: # length=length(line) # ) # edge_key = (0,1,0) - + # recreate the edge using an intermediate point - + connection_node_keys_per_edge, recreated_edges = gis_mod.recreate_edges( network=network, points={ edge_key: [ - Point(0,0.2), - Point(0,0), - Point(0,0.5), - Point(0,0.8), - Point(0,0), - ] - } - ) - + Point(0, 0.2), + Point(0, 0), + Point(0, 0.5), + Point(0, 0.8), + Point(0, 0), + ] + }, + ) + # verify if the edge was recreated assert len(recreated_edges) == 1 assert edge_key in recreated_edges @@ -2581,652 +2510,587 @@ class TestGisModify: assert edge_key[0] == connection_node_keys_per_edge[edge_key][4] # the start node connects to the first point assert network.has_edge( - u=edge_key[0], - v=connection_node_keys_per_edge[edge_key][0] - ) + u=edge_key[0], v=connection_node_keys_per_edge[edge_key][0] + ) # the first point connects to the third assert network.has_edge( - u=connection_node_keys_per_edge[edge_key][0], - v=connection_node_keys_per_edge[edge_key][2] - ) + u=connection_node_keys_per_edge[edge_key][0], + v=connection_node_keys_per_edge[edge_key][2], + ) # the third point connects to the fourth node assert network.has_edge( - u=connection_node_keys_per_edge[edge_key][2], - v=connection_node_keys_per_edge[edge_key][3] - ) + u=connection_node_keys_per_edge[edge_key][2], + v=connection_node_keys_per_edge[edge_key][3], + ) # the fourth point connects to the end node assert network.has_edge( - u=connection_node_keys_per_edge[edge_key][3], - v=edge_key[1] - ) + u=connection_node_keys_per_edge[edge_key][3], v=edge_key[1] + ) # make sure the geometries make sense assert network.edges[ (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - ]['geometry'] == LineString([(0,0),(0,0.2)]) + ]["geometry"] == LineString([(0, 0), (0, 0.2)]) assert network.edges[ - (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][2], - 0) - ]['geometry'] == LineString([(0,0.2),(0,0.5)]) + ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + ]["geometry"] == LineString([(0, 0.2), (0, 0.5)]) assert network.edges[ - (connection_node_keys_per_edge[edge_key][2], - connection_node_keys_per_edge[edge_key][3], - 0) - ]['geometry'] == LineString([(0,0.5),(0,0.8)]) + ( + connection_node_keys_per_edge[edge_key][2], + connection_node_keys_per_edge[edge_key][3], + 0, + ) + ]["geometry"] == LineString([(0, 0.5), (0, 0.8)]) assert network.edges[ (connection_node_keys_per_edge[edge_key][3], edge_key[1], 0) - ]['geometry'] == LineString([(0,0.8),(0,1)]) + ]["geometry"] == LineString([(0, 0.8), (0, 1)]) # make sure the lengths add up - assert isclose( - network.edges[ - (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - ]['length'], - gis_calc.great_circle_distance_along_path(LineString([(0,0),(0,0.2)])), - abs_tol=2e-3 - ) - assert isclose( - network.edges[ - (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][2], - 0)]['length'], - gis_calc.great_circle_distance_along_path( - LineString([(0,0.2),(0,0.5)]) - ), - abs_tol=2e-3 - ) - assert isclose( + assert isclose( + network.edges[(edge_key[0], connection_node_keys_per_edge[edge_key][0], 0)][ + "length" + ], + gis_calc.great_circle_distance_along_path(LineString([(0, 0), (0, 0.2)])), + abs_tol=2e-3, + ) + assert isclose( network.edges[ 
- (connection_node_keys_per_edge[edge_key][2], - connection_node_keys_per_edge[edge_key][3], - 0)]['length'], - gis_calc.great_circle_distance_along_path( - LineString([(0,0.5),(0,0.8)]) - ), - abs_tol=2e-3 - ) - assert isclose( + ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + ]["length"], + gis_calc.great_circle_distance_along_path(LineString([(0, 0.2), (0, 0.5)])), + abs_tol=2e-3, + ) + assert isclose( network.edges[ - (connection_node_keys_per_edge[edge_key][3], edge_key[1], 0)]['length'], - gis_calc.great_circle_distance_along_path( - LineString([(0,0.8),(0,1)]) - ), - abs_tol=2e-3 - ) + ( + connection_node_keys_per_edge[edge_key][2], + connection_node_keys_per_edge[edge_key][3], + 0, + ) + ]["length"], + gis_calc.great_circle_distance_along_path(LineString([(0, 0.5), (0, 0.8)])), + abs_tol=2e-3, + ) + assert isclose( + network.edges[(connection_node_keys_per_edge[edge_key][3], edge_key[1], 0)][ + "length" + ], + gis_calc.great_circle_distance_along_path(LineString([(0, 0.8), (0, 1)])), + abs_tol=2e-3, + ) # verify the geometries edge_01 = (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - edge_02 = (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][2], - 0) - edge_03 = (connection_node_keys_per_edge[edge_key][2], - connection_node_keys_per_edge[edge_key][3], - 0) + edge_02 = ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + edge_03 = ( + connection_node_keys_per_edge[edge_key][2], + connection_node_keys_per_edge[edge_key][3], + 0, + ) edge_04 = (connection_node_keys_per_edge[edge_key][3], edge_key[1], 0) - assert tuple( - network.edges[edge_01]['geometry'].coords) == ((0,0),(0,0.2)) - assert tuple( - network.edges[edge_02]['geometry'].coords) == ((0,0.2),(0,0.5)) - assert tuple( - network.edges[edge_03]['geometry'].coords) == ((0,0.5),(0,0.8)) - assert tuple( - network.edges[edge_04]['geometry'].coords) == ((0,0.8),(0,1)) - + assert tuple(network.edges[edge_01]["geometry"].coords) == ((0, 0), (0, 0.2)) + assert tuple(network.edges[edge_02]["geometry"].coords) == ((0, 0.2), (0, 0.5)) + assert tuple(network.edges[edge_03]["geometry"].coords) == ((0, 0.5), (0, 0.8)) + assert tuple(network.edges[edge_04]["geometry"].coords) == ((0, 0.8), (0, 1)) + # make sure the edges have the original 'reversed' value for key in osm.KEYS_OSMNX_EDGES_ESSENTIAL: if key == osm.KEY_OSMNX_GEOMETRY or key == osm.KEY_OSMNX_LENGTH: continue - assert network.edges[ - (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) - ][key] == edge_dict[key] - assert network.edges[ - (connection_node_keys_per_edge[edge_key][0], - connection_node_keys_per_edge[edge_key][2], - 0)][key] == edge_dict[key] - assert network.edges[ - (connection_node_keys_per_edge[edge_key][2], - connection_node_keys_per_edge[edge_key][3], - 0)][key] == edge_dict[key] - assert network.edges[ - (connection_node_keys_per_edge[edge_key][3], edge_key[1], 0) - ][key] == edge_dict[key] - + assert ( + network.edges[ + (edge_key[0], connection_node_keys_per_edge[edge_key][0], 0) + ][key] + == edge_dict[key] + ) + assert ( + network.edges[ + ( + connection_node_keys_per_edge[edge_key][0], + connection_node_keys_per_edge[edge_key][2], + 0, + ) + ][key] + == edge_dict[key] + ) + assert ( + network.edges[ + ( + connection_node_keys_per_edge[edge_key][2], + connection_node_keys_per_edge[edge_key][3], + 0, + ) + ][key] + == edge_dict[key] + ) + assert ( + network.edges[ + 
(connection_node_keys_per_edge[edge_key][3], edge_key[1], 0) + ][key] + == edge_dict[key] + ) + # ************************************************************************* # ************************************************************************* - + def test_split_linestring(self): - # ********************************************************************* # ********************************************************************* - + # simple line split by a point - - line_coords = tuple([(0.0,0.0),(0.0,1.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0)]) line = LineString(line_coords) - + segments, close_to_start, close_to_end = gis_mod.split_linestring( - line=line, - points=[Point(0,0.5)] - ) - + line=line, points=[Point(0, 0.5)] + ) + assert len(segments) == 2 - assert repr(tuple(segments[0].coords)) == repr(((0.0,0.0),(0.0,0.5))) - assert repr(tuple(segments[1].coords)) == repr(((0.0,0.5),(0.0,1.0))) - + assert repr(tuple(segments[0].coords)) == repr(((0.0, 0.0), (0.0, 0.5))) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 0.5), (0.0, 1.0))) + # simple line split by a point slightly off the line - - line_coords = tuple([(0.0,0.0),(0.0,1.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0)]) line = LineString(line_coords) - - segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0.1,0.5)] - ) - + + segments, _, _ = gis_mod.split_linestring(line=line, points=[Point(0.1, 0.5)]) + assert len(segments) == 2 - assert repr(tuple(segments[0].coords)) == repr(((0.0,0.0),(0.1,0.5))) - assert repr(tuple(segments[1].coords)) == repr(((0.1,0.5),(0.0,1.0))) - + assert repr(tuple(segments[0].coords)) == repr(((0.0, 0.0), (0.1, 0.5))) + assert repr(tuple(segments[1].coords)) == repr(((0.1, 0.5), (0.0, 1.0))) + # ********************************************************************* # ********************************************************************* - + # simple line split by two points - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0,0.5),Point(0,1.5)] - ) - + line=line, points=[Point(0, 0.5), Point(0, 1.5)] + ) + assert len(segments) == 3 - assert repr(tuple(segments[0].coords)) == repr(((0.0,0.0),(0.0,0.5))) + assert repr(tuple(segments[0].coords)) == repr(((0.0, 0.0), (0.0, 0.5))) assert repr(tuple(segments[1].coords)) == repr( - ((0.0,0.5),(0.0,1.0),(0.0,1.5)) - ) - assert repr(tuple(segments[2].coords)) == repr(((0.0,1.5),(0.0,2.0))) - + ((0.0, 0.5), (0.0, 1.0), (0.0, 1.5)) + ) + assert repr(tuple(segments[2].coords)) == repr(((0.0, 1.5), (0.0, 2.0))) + # simple line split by two points slightly off the line - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(-0.1,0.5),Point(0.2,1.5)] - ) - + line=line, points=[Point(-0.1, 0.5), Point(0.2, 1.5)] + ) + assert len(segments) == 3 - assert repr(tuple(segments[0].coords)) == repr(((0.0,0.0),(-0.1,0.5))) + assert repr(tuple(segments[0].coords)) == repr(((0.0, 0.0), (-0.1, 0.5))) assert repr(tuple(segments[1].coords)) == repr( - ((-0.1,0.5),(0.0,1.0),(0.2,1.5)) - ) - assert repr(tuple(segments[2].coords)) == repr(((0.2,1.5),(0.0,2.0))) - + ((-0.1, 0.5), (0.0, 1.0), (0.2, 1.5)) + ) + assert repr(tuple(segments[2].coords)) == repr(((0.2, 1.5), (0.0, 2.0))) + # 
********************************************************************* # ********************************************************************* - + # simple line split by a point matching the start point - - line_coords = tuple([(0.0,0.0),(0.0,1.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0)]) line = LineString(line_coords) - + segments, _start, _end = gis_mod.split_linestring( - line=line, - points=[Point(0.0,0.0)] - ) - + line=line, points=[Point(0.0, 0.0)] + ) + assert len(segments) == 0 assert len(_start) == 1 assert len(_end) == 0 assert repr(_start) == repr([0]) assert repr(_end) == repr([]) - + # ********************************************************************* # ********************************************************************* - + # simple line split by a point matching the end point - - line_coords = tuple([(0.0,0.0),(0.0,1.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0)]) line = LineString(line_coords) - + segments, _start, _end = gis_mod.split_linestring( - line=line, - points=[Point(0.0,1.0)] - ) - + line=line, points=[Point(0.0, 1.0)] + ) + assert len(segments) == 0 assert len(_start) == 0 assert len(_end) == 1 assert repr(_start) == repr([]) assert repr(_end) == repr([0]) - + # ********************************************************************* # ********************************************************************* - + # simple line split by two points, both within the same segment (first) - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0,0.25),Point(0,0.75)] - ) - + line=line, points=[Point(0, 0.25), Point(0, 0.75)] + ) + assert len(segments) == 3 - assert repr(tuple(segments[0].coords)) == repr(((0.0,0.0),(0.0,0.25))) - assert repr(tuple(segments[1].coords)) == repr(((0.0,0.25),(0.0,0.75))) + assert repr(tuple(segments[0].coords)) == repr(((0.0, 0.0), (0.0, 0.25))) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 0.25), (0.0, 0.75))) assert repr(tuple(segments[2].coords)) == repr( - ((0.0,0.75),(0.0,1.0),(0.0,2.0)) - ) - + ((0.0, 0.75), (0.0, 1.0), (0.0, 2.0)) + ) + # simple line split by two points, both within the same segment (last) - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0.0,1.25),Point(0.0,1.75)] - ) - + line=line, points=[Point(0.0, 1.25), Point(0.0, 1.75)] + ) + assert len(segments) == 3 assert repr(tuple(segments[0].coords)) == repr( - ((0.0,0.0),(0.0,1.0),(0.0,1.25)) - ) - assert repr(tuple(segments[1].coords)) == repr(((0.0,1.25),(0.0,1.75))) - assert repr(tuple(segments[2].coords)) == repr(((0.0,1.75),(0.0,2.0))) + ((0.0, 0.0), (0.0, 1.0), (0.0, 1.25)) + ) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 1.25), (0.0, 1.75))) + assert repr(tuple(segments[2].coords)) == repr(((0.0, 1.75), (0.0, 2.0))) + # ********************************************************************* # ********************************************************************* - + # simple line split by three points, all within the same segment (first) - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, -
points=[Point(0,0.25),Point(0,0.5),Point(0,0.75)] - ) - + line=line, points=[Point(0, 0.25), Point(0, 0.5), Point(0, 0.75)] + ) + assert len(segments) == 4 - assert repr(tuple(segments[0].coords)) == repr(((0.0,0.0),(0.0,0.25))) - assert repr(tuple(segments[1].coords)) == repr(((0.0,0.25),(0.0,0.5))) - assert repr(tuple(segments[2].coords)) == repr(((0.0,0.5),(0.0,0.75))) + assert repr(tuple(segments[0].coords)) == repr(((0.0, 0.0), (0.0, 0.25))) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 0.25), (0.0, 0.5))) + assert repr(tuple(segments[2].coords)) == repr(((0.0, 0.5), (0.0, 0.75))) assert repr(tuple(segments[3].coords)) == repr( - ((0.0,0.75),(0.0,1.0),(0.0,2.0)) - ) - + ((0.0, 0.75), (0.0, 1.0), (0.0, 2.0)) + ) + # simple line split by three points, all within the same segment (last) - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0.0,1.25),Point(0.0,1.5),Point(0.0,1.75)] - ) - + line=line, points=[Point(0.0, 1.25), Point(0.0, 1.5), Point(0.0, 1.75)] + ) + assert len(segments) == 4 assert repr(tuple(segments[0].coords)) == repr( - ((0.0,0.0),(0.0,1.0),(0.0,1.25)) - ) - assert repr(tuple(segments[1].coords)) == repr(((0.0,1.25),(0.0,1.5))) - assert repr(tuple(segments[2].coords)) == repr(((0.0,1.5),(0.0,1.75))) - assert repr(tuple(segments[3].coords)) == repr(((0.0,1.75),(0.0,2.0))) - + ((0.0, 0.0), (0.0, 1.0), (0.0, 1.25)) + ) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 1.25), (0.0, 1.5))) + assert repr(tuple(segments[2].coords)) == repr(((0.0, 1.5), (0.0, 1.75))) + assert repr(tuple(segments[3].coords)) == repr(((0.0, 1.75), (0.0, 2.0))) + # ********************************************************************* # ********************************************************************* - + # simple line split by two points, both within the same segment (intermediate) - - line_coords = tuple([(0.0,0.0),(0.0,0.5),(0.0,1.5),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 0.5), (0.0, 1.5), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0,0.75),Point(0,1.25)] - ) - + line=line, points=[Point(0, 0.75), Point(0, 1.25)] + ) + assert len(segments) == 3 assert repr(tuple(segments[0].coords)) == repr( - ((0.0,0.0),(0.0,0.5),(0.0,0.75)) - ) - assert repr(tuple(segments[1].coords)) == repr(((0.0,0.75),(0.0,1.25))) + ((0.0, 0.0), (0.0, 0.5), (0.0, 0.75)) + ) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 0.75), (0.0, 1.25))) assert repr(tuple(segments[2].coords)) == repr( - ((0.0,1.25),(0.0,1.5),(0.0,2.0)) - ) - + ((0.0, 1.25), (0.0, 1.5), (0.0, 2.0)) + ) + # simple line split by three points, all within the same segment (intermediate)
- - line_coords = tuple([(0.0,0.0),(0.0,0.5),(0.0,1.5),(0.0,2.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 0.5), (0.0, 1.5), (0.0, 2.0)]) line = LineString(line_coords) - + segments, _, _ = gis_mod.split_linestring( - line=line, - points=[Point(0.0,0.75),Point(0.0,1.0),Point(0.0,1.25)] - ) - + line=line, points=[Point(0.0, 0.75), Point(0.0, 1.0), Point(0.0, 1.25)] + ) + assert len(segments) == 4 assert repr(tuple(segments[0].coords)) == repr( - ((0.0,0.0),(0.0,0.5),(0.0,0.75)) - ) - assert repr(tuple(segments[1].coords)) == repr(((0.0,0.75),(0.0,1.0))) - assert repr(tuple(segments[2].coords)) == repr(((0.0,1.0),(0.0,1.25))) + ((0.0, 0.0), (0.0, 0.5), (0.0, 0.75)) + ) + assert repr(tuple(segments[1].coords)) == repr(((0.0, 0.75), (0.0, 1.0))) + assert repr(tuple(segments[2].coords)) == repr(((0.0, 1.0), (0.0, 1.25))) assert repr(tuple(segments[3].coords)) == repr( - ((0.0,1.25),(0.0,1.5),(0.0,2.0)) - ) - + ((0.0, 1.25), (0.0, 1.5), (0.0, 2.0)) + ) + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_merge_points_into_linestring(self): - # ********************************************************************* # ********************************************************************* - + # simple line - - line_coords = tuple([(0.0,0.0),(0.0,1.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0)]) line = LineString(line_coords) - + _line, close_to_start, close_to_end = gis_mod.merge_points_into_linestring( - line, - points=[Point(0,0.5)] - ) - - assert repr(tuple(_line.coords)) == repr(((0.0,0.0),(0.0,0.5),(0.0,1.0))) + line, points=[Point(0, 0.5)] + ) + + assert repr(tuple(_line.coords)) == repr(((0.0, 0.0), (0.0, 0.5), (0.0, 1.0))) assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([]) - + # ********************************************************************* # ********************************************************************* - + # redundant points # expected result: the points are ignored - - line_coords = tuple([(1.0,1.0),(2.0,2.0),(3.0,0.0)]) + + line_coords = tuple([(1.0, 1.0), (2.0, 2.0), (3.0, 0.0)]) line = LineString(line_coords) - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[Point(1,1), Point(2,2), Point(3,0)] - ) - + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, points=[Point(1, 1), Point(2, 2), Point(3, 0)] + ) + assert repr(line_coords) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([2]) - + # redundant points, different order - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[Point(3,0), Point(2,2), Point(1,1)] - ) - + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, points=[Point(3, 0), Point(2, 2), Point(1, 1)] + ) + assert repr(line_coords) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([2]) assert repr(close_to_end) == repr([0]) - + # redundant points, yet another order - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[Point(2,2), Point(3,0), Point(1,1)] - ) - + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, 
points=[Point(2, 2), Point(3, 0), Point(1, 1)] + ) + assert repr(line_coords) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([2]) assert repr(close_to_end) == repr([1]) - + # ********************************************************************* # ********************************************************************* - + # new points, directly on the line # expected result: the new points appear on the geometry - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[Point(1.2,1.2), Point(3,0), Point(1.5,1.5)], - ) - - assert ( - repr( - ((1.0, 1.0), (1.2, 1.2), (1.5, 1.5), (2.0, 2.0), (3.0, 0.0)) - ) - == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(1.2, 1.2), Point(3, 0), Point(1.5, 1.5)], + ) + + assert repr( + ((1.0, 1.0), (1.2, 1.2), (1.5, 1.5), (2.0, 2.0), (3.0, 0.0)) + ) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([1]) - + # ********************************************************************* # ********************************************************************* - + # new points, extending beyond the line # expected result: the line is NOT extended - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, points=[Point(0.5, 0.5), Point(3, 0), Point(4, -2)], - ) - + ) + assert repr(tuple(new_line.coords)) == repr(line_coords) assert repr(close_to_start) == repr([0]) - assert repr(close_to_end) == repr([1,2]) - - + assert repr(close_to_end) == repr([1, 2]) + # ********************************************************************* # ********************************************************************* - + # new points, not on the line # expected result: the new points appear on the geometry - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(0.5, 0.5), - Point(1.5, 1.75), - Point(2.5, 2.5), - Point(4, -2) - ], - ) - - assert ( - repr( - ((1.0, 1.0), (1.5, 1.75), (2.0, 2.0), (2.5, 2.5), (3.0, 0.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(0.5, 0.5), Point(1.5, 1.75), Point(2.5, 2.5), Point(4, -2)], + ) + + assert repr( + ((1.0, 1.0), (1.5, 1.75), (2.0, 2.0), (2.5, 2.5), (3.0, 0.0)) + ) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([3]) - + # new points, not on the line, different order # expected result: the new points appear on the geometry - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(2.5, 2.5), - Point(0.5, 0.5), - Point(4, -2), - Point(1.5, 1.75) - ], - ) - - assert ( - repr( - ((1.0, 1.0), (1.5, 1.75), (2.0, 2.0), (2.5, 2.5), (3.0, 0.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(2.5, 2.5), Point(0.5, 0.5), Point(4, -2), Point(1.5, 1.75)], + ) + + assert repr( + ((1.0, 1.0), (1.5, 1.75), (2.0, 2.0), (2.5, 2.5), (3.0, 0.0)) + ) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([1]) assert repr(close_to_end) == repr([2]) - + # ********************************************************************* # 
********************************************************************* - + # new points, equidistant from start and end points, preference former - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(1.0,1.0),(1.0,0.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]) line = LineString(line_coords) - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(0.5, 0.0) - ], - ) - - assert ( - repr( - ((0.0,0.0),(0.0,1.0),(1.0,1.0),(1.0,0.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(0.5, 0.0)], + ) + + assert repr(((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0))) == repr( + tuple(new_line.coords) + ) assert repr(close_to_start) == repr([0]) assert repr(close_to_end) == repr([]) - + # new points, equidistant from start and end points, preference latter - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(0.5, 0.0) - ], - use_start_point_equidistant=False - ) - - assert ( - repr( - ((0.0,0.0),(0.0,1.0),(1.0,1.0),(1.0,0.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, points=[Point(0.5, 0.0)], use_start_point_equidistant=False + ) + + assert repr(((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0))) == repr( + tuple(new_line.coords) + ) assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([0]) - + # ********************************************************************* # ********************************************************************* - + # new points, equidistant from multiple segments - + # two segments - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(1.0,1.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0)]) line = LineString(line_coords) - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(0.5, 0.5) - ], - ) - assert ( - repr( - ((0.0,0.0),(0.5,0.5),(0.0,1.0),(1.0,1.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(0.5, 0.5)], + ) + assert repr(((0.0, 0.0), (0.5, 0.5), (0.0, 1.0), (1.0, 1.0))) == repr( + tuple(new_line.coords) + ) assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([]) - + # three segments - - line_coords = tuple([(0.0,0.0),(0.0,1.0),(1.0,1.0),(1.0,0.0)]) + + line_coords = tuple([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]) line = LineString(line_coords) - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(0.5, 0.5) - ], - ) - assert ( - repr( - ((0.0,0.0),(0.5,0.5),(0.0,1.0),(1.0,1.0),(1.0,0.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(0.5, 0.5)], + ) + assert repr( + ((0.0, 0.0), (0.5, 0.5), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)) + ) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([]) - + # four segments - + line_coords = tuple( - [(0.0,0.0),(0.0,1.0),(1.0,1.0),(1.0,0.0),(0.0,0.0)] - ) + [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)] + ) line = LineString(line_coords) - - (new_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[ - Point(0.5, 0.5) 
- ], - ) - assert ( - repr( - ((0.0,0.0),(0.5,0.5),(0.0,1.0),(1.0,1.0),(1.0,0.0),(0.0,0.0)) - ) == repr(tuple(new_line.coords)) - ) + + (new_line, close_to_start, close_to_end) = gis_mod.merge_points_into_linestring( + line, + points=[Point(0.5, 0.5)], + ) + assert repr( + ((0.0, 0.0), (0.5, 0.5), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)) + ) == repr(tuple(new_line.coords)) assert repr(close_to_start) == repr([]) assert repr(close_to_end) == repr([]) - + # ********************************************************************* # ********************************************************************* - + def test_merge_points_into_linestring_wo_fixed_extremities(self): - # simple line - line_coords = tuple([(0.0,0.0),(0.0,1.0)]) + line_coords = tuple([(0.0, 0.0), (0.0, 1.0)]) line = LineString(line_coords) - + error_raised = False try: - (_line, - close_to_start, - close_to_end) = gis_mod.merge_points_into_linestring( - line, - points=[Point(0,0.5)], - fixed_extremities=False - ) + ( + _line, + close_to_start, + close_to_end, + ) = gis_mod.merge_points_into_linestring( + line, points=[Point(0, 0.5)], fixed_extremities=False + ) except NotImplementedError: error_raised = True assert error_raised - + # ************************************************************************* # ************************************************************************* - + def test_modify_roundabouts_unprojected(self): - # network should be a OSM-nx formatted graph network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) + truncate_by_edge=True, + ) # find all roundabouts roundabouts = gis_iden.find_roundabouts(network) # confirm they are roundabouts @@ -3236,25 +3100,23 @@ class TestGisModify: roundabouts.append([0, 1, 2]) # modify the roundabouts node_replacements = gis_mod.transform_roundabouts_into_crossroads( - network, - roundabouts - ) + network, roundabouts + ) # make sure the fake roundabout was detected assert type(node_replacements[-1]) == type(None) # TODO: test the conversion itself - + # ************************************************************************* # ************************************************************************* - + def test_modify_roundabouts_projected(self): - # network should be a OSM-nx formatted graph network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) + truncate_by_edge=True, + ) # project the network network = ox.project_graph(G=network) # find all roundabouts @@ -3266,12 +3128,12 @@ class TestGisModify: roundabouts.append([0, 1, 2]) # modify the roundabouts node_replacements = gis_mod.transform_roundabouts_into_crossroads( - network, - roundabouts - ) + network, roundabouts + ) # make sure the fake roundabout was detected assert type(node_replacements[-1]) == type(None) # TODO: test the conversion itself - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_gis_utils.py b/tests/test_gis_utils.py index d1d2761..0fb9059 100644 --- a/tests/test_gis_utils.py +++ b/tests/test_gis_utils.py @@ 
-24,1333 +24,1381 @@ ox.settings.use_cache = True # ***************************************************************************** # ***************************************************************************** + class TestGisUtils: - # ************************************************************************* # ************************************************************************* - + def test_examples(self): - # test io - - self.example_io_geodataframe( - preserve_original_gdf=True, identify_columns=False - ) - + + self.example_io_geodataframe(preserve_original_gdf=True, identify_columns=False) + self.example_io_geodataframe( preserve_original_gdf=False, identify_columns=False - ) - + ) + # example_io_geodataframe( # preserve_original_gdf=True, identify_columns=True # ) - + # example_io_geodataframe( # preserve_original_gdf=False, identify_columns=True # ) - + # TODO: handle GeoJSON files - + # example_io_geodataframe(preserve_original_gdf=True, # file_extension='.json') - + # example_io_geodataframe(preserve_original_gdf=False, # file_extension='.json') - + # ************************************************************************* # ************************************************************************* - + def test_identifying_entrances_simple_no_driveway(self): - # no driveway, all nodes - + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, - focus_on_node_P_only=False) - + focus_on_node_P_only=False, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=False, - focus_on_node_P_only=False) - + focus_on_node_P_only=False, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=True, create_reversed_edges=True, - focus_on_node_P_only=False) - + focus_on_node_P_only=False, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=True, - focus_on_node_P_only=False) - + focus_on_node_P_only=False, + ) + # no driveway, all nodes, multiple addresses per edge - + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, focus_on_node_P_only=False, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=False, focus_on_node_P_only=False, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=True, create_reversed_edges=True, focus_on_node_P_only=False, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=True, focus_on_node_P_only=False, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + # no driveway, all nodes, revert projection - + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=True, focus_on_node_P_only=False, - revert_to_original_crs=True) - + revert_to_original_crs=True, + ) + # no driveway, limited selection of nodes - + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=False, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + 
self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=True, create_reversed_edges=True, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + self.example_identify_entrances_simple_no_driveway( AB_right_BC_wrong=False, create_reversed_edges=True, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + # driveway, all nodes - + self.example_identify_entrances_simple_driveway( - AB_right_BC_wrong=True, - create_reversed_edges=False) - + AB_right_BC_wrong=True, create_reversed_edges=False + ) + self.example_identify_entrances_simple_driveway( - AB_right_BC_wrong=False, - create_reversed_edges=False) - + AB_right_BC_wrong=False, create_reversed_edges=False + ) + self.example_identify_entrances_simple_driveway( - AB_right_BC_wrong=True, - create_reversed_edges=True) - + AB_right_BC_wrong=True, create_reversed_edges=True + ) + self.example_identify_entrances_simple_driveway( - AB_right_BC_wrong=False, - create_reversed_edges=True) - + AB_right_BC_wrong=False, create_reversed_edges=True + ) + # driveway, all nodes, multiple addresses per edge - + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=False, create_reversed_edges=False, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=True, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=False, create_reversed_edges=True, - use_multiple_addresses=True) - + use_multiple_addresses=True, + ) + # driveway, limited selection - + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=False, create_reversed_edges=False, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=True, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=False, create_reversed_edges=True, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + # driveway variation - + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, focus_on_node_P_only=False, BD_with_name=False, - BD_right_address=False) - + BD_right_address=False, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, focus_on_node_P_only=False, BD_with_name=True, - BD_right_address=False) - + BD_right_address=False, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=False, create_reversed_edges=False, focus_on_node_P_only=False, BD_with_name=True, - BD_right_address=False) - + BD_right_address=False, + ) + self.example_identify_entrances_simple_driveway( AB_right_BC_wrong=True, create_reversed_edges=False, focus_on_node_P_only=False, BD_with_name=True, - BD_right_address=True) - + BD_right_address=True, + ) + # special cases - + self.example_identify_entrances_simple_special( create_reversed_edges=False, revert_to_original_crs=False, - focus_on_node_P_only=False) - + focus_on_node_P_only=False, + ) + 
self.example_identify_entrances_simple_special( create_reversed_edges=True, revert_to_original_crs=False, - focus_on_node_P_only=False) - + focus_on_node_P_only=False, + ) + self.example_identify_entrances_simple_special( create_reversed_edges=False, revert_to_original_crs=False, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + self.example_identify_entrances_simple_special( create_reversed_edges=True, revert_to_original_crs=False, - focus_on_node_P_only=True) - + focus_on_node_P_only=True, + ) + # no matching street name in the entire network - + self.example_identify_entrances_simple_special( create_reversed_edges=False, revert_to_original_crs=False, focus_on_node_P_only=False, - CE_wrong=True) - + CE_wrong=True, + ) + # TODO: test a case with multiple parallel edges - + # ************************************************************************* # ************************************************************************* - + def test_generating_node_containers(self): - # test generating containers - + self.example_generate_node_container(False, False) - + self.example_generate_node_container(True, False) - + self.example_generate_node_container(False, True) - + self.example_generate_node_container(True, True) - + self.example_generate_node_container(False, False, True) - + # ************************************************************************* # ************************************************************************* - - def get_node_gdf_A(self, - right_address: str = 'right', - wrong_address: str = 'wrong', - country_code: str = _osm.KEY_COUNTRY_DK): - + + def get_node_gdf_A( + self, + right_address: str = "right", + wrong_address: str = "wrong", + country_code: str = _osm.KEY_COUNTRY_DK, + ): # 4 nodes: A, B, C and P - - # node A - - osmid_A = 'A' - + + # node A + + osmid_A = "A" + xy_A = (0, 0) - + geo_A = Point(xy_A) - + node_uid_A = 5123 - + address_A = None - + # node B - - osmid_B = 'B' - + + osmid_B = "B" + xy_B = (1, 0) - + geo_B = Point(xy_B) - + node_uid_B = 1844 - + address_B = None - + # node C - - osmid_C = 'C' - + + osmid_C = "C" + xy_C = (2, 0) - + geo_C = Point(xy_C) - + node_uid_C = 1845 - + address_C = None - + # node P - - osmid_P = 'P' - + + osmid_P = "P" + xy_P = (0.5, 1) - + geo_P = Point(xy_P) - + node_uid_P = 9475 - + address_P = right_address - - #************************************************************************** - + + # ************************************************************************** + # geodataframe: should have 'addr:street', 'osak:identifier' and index - + gdf = GeoDataFrame( - data={_osm.KEY_OSM_STREET: [address_A, # A - address_B, # B - address_C, # C - address_P], # P - _osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]: [node_uid_A, # A - node_uid_B, # B - node_uid_C, # C - node_uid_P],# P - _osm.KEY_OSMNX_ELEMENT_TYPE: ['node','node','node','node'], - _osm.KEY_OSMNX_OSMID: [osmid_A, osmid_B, osmid_C, osmid_P]}, - geometry=[geo_A, geo_B, geo_C, geo_P] - ) - - gdf.set_index([_osm.KEY_OSMNX_ELEMENT_TYPE, _osm.KEY_OSMNX_OSMID], - drop=True, - inplace=True) - + data={ + _osm.KEY_OSM_STREET: [ + address_A, # A + address_B, # B + address_C, # C + address_P, + ], # P + _osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]: [ + node_uid_A, # A + node_uid_B, # B + node_uid_C, # C + node_uid_P, + ], # P + _osm.KEY_OSMNX_ELEMENT_TYPE: ["node", "node", "node", "node"], + _osm.KEY_OSMNX_OSMID: [osmid_A, osmid_B, osmid_C, osmid_P], + }, + geometry=[geo_A, geo_B, geo_C, geo_P], + ) + + gdf.set_index( + [_osm.KEY_OSMNX_ELEMENT_TYPE, 
_osm.KEY_OSMNX_OSMID], drop=True, inplace=True + ) + return gdf - - #************************************************************************** - + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - - def get_node_gdf_B(self, - right_address: str = 'right', - wrong_address: str = 'wrong', - country_code: str = _osm.KEY_COUNTRY_DK): - - #************************************************************************** - - gdf = self.get_node_gdf_A(right_address=right_address, - wrong_address=wrong_address, - country_code=country_code) - + + def get_node_gdf_B( + self, + right_address: str = "right", + wrong_address: str = "wrong", + country_code: str = _osm.KEY_COUNTRY_DK, + ): + # ************************************************************************** + + gdf = self.get_node_gdf_A( + right_address=right_address, + wrong_address=wrong_address, + country_code=country_code, + ) + # add another node D closer to P than A, B and C - - osmid_D = 'D' - + + osmid_D = "D" + xy_D = (0.75, 1) - + geo_D = Point(xy_D) - + node_uid_D = 8842 - + # address_D = None - - gdf_node_D = GeoDataFrame({_osm.KEY_OSMNX_GEOMETRY: [geo_D], - _osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]: - [node_uid_D], - #_osm.KEY_OSM_STREET: [address_D],# P - #_osm.KEY_OSMNX_ELEMENT_TYPE: ['node'], - #_osm.KEY_OSMNX_OSMID: [osmid_D] - }, - #index=[('node', osmid_D)], - index=MultiIndex.from_tuples( - [('node',osmid_D)], - names=[_osm.KEY_OSMNX_ELEMENT_TYPE, - _osm.KEY_OSMNX_OSMID]) - ) - - #************************************************************************** - + + gdf_node_D = GeoDataFrame( + { + _osm.KEY_OSMNX_GEOMETRY: [geo_D], + _osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]: [node_uid_D], + # _osm.KEY_OSM_STREET: [address_D],# P + # _osm.KEY_OSMNX_ELEMENT_TYPE: ['node'], + # _osm.KEY_OSMNX_OSMID: [osmid_D] + }, + # index=[('node', osmid_D)], + index=MultiIndex.from_tuples( + [("node", osmid_D)], + names=[_osm.KEY_OSMNX_ELEMENT_TYPE, _osm.KEY_OSMNX_OSMID], + ), + ) + + # ************************************************************************** + gdf = concat([gdf, gdf_node_D]) - + return gdf - - #************************************************************************** - + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - - def get_node_gdf_C(self, - right_address: str = 'right', - wrong_address: str = 'wrong', - country_code: str = _osm.KEY_COUNTRY_DK): - - #************************************************************************** - - gdf = self.get_node_gdf_B(right_address=right_address, - wrong_address=wrong_address, - country_code=country_code) - + + def get_node_gdf_C( + self, + right_address: str = "right", + wrong_address: str = "wrong", + country_code: str = _osm.KEY_COUNTRY_DK, + ): + # ************************************************************************** + + gdf = self.get_node_gdf_B( + right_address=right_address, + wrong_address=wrong_address, + country_code=country_code, + ) + # add another node E, east of C - - osmid_E = 'E' - + + osmid_E = "E" + xy_E = (3, 0) - + geo_E = Point(xy_E) - + node_uid_E = 9173 - - #address_E = right_address - - gdf_node_E = GeoDataFrame({_osm.KEY_OSMNX_GEOMETRY: [geo_E], - 
_osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]: - [node_uid_E], - #_osm.KEY_OSM_STREET: [address_E], - }, - #index=[('node', osmid_E)] - index=MultiIndex.from_tuples( - [('node',osmid_E)], - names=[_osm.KEY_OSMNX_ELEMENT_TYPE, - _osm.KEY_OSMNX_OSMID]) - ) - - #************************************************************************** - + + # address_E = right_address + + gdf_node_E = GeoDataFrame( + { + _osm.KEY_OSMNX_GEOMETRY: [geo_E], + _osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]: [node_uid_E], + # _osm.KEY_OSM_STREET: [address_E], + }, + # index=[('node', osmid_E)] + index=MultiIndex.from_tuples( + [("node", osmid_E)], + names=[_osm.KEY_OSMNX_ELEMENT_TYPE, _osm.KEY_OSMNX_OSMID], + ), + ) + + # ************************************************************************** + gdf = concat([gdf, gdf_node_E]) - + return gdf - - #************************************************************************** - + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - - def get_network_A(self, - gdf: GeoDataFrame, - right_address: str = 'right', - wrong_address: str = 'wrong', - AB_right_BC_wrong: bool = True, - country_code: str = _osm.KEY_COUNTRY_DK, - use_multiple_addresses: bool = False): - - #************************************************************************** - + + def get_network_A( + self, + gdf: GeoDataFrame, + right_address: str = "right", + wrong_address: str = "wrong", + AB_right_BC_wrong: bool = True, + country_code: str = _osm.KEY_COUNTRY_DK, + use_multiple_addresses: bool = False, + ): + # ************************************************************************** + # create network - - (node_keys, - node_data_container, - node_key_to_gdf_index_dict) = gis_utils.prepare_node_data_from_geodataframe( - include_geometry=True, - gdf=gdf) - + + ( + node_keys, + node_data_container, + node_key_to_gdf_index_dict, + ) = gis_utils.prepare_node_data_from_geodataframe( + include_geometry=True, gdf=gdf + ) + network = nx.MultiDiGraph() - - network.graph['crs'] = "EPSG:4326" - + + network.graph["crs"] = "EPSG:4326" + network.add_nodes_from(node_data_container) - - #************************************************************************** - + + # ************************************************************************** + # two edges: AB and BC - - node_key_A = 'A' - node_key_B = 'B' - node_key_C = 'C' - + + node_key_A = "A" + node_key_B = "B" + node_key_C = "C" + # edge AB - + geo_AB = LineString( - [(network.nodes[node_key_A][_osm.KEY_OSMNX_X], - network.nodes[node_key_A][_osm.KEY_OSMNX_Y]), - (network.nodes[node_key_B][_osm.KEY_OSMNX_X], - network.nodes[node_key_B][_osm.KEY_OSMNX_Y])] - ) - + [ + ( + network.nodes[node_key_A][_osm.KEY_OSMNX_X], + network.nodes[node_key_A][_osm.KEY_OSMNX_Y], + ), + ( + network.nodes[node_key_B][_osm.KEY_OSMNX_X], + network.nodes[node_key_B][_osm.KEY_OSMNX_Y], + ), + ] + ) + length_AB = network.nodes[node_key_A][_osm.KEY_OSMNX_GEOMETRY].distance( network.nodes[node_key_B][_osm.KEY_OSMNX_GEOMETRY] - ) - - network.add_edge(node_key_A, - node_key_B, - **{_osm.KEY_OSMNX_GEOMETRY: geo_AB, - _osm.KEY_OSMNX_LENGTH: length_AB, - _osm.KEY_OSMNX_NAME: ( - ['HZ', (right_address if AB_right_BC_wrong else - wrong_address)] if use_multiple_addresses else ( - right_address if AB_right_BC_wrong else - wrong_address) - ) - }) - + ) + + network.add_edge( + node_key_A, + node_key_B, + **{ + 
_osm.KEY_OSMNX_GEOMETRY: geo_AB, + _osm.KEY_OSMNX_LENGTH: length_AB, + _osm.KEY_OSMNX_NAME: ( + ["HZ", (right_address if AB_right_BC_wrong else wrong_address)] + if use_multiple_addresses + else (right_address if AB_right_BC_wrong else wrong_address) + ), + } + ) + # edge BC - + geo_BC = LineString( - [(network.nodes[node_key_B][_osm.KEY_OSMNX_X], - network.nodes[node_key_B][_osm.KEY_OSMNX_Y]), - (network.nodes[node_key_C][_osm.KEY_OSMNX_X], - network.nodes[node_key_C][_osm.KEY_OSMNX_Y])] - ) - + [ + ( + network.nodes[node_key_B][_osm.KEY_OSMNX_X], + network.nodes[node_key_B][_osm.KEY_OSMNX_Y], + ), + ( + network.nodes[node_key_C][_osm.KEY_OSMNX_X], + network.nodes[node_key_C][_osm.KEY_OSMNX_Y], + ), + ] + ) + length_BC = network.nodes[node_key_B][_osm.KEY_OSMNX_GEOMETRY].distance( network.nodes[node_key_C][_osm.KEY_OSMNX_GEOMETRY] - ) - - network.add_edge(node_key_B, - node_key_C, - **{_osm.KEY_OSMNX_GEOMETRY: geo_BC, - _osm.KEY_OSMNX_LENGTH: length_BC, - _osm.KEY_OSMNX_NAME: ( - [(wrong_address if AB_right_BC_wrong else - right_address), 'UQ'] if use_multiple_addresses - else (wrong_address if AB_right_BC_wrong else - right_address) - ) - }) - - #************************************************************************** - + ) + + network.add_edge( + node_key_B, + node_key_C, + **{ + _osm.KEY_OSMNX_GEOMETRY: geo_BC, + _osm.KEY_OSMNX_LENGTH: length_BC, + _osm.KEY_OSMNX_NAME: ( + [(wrong_address if AB_right_BC_wrong else right_address), "UQ"] + if use_multiple_addresses + else (wrong_address if AB_right_BC_wrong else right_address) + ), + } + ) + + # ************************************************************************** + return network, node_keys, node_key_to_gdf_index_dict - - #************************************************************************** - + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - - def get_network_B(self, - gdf: GeoDataFrame, - right_address: str = 'right', - wrong_address: str = 'wrong', - AB_right_BC_wrong: bool = True, - BD_with_name: bool = False, - BD_right_address: bool = False, - country_code: str = _osm.KEY_COUNTRY_DK, - use_multiple_addresses: bool = False): - - #************************************************************************** - + + def get_network_B( + self, + gdf: GeoDataFrame, + right_address: str = "right", + wrong_address: str = "wrong", + AB_right_BC_wrong: bool = True, + BD_with_name: bool = False, + BD_right_address: bool = False, + country_code: str = _osm.KEY_COUNTRY_DK, + use_multiple_addresses: bool = False, + ): + # ************************************************************************** + # create network - + network, node_keys, node_key_to_gdf_index_dict = self.get_network_A( gdf=gdf, right_address=right_address, wrong_address=wrong_address, country_code=country_code, AB_right_BC_wrong=AB_right_BC_wrong, - use_multiple_addresses=use_multiple_addresses) - + use_multiple_addresses=use_multiple_addresses, + ) + # add nameless BD edge - - node_key_B = 'B' - node_key_D = 'D' - + + node_key_B = "B" + node_key_D = "D" + # edge BD - + geo_BD = LineString( - [(network.nodes[node_key_B][_osm.KEY_OSMNX_X], - network.nodes[node_key_B][_osm.KEY_OSMNX_Y]), - (network.nodes[node_key_D][_osm.KEY_OSMNX_X], - network.nodes[node_key_D][_osm.KEY_OSMNX_Y])] - ) - + [ + ( + network.nodes[node_key_B][_osm.KEY_OSMNX_X], + 
network.nodes[node_key_B][_osm.KEY_OSMNX_Y], + ), + ( + network.nodes[node_key_D][_osm.KEY_OSMNX_X], + network.nodes[node_key_D][_osm.KEY_OSMNX_Y], + ), + ] + ) + length_BD = network.nodes[node_key_B][_osm.KEY_OSMNX_GEOMETRY].distance( network.nodes[node_key_D][_osm.KEY_OSMNX_GEOMETRY] - ) - + ) + BD_dict = { - _osm.KEY_OSMNX_GEOMETRY: geo_BD, - _osm.KEY_OSMNX_LENGTH: length_BD, - #_osm.KEY_OSMNX_NAME: ( # no name for BD - # right_address if AB_right_BC_wrong else - # wrong_address) - } - + _osm.KEY_OSMNX_GEOMETRY: geo_BD, + _osm.KEY_OSMNX_LENGTH: length_BD, + # _osm.KEY_OSMNX_NAME: ( # no name for BD + # right_address if AB_right_BC_wrong else + # wrong_address) + } + if BD_with_name: - - BD_dict[_osm.KEY_OSMNX_NAME] = ( + BD_dict[_osm.KEY_OSMNX_NAME] = ( right_address if BD_right_address else wrong_address - ) - - network.add_edge(node_key_B, - node_key_D, - **BD_dict) - - #************************************************************************** - + ) + + network.add_edge(node_key_B, node_key_D, **BD_dict) + + # ************************************************************************** + return network, node_keys, node_key_to_gdf_index_dict - - #************************************************************************** - + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - - def get_network_C(self, - gdf: GeoDataFrame, - right_address: str = 'right', - wrong_address: str = 'wrong', - country_code: str = _osm.KEY_COUNTRY_DK, - CE_wrong: bool = False, - use_multiple_addresses: bool = False): - - #************************************************************************** - + + def get_network_C( + self, + gdf: GeoDataFrame, + right_address: str = "right", + wrong_address: str = "wrong", + country_code: str = _osm.KEY_COUNTRY_DK, + CE_wrong: bool = False, + use_multiple_addresses: bool = False, + ): + # ************************************************************************** + # create network - + network, node_keys, node_key_to_gdf_index_dict = self.get_network_B( gdf=gdf, right_address=wrong_address, wrong_address=wrong_address, country_code=country_code, - AB_right_BC_wrong=True) - + AB_right_BC_wrong=True, + ) + # add a CE edge with the right name - - node_key_C = 'C' - node_key_E = 'E' - + + node_key_C = "C" + node_key_E = "E" + # edge CE - + geo_CE = LineString( - [(network.nodes[node_key_C][_osm.KEY_OSMNX_X], - network.nodes[node_key_C][_osm.KEY_OSMNX_Y]), - (network.nodes[node_key_E][_osm.KEY_OSMNX_X], - network.nodes[node_key_E][_osm.KEY_OSMNX_Y])] - ) - + [ + ( + network.nodes[node_key_C][_osm.KEY_OSMNX_X], + network.nodes[node_key_C][_osm.KEY_OSMNX_Y], + ), + ( + network.nodes[node_key_E][_osm.KEY_OSMNX_X], + network.nodes[node_key_E][_osm.KEY_OSMNX_Y], + ), + ] + ) + length_CE = network.nodes[node_key_C][_osm.KEY_OSMNX_GEOMETRY].distance( network.nodes[node_key_E][_osm.KEY_OSMNX_GEOMETRY] - ) - - network.add_edge(node_key_C, - node_key_E, - **{_osm.KEY_OSMNX_GEOMETRY: geo_CE, - _osm.KEY_OSMNX_LENGTH: length_CE, - _osm.KEY_OSMNX_NAME: ( - wrong_address if CE_wrong else right_address - ) - }) - - #************************************************************************** - + ) + + network.add_edge( + node_key_C, + node_key_E, + **{ + _osm.KEY_OSMNX_GEOMETRY: geo_CE, + _osm.KEY_OSMNX_LENGTH: length_CE, + _osm.KEY_OSMNX_NAME: (wrong_address if CE_wrong else right_address), + } + ) + + # 
************************************************************************** + return network, node_keys, node_key_to_gdf_index_dict - - #************************************************************************** - + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - + def example_identify_entrances_simple_special( - self, - create_reversed_edges: bool = False, - revert_to_original_crs: bool = False, - focus_on_node_P_only: bool = False, - CE_wrong: bool = False): - + self, + create_reversed_edges: bool = False, + revert_to_original_crs: bool = False, + focus_on_node_P_only: bool = False, + CE_wrong: bool = False, + ): # get problem details - + country_code = _osm.KEY_COUNTRY_DK - + gdf = self.get_node_gdf_C(country_code=country_code) - + network, node_keys, node_key_to_gdf_index_dict = self.get_network_C( - gdf=gdf, - country_code=country_code, - CE_wrong=CE_wrong) - + gdf=gdf, country_code=country_code, CE_wrong=CE_wrong + ) + # create reverse edges - + if create_reversed_edges: - - previous_edge_keys = list( - edge_key for edge_key in network.edges(keys=True) - ) - + previous_edge_keys = list(edge_key for edge_key in network.edges(keys=True)) + for edge_key in previous_edge_keys: - - edge_dict = network.get_edge_data(u=edge_key[0], - v=edge_key[1], - key=edge_key[2]) - - network.add_edge(u_for_edge=edge_key[1], - v_for_edge=edge_key[0], - **edge_dict) - + edge_dict = network.get_edge_data( + u=edge_key[0], v=edge_key[1], key=edge_key[2] + ) + + network.add_edge( + u_for_edge=edge_key[1], v_for_edge=edge_key[0], **edge_dict + ) + # find out which is the closest edge - + if focus_on_node_P_only: - nearest_edge_keys, _, _ = gis_utils.identify_building_entrance_edges( - gdf=gdf, + gdf=gdf, gdf_street_column=_osm.KEY_OSM_STREET, network=network, - node_key_to_gdf_index_dict={ - 'P': node_key_to_gdf_index_dict['P'] - }, + node_key_to_gdf_index_dict={"P": node_key_to_gdf_index_dict["P"]}, crs=None, - revert_to_original_crs=revert_to_original_crs) - + revert_to_original_crs=revert_to_original_crs, + ) + else: - nearest_edge_keys, _, _ = gis_utils.identify_building_entrance_edges( - gdf=gdf, + gdf=gdf, gdf_street_column=_osm.KEY_OSM_STREET, network=network, node_key_to_gdf_index_dict=node_key_to_gdf_index_dict, crs=None, - revert_to_original_crs=revert_to_original_crs) - + revert_to_original_crs=revert_to_original_crs, + ) + # validate the outcome - + if CE_wrong: - # no edges with the right address, the closest edge should be selected - - assert (('B','D', 0) == nearest_edge_keys['P'] or - ('D','B', 0) == nearest_edge_keys['P']) - + + assert ("B", "D", 0) == nearest_edge_keys["P"] or ( + "D", + "B", + 0, + ) == nearest_edge_keys["P"] + else: - # CE has the right address, it should be selected - - assert (('C','E', 0) == nearest_edge_keys['P'] or - ('E','C', 0) == nearest_edge_keys['P']) - - #************************************************************************** - + + assert ("C", "E", 0) == nearest_edge_keys["P"] or ( + "E", + "C", + 0, + ) == nearest_edge_keys["P"] + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - + def example_identify_entrances_simple_driveway( - self, - AB_right_BC_wrong: bool = True, - 
create_reversed_edges: bool = False, - revert_to_original_crs: bool = False, - focus_on_node_P_only: bool = False, - BD_with_name: bool = False, - BD_right_address: bool = False, - use_multiple_addresses: bool = False): - - + self, + AB_right_BC_wrong: bool = True, + create_reversed_edges: bool = False, + revert_to_original_crs: bool = False, + focus_on_node_P_only: bool = False, + BD_with_name: bool = False, + BD_right_address: bool = False, + use_multiple_addresses: bool = False, + ): # get problem details - + country_code = _osm.KEY_COUNTRY_DK - + gdf = self.get_node_gdf_B(country_code=country_code) - + network, node_keys, node_key_to_gdf_index_dict = self.get_network_B( gdf=gdf, country_code=country_code, BD_with_name=BD_with_name, BD_right_address=BD_right_address, AB_right_BC_wrong=AB_right_BC_wrong, - use_multiple_addresses=use_multiple_addresses) - + use_multiple_addresses=use_multiple_addresses, + ) + # create reverse edges - + if create_reversed_edges: - - previous_edge_keys = list( - edge_key for edge_key in network.edges(keys=True) - ) - + previous_edge_keys = list(edge_key for edge_key in network.edges(keys=True)) + for edge_key in previous_edge_keys: - - edge_dict = network.get_edge_data(u=edge_key[0], - v=edge_key[1], - key=edge_key[2]) - - network.add_edge(u_for_edge=edge_key[1], - v_for_edge=edge_key[0], - **edge_dict) - + edge_dict = network.get_edge_data( + u=edge_key[0], v=edge_key[1], key=edge_key[2] + ) + + network.add_edge( + u_for_edge=edge_key[1], v_for_edge=edge_key[0], **edge_dict + ) + # find out which is the closest edge - + if focus_on_node_P_only: - nearest_edge_keys, _, _ = gis_utils.identify_building_entrance_edges( - gdf=gdf, + gdf=gdf, gdf_street_column=_osm.KEY_OSM_STREET, network=network, - node_key_to_gdf_index_dict={ - 'P': node_key_to_gdf_index_dict['P'] - }, + node_key_to_gdf_index_dict={"P": node_key_to_gdf_index_dict["P"]}, crs=None, - revert_to_original_crs=revert_to_original_crs) - + revert_to_original_crs=revert_to_original_crs, + ) + else: - nearest_edge_keys, _, _ = gis_utils.identify_building_entrance_edges( - gdf=gdf, + gdf=gdf, gdf_street_column=_osm.KEY_OSM_STREET, network=network, node_key_to_gdf_index_dict=node_key_to_gdf_index_dict, crs=None, - revert_to_original_crs=revert_to_original_crs) - + revert_to_original_crs=revert_to_original_crs, + ) + # validate the outcome - + if BD_with_name and not BD_right_address: - if AB_right_BC_wrong: - - assert (('A','B', 0) == nearest_edge_keys['P'] or - ('B','A', 0) == nearest_edge_keys['P']) - + assert ("A", "B", 0) == nearest_edge_keys["P"] or ( + "B", + "A", + 0, + ) == nearest_edge_keys["P"] + else: - - assert (('B','C', 0) == nearest_edge_keys['P'] or - ('C','B', 0) == nearest_edge_keys['P']) - + assert ("B", "C", 0) == nearest_edge_keys["P"] or ( + "C", + "B", + 0, + ) == nearest_edge_keys["P"] + # elif BD_with_name and BD_right_address: - + # assert (('B','D', 0) == nearest_edge_keys['P'] or # ('D','B', 0) == nearest_edge_keys['P']) - + else: - - assert (('B','D', 0) == nearest_edge_keys['P'] or - ('D','B', 0) == nearest_edge_keys['P']) - - #************************************************************************** - + assert ("B", "D", 0) == nearest_edge_keys["P"] or ( + "D", + "B", + 0, + ) == nearest_edge_keys["P"] + + # ************************************************************************** + # ***************************************************************************** # ***************************************************************************** - + def 
example_identify_entrances_simple_no_driveway( - self, - AB_right_BC_wrong: bool = True, - create_reversed_edges: bool = False, - focus_on_node_P_only: bool = False, - revert_to_original_crs: bool = False, - use_multiple_addresses: bool = False): - + self, + AB_right_BC_wrong: bool = True, + create_reversed_edges: bool = False, + focus_on_node_P_only: bool = False, + revert_to_original_crs: bool = False, + use_multiple_addresses: bool = False, + ): # get problem details - + country_code = _osm.KEY_COUNTRY_DK - + gdf = self.get_node_gdf_A(country_code=country_code) - + network, node_keys, node_key_to_gdf_index_dict = self.get_network_A( gdf=gdf, country_code=country_code, AB_right_BC_wrong=AB_right_BC_wrong, - use_multiple_addresses=use_multiple_addresses) - + use_multiple_addresses=use_multiple_addresses, + ) + # create reverse edges - + if create_reversed_edges: - - previous_edge_keys = list( - edge_key for edge_key in network.edges(keys=True) - ) - + previous_edge_keys = list(edge_key for edge_key in network.edges(keys=True)) + for edge_key in previous_edge_keys: - - edge_dict = network.get_edge_data(u=edge_key[0], - v=edge_key[1], - key=edge_key[2]) - - network.add_edge(u_for_edge=edge_key[1], - v_for_edge=edge_key[0], - **edge_dict) - + edge_dict = network.get_edge_data( + u=edge_key[0], v=edge_key[1], key=edge_key[2] + ) + + network.add_edge( + u_for_edge=edge_key[1], v_for_edge=edge_key[0], **edge_dict + ) + # find out which is the closest edge - + if focus_on_node_P_only: - nearest_edge_keys, _, _ = gis_utils.identify_building_entrance_edges( - gdf=gdf, + gdf=gdf, gdf_street_column=_osm.KEY_OSM_STREET, network=network, - node_key_to_gdf_index_dict={ - 'P': node_key_to_gdf_index_dict['P'] - }, + node_key_to_gdf_index_dict={"P": node_key_to_gdf_index_dict["P"]}, crs=None, - revert_to_original_crs=revert_to_original_crs) - + revert_to_original_crs=revert_to_original_crs, + ) + else: - nearest_edge_keys, _, _ = gis_utils.identify_building_entrance_edges( - gdf=gdf, + gdf=gdf, gdf_street_column=_osm.KEY_OSM_STREET, network=network, node_key_to_gdf_index_dict=node_key_to_gdf_index_dict, crs=None, - revert_to_original_crs=revert_to_original_crs) - + revert_to_original_crs=revert_to_original_crs, + ) + # validate the outcome - + if AB_right_BC_wrong: - # the closest edge should be AB - - assert ('A','B', 0) == nearest_edge_keys['P'] - + + assert ("A", "B", 0) == nearest_edge_keys["P"] + else: - # the closest edge should be BC - - assert ('B','C', 0) == nearest_edge_keys['P'] - + + assert ("B", "C", 0) == nearest_edge_keys["P"] + # ************************************************************************* # ************************************************************************* - + def test_identify_entrances_simple_no_driveway_closest(self): - # get problem details - + country_code = _osm.KEY_COUNTRY_DK - + gdf = self.get_node_gdf_A(country_code=country_code) - + network, node_keys, node_key_to_gdf_index_dict = self.get_network_A( - gdf=gdf, - country_code=country_code) + gdf=gdf, country_code=country_code + ) # find out which is the closest edge nearest_edge_keys, network = gis_iden.identify_edge_closest_to_node( - network, - node_keys=['P']) - + network, node_keys=["P"] + ) + # the closest edge should be AB - - assert ('A','B', 0) in nearest_edge_keys - + + assert ("A", "B", 0) in nearest_edge_keys + assert len(nearest_edge_keys) == 1 - + # ************************************************************************* # ************************************************************************* 
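# The create_reversed_edges branches in the example_identify_entrances_*
# methods above all repeat the same loop. A minimal sketch of that pattern as
# a standalone helper, assuming only networkx; the name add_reversed_edges is
# illustrative and not part of the test suite:
import networkx as nx

def add_reversed_edges(network: nx.MultiDiGraph) -> None:
    # snapshot the current edge keys first, so the loop does not
    # revisit the reverse edges it adds along the way
    for u, v, k in list(network.edges(keys=True)):
        edge_dict = network.get_edge_data(u=u, v=v, key=k)
        # re-add the same attribute dict in the opposite direction
        network.add_edge(u_for_edge=v, v_for_edge=u, **edge_dict)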
- + # test generating a node data container (to create nodes in a network object) - - def example_generate_node_container(self, - include_geometry: bool = False, - include_street_column: bool = False, - use_id_as_node_key: bool = False): - + + def example_generate_node_container( + self, + include_geometry: bool = False, + include_street_column: bool = False, + use_id_as_node_key: bool = False, + ): # get problem details - + country_code = _osm.KEY_COUNTRY_DK - + gdf = self.get_node_gdf_A(country_code=country_code) - + # prepare node data - - (node_keys, - node_data_container, - _) = gis_utils.prepare_node_data_from_geodataframe( - gdf=gdf, - node_key_column=(_osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code] - if use_id_as_node_key else None), - include_columns=([_osm.KEY_OSM_STREET] - if include_street_column else None), - include_geometry=include_geometry) - + + ( + node_keys, + node_data_container, + _, + ) = gis_utils.prepare_node_data_from_geodataframe( + gdf=gdf, + node_key_column=( + _osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code] + if use_id_as_node_key + else None + ), + include_columns=([_osm.KEY_OSM_STREET] if include_street_column else None), + include_geometry=include_geometry, + ) + # node key to gdf index - + node_key_to_gdf_index_dict = { node_key: ( - 'node', gdf[ - gdf[_osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]]==node_key - ].index[0][1] if use_id_as_node_key else node_key - ) - for node_key in node_keys} - + "node", + gdf[ + gdf[_osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code]] == node_key + ].index[0][1] + if use_id_as_node_key + else node_key, + ) + for node_key in node_keys + } + # add nodes to new network - + network = nx.MultiDiGraph() - - network.graph['crs'] = "EPSG:4326" - + + network.graph["crs"] = "EPSG:4326" + network.add_nodes_from(node_data_container) - + # verify the data - + for node_key in node_keys: - assert network.has_node(node_key) - + gdf_index = node_key_to_gdf_index_dict[node_key] - - assert (network.nodes[node_key][_osm.KEY_OSMNX_X] == - gdf.loc[gdf_index][gis_utils.KEY_GPD_GEOMETRY].x) - - assert (network.nodes[node_key][_osm.KEY_OSMNX_Y] == - gdf.loc[gdf_index][gis_utils.KEY_GPD_GEOMETRY].y) - + + assert ( + network.nodes[node_key][_osm.KEY_OSMNX_X] + == gdf.loc[gdf_index][gis_utils.KEY_GPD_GEOMETRY].x + ) + + assert ( + network.nodes[node_key][_osm.KEY_OSMNX_Y] + == gdf.loc[gdf_index][gis_utils.KEY_GPD_GEOMETRY].y + ) + if include_geometry: - - assert (network.nodes[node_key][_osm.KEY_OSMNX_GEOMETRY] == - gdf.loc[gdf_index][gis_utils.KEY_GPD_GEOMETRY]) - + assert ( + network.nodes[node_key][_osm.KEY_OSMNX_GEOMETRY] + == gdf.loc[gdf_index][gis_utils.KEY_GPD_GEOMETRY] + ) + if include_street_column: - - assert (network.nodes[node_key][_osm.KEY_OSM_STREET] == - gdf.loc[gdf_index][_osm.KEY_OSM_STREET]) - + assert ( + network.nodes[node_key][_osm.KEY_OSM_STREET] + == gdf.loc[gdf_index][_osm.KEY_OSM_STREET] + ) + # ************************************************************************* # ************************************************************************* - + def test_node_container_errors(self): - self.example_node_container_error() - + # trigger ValueError by using an index that differs from the osmnx-provided one - - def example_node_container_error(self, country_code: str = 'dk'): - + + def example_node_container_error(self, country_code: str = "dk"): # get problem details - + country_code = _osm.KEY_COUNTRY_DK - + gdf = self.get_node_gdf_A(country_code=country_code) - + # modify index to trigger error - - 
gdf.set_index(_osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code], - inplace=True) - + + gdf.set_index(_osm.KEY_OSM_BUILDING_ENTRANCE_ID[country_code], inplace=True) + # trigger the error - + error_triggered = False try: - (node_keys, - node_data_container, - _) = gis_utils.prepare_node_data_from_geodataframe( - gdf=gdf) + ( + node_keys, + node_data_container, + _, + ) = gis_utils.prepare_node_data_from_geodataframe(gdf=gdf) except ValueError: error_triggered = True - assert error_triggered - + assert error_triggered + # ***************************************************************************** # ***************************************************************************** - + # test the counting of occurrences in a geodataframe - + def test_occurrences(self): - gdf = GeoDataFrame( - data={'column_A': [1, 2, 2, 3, 4], - 'column_B': [5.46, 5.46, 7, 7, 7.3], - 'column_C': ['a','a','a','a','a'], - 'column_D': ['a','b','c','d','e'], - 'column_E': ['hello','goodbye',None,'hello',None]}, - geometry=[Point(0,1), Point(4,5), Point(2,3), Point(4,6), Point(7,2)] - ) - + data={ + "column_A": [1, 2, 2, 3, 4], + "column_B": [5.46, 5.46, 7, 7, 7.3], + "column_C": ["a", "a", "a", "a", "a"], + "column_D": ["a", "b", "c", "d", "e"], + "column_E": ["hello", "goodbye", None, "hello", None], + }, + geometry=[Point(0, 1), Point(4, 5), Point(2, 3), Point(4, 6), Point(7, 2)], + ) + solution = { - 'column_A': {1: 1, 2: 2, 3: 1, 4: 1}, - 'column_B': {5.46: 2, 7: 2, 7.3: 1}, - 'column_C': {'a': 5}, - 'column_D': {'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1}, - 'column_E': {'hello': 2, 'goodbye': 1, None: 2} - } - + "column_A": {1: 1, 2: 2, 3: 1, 4: 1}, + "column_B": {5.46: 2, 7: 2, 7.3: 1}, + "column_C": {"a": 5}, + "column_D": {"a": 1, "b": 1, "c": 1, "d": 1, "e": 1}, + "column_E": {"hello": 2, "goodbye": 1, None: 2}, + } + # all elements - + for column in solution: - assert gis_utils.count_ocurrences(gdf, column) == solution[column] - + # specific ones - - assert gis_utils.count_ocurrences(gdf, 'column_A', [1, 2]) == {1: 1, 2: 2} - - assert gis_utils.count_ocurrences(gdf, 'column_A', [10]) == {10: 0} - - assert ( - gis_utils.count_ocurrences(gdf, 'column_B', [7, 7.3]) == {7: 2, 7.3: 1} - ) - - assert gis_utils.count_ocurrences(gdf, 'column_C', ['a']) == {'a': 5} - - assert gis_utils.count_ocurrences(gdf, 'column_C', ['b']) == {'b': 0} - - assert gis_utils.count_ocurrences(gdf, 'column_D', ['b']) == {'b': 1} - - assert ( - gis_utils.count_ocurrences(gdf, 'column_E', ['hello']) == {'hello': 2} - ) - - assert ( - gis_utils.count_ocurrences(gdf, 'column_E', [None]) == {None: 2} - ) - + + assert gis_utils.count_ocurrences(gdf, "column_A", [1, 2]) == {1: 1, 2: 2} + + assert gis_utils.count_ocurrences(gdf, "column_A", [10]) == {10: 0} + + assert gis_utils.count_ocurrences(gdf, "column_B", [7, 7.3]) == {7: 2, 7.3: 1} + + assert gis_utils.count_ocurrences(gdf, "column_C", ["a"]) == {"a": 5} + + assert gis_utils.count_ocurrences(gdf, "column_C", ["b"]) == {"b": 0} + + assert gis_utils.count_ocurrences(gdf, "column_D", ["b"]) == {"b": 1} + + assert gis_utils.count_ocurrences(gdf, "column_E", ["hello"]) == {"hello": 2} + + assert gis_utils.count_ocurrences(gdf, "column_E", [None]) == {None: 2} + # ***************************************************************************** # ***************************************************************************** - + # test creating osmnx-like geodataframes for nodes - + def test_create_osmnx_gdf(self): - # method for basic gdf compliance verification - - def verify_osmnx_gdf(gdf: 
GeoDataFrame, - extra_column_names: list = None): - + + def verify_osmnx_gdf(gdf: GeoDataFrame, extra_column_names: list = None): # index format - + assert type(gdf.index) == MultiIndex - + assert len(gdf.index.names) == 2 - + assert _osm.KEY_OSMNX_OSMID in gdf.index.names - + assert _osm.KEY_OSMNX_ELEMENT_TYPE in gdf.index.names - + # geometry column - + assert _osm.KEY_OSMNX_GEOMETRY in gdf.columns - + # extra columns - + if type(extra_column_names) != type(None): - for extra_column_name in extra_column_names: - assert extra_column_name in gdf.columns - + # the elements - + for index in gdf.index: - # must be a node - - assert 'node' in index[0] # first position of multi-index - + + assert "node" in index[0] # first position of multi-index + # must have point geometry - + assert type(gdf.loc[index][_osm.KEY_OSMNX_GEOMETRY]) == Point - + # test gdf - + gdf_example = gis_utils.GeoDataFrame( { - _osm.KEY_OSMNX_GEOMETRY: [Point(152, 546)], - }, - index=MultiIndex.from_tuples([('node', 'badfnbjiadbnd')], - names=[_osm.KEY_OSMNX_ELEMENT_TYPE, - _osm.KEY_OSMNX_OSMID]) - ) - + _osm.KEY_OSMNX_GEOMETRY: [Point(152, 546)], + }, + index=MultiIndex.from_tuples( + [("node", "badfnbjiadbnd")], + names=[_osm.KEY_OSMNX_ELEMENT_TYPE, _osm.KEY_OSMNX_OSMID], + ), + ) + verify_osmnx_gdf(gdf_example) - + # single node - + _latitude = 23 _longitude = -12 - + gdf_single = gis_utils.create_node_geodataframe( - longitudes=(_longitude,), - latitudes=(_latitude,) - ) - + longitudes=(_longitude,), latitudes=(_latitude,) + ) + verify_osmnx_gdf(gdf_single) - + # single node, using a specific key - - mynodekey = 'mynodekeyishere' - + + mynodekey = "mynodekeyishere" + gdf_single = gis_utils.create_node_geodataframe( - longitudes=(_longitude,), - latitudes=(_latitude,), - osmids=(mynodekey,) - ) - + longitudes=(_longitude,), latitudes=(_latitude,), osmids=(mynodekey,) + ) + verify_osmnx_gdf(gdf_single) - + assert gdf_single.index[0][1] == mynodekey - + # single node, with extra columns - + gdf_single = gis_utils.create_node_geodataframe( - longitudes=(_longitude,), + longitudes=(_longitude,), latitudes=(_latitude,), osmids=(mynodekey,), - long=(_longitude,), - lat=(_latitude,) - ) - - verify_osmnx_gdf(gdf_single, - extra_column_names=('long','lat')) - + long=(_longitude,), + lat=(_latitude,), + ) + + verify_osmnx_gdf(gdf_single, extra_column_names=("long", "lat")) + assert gdf_single.index[0][1] == mynodekey - assert gdf_single.iloc[0]['long'] == _longitude - assert gdf_single.iloc[0]['lat'] == _latitude - - #************************************************************************** - - # multiple nodes - - _latitudes = (23,45,73) - _longitudes = (-12,33,24) - + assert gdf_single.iloc[0]["long"] == _longitude + assert gdf_single.iloc[0]["lat"] == _latitude + + # ************************************************************************** + + # multiple nodes + + _latitudes = (23, 45, 73) + _longitudes = (-12, 33, 24) + gdf_multi = gis_utils.create_node_geodataframe( - longitudes=_longitudes, - latitudes=_latitudes - ) - + longitudes=_longitudes, latitudes=_latitudes + ) + verify_osmnx_gdf(gdf_multi) - + # multiple nodes and specific keys - - _osmids = (54,'a4h4',44.323) - + + _osmids = (54, "a4h4", 44.323) + gdf_multi = gis_utils.create_node_geodataframe( - longitudes=_longitudes, - latitudes=_latitudes, - osmids=_osmids - ) - + longitudes=_longitudes, latitudes=_latitudes, osmids=_osmids + ) + verify_osmnx_gdf(gdf_multi) - + for i in range(len(gdf_multi)): - assert gdf_multi.index[i][1] == _osmids[i] - + # multiple nodes 
and extra columns - + gdf_multi = gis_utils.create_node_geodataframe( - longitudes=_longitudes, + longitudes=_longitudes, latitudes=_latitudes, osmids=_osmids, - long=_longitudes, - lat=_latitudes - ) - - verify_osmnx_gdf(gdf_multi, - extra_column_names=('long','lat')) - + long=_longitudes, + lat=_latitudes, + ) + + verify_osmnx_gdf(gdf_multi, extra_column_names=("long", "lat")) + for i in range(len(gdf_multi)): - assert gdf_multi.index[i][1] == _osmids[i] - assert gdf_multi.iloc[i]['long'] == _longitudes[i] - assert gdf_multi.iloc[i]['lat'] == _latitudes[i] - - #************************************************************************** - + assert gdf_multi.iloc[i]["long"] == _longitudes[i] + assert gdf_multi.iloc[i]["lat"] == _latitudes[i] + + # ************************************************************************** + # trigger errors - + # mismatched longitudes and latitudes - + error_triggered = False try: _ = gis_utils.create_node_geodataframe( - longitudes=(_longitude,528), - latitudes=(_latitude,) - ) + longitudes=(_longitude, 528), latitudes=(_latitude,) + ) except ValueError: error_triggered = True assert error_triggered - + # mismatched longitudes/latitudes and osmids - + error_triggered = False try: _ = gis_utils.create_node_geodataframe( - longitudes=(_longitude,528), - latitudes=(_latitude,92), - osmids=(59,482,135) - ) + longitudes=(_longitude, 528), + latitudes=(_latitude, 92), + osmids=(59, 482, 135), + ) except ValueError: error_triggered = True assert error_triggered - + # ************************************************************************* # ************************************************************************* - + # TODO: test plotting using cached data - + # ************************************************************************* # ************************************************************************* - + # test writing a GeoDataFrame with containers - - def example_io_geodataframe(self, - preserve_original_gdf: bool = True, - identify_columns: bool = False, - file_extension: str = '.gpkg'): - - #************************************************************************** - #************************************************************************** - - filename = 'tests/mygdffile'+file_extension - + + def example_io_geodataframe( + self, + preserve_original_gdf: bool = True, + identify_columns: bool = False, + file_extension: str = ".gpkg", + ): + # ************************************************************************** + # ************************************************************************** + + filename = "tests/mygdffile" + file_extension + # print('bing') # print('preserve_original_gdf:'+str(preserve_original_gdf)) # print('identify_columns:'+str(identify_columns)) # print('file_extension:'+str(file_extension)) - + def verify_gdf_conformity(gdf, new_gdf, preserve_original_gdf): - # verify conformity # print(gdf) # print(new_gdf) - + # for each column in the original gdf - + for column in gdf.columns: - # assert that the column exists or that it is a merged 1 (no need) - - assert (column in new_gdf.columns or gis_utils.RKW_GPKG == column) - + + assert column in new_gdf.columns or gis_utils.RKW_GPKG == column + # packed column - + if gis_utils.RKW_GPKG == column: - # duplicates - + # if the original was preserved, there should be no packed col. 
# hence, it cannot have been preserved - + assert not preserve_original_gdf - + # for each key in the packed column # print(gdf.columns) # print(gdf[column]) - + for index in gdf.index: - - contents_dict = literal_eval(gdf.loc[(index,column)]) - + contents_dict = literal_eval(gdf.loc[(index, column)]) + for new_gdf_column in contents_dict.keys(): - assert new_gdf_column in new_gdf.columns # print(new_gdf_column) # print("......................................................") @@ -1363,41 +1411,32 @@ class TestGisUtils: # print(type(new_gdf.loc[(index, new_gdf_column)])) # print(repr(contents_dict[new_gdf_column])) # print(repr(new_gdf.loc[(index, new_gdf_column)])) - + if new_gdf_column in special_columns: - # the contents are containers: use literal_eval - + assert repr( - literal_eval( - contents_dict[new_gdf_column] - ) - ) == repr( - new_gdf.loc[(index, new_gdf_column)] - ) - - else: # the contents are not containers - + literal_eval(contents_dict[new_gdf_column]) + ) == repr(new_gdf.loc[(index, new_gdf_column)]) + + else: # the contents are not containers # direct comparison # TODO: reach this statement - assert repr(contents_dict[new_gdf_column] - ) == repr( - new_gdf.loc[(index, new_gdf_column)] - ) - + assert repr(contents_dict[new_gdf_column]) == repr( + new_gdf.loc[(index, new_gdf_column)] + ) + continue - - #****************************************************************** - #****************************************************************** - + + # ****************************************************************** + # ****************************************************************** + # non-packed column - + for index in gdf.index: - if preserve_original_gdf: - # the original gdf has been preserved - + # print("......................................................") # print(gdf[column].dtype) # print(new_gdf[column].dtype) @@ -1407,38 +1446,32 @@ class TestGisUtils: # print(type(new_gdf.loc[(index, column)])) # print(repr(gdf.loc[(index, column)])) # print(repr(new_gdf.loc[(index, column)])) - + # the types should match - - assert type( - gdf.loc[(index, column)] - ) == type( - new_gdf.loc[(index, column)] - ) - + + assert type(gdf.loc[(index, column)]) == type( + new_gdf.loc[(index, column)] + ) + # sets require special behaviour - - if (type(gdf.loc[(index, column)]) == set or - column == gis_utils.KEY_GPD_GEOMETRY): - - # sets are non-ordered: + + if ( + type(gdf.loc[(index, column)]) == set + or column == gis_utils.KEY_GPD_GEOMETRY + ): + # sets are non-ordered: # repr() may reveal different results - + assert ( - gdf.loc[(index, column)] == + gdf.loc[(index, column)] == new_gdf.loc[(index, column)] + ) + + else: # standard + assert repr(gdf.loc[(index, column)]) == repr( new_gdf.loc[(index, column)] - ) - - else: # standard - - assert repr( - gdf.loc[(index, column)] - ) == repr( - new_gdf.loc[(index, column)] - ) - - else: # the original gdf has not been preserved - + ) + + else: # the original gdf has not been preserved # print("......................................................") # print(gdf.columns) # print(gdf[column].dtype) @@ -1449,528 +1482,499 @@ class TestGisUtils: # print(type(new_gdf[column].loc[index])) # print(repr(gdf[column].loc[index])) # print(repr(new_gdf[column].loc[index])) - + if column == gis_utils.KEY_GPD_GEOMETRY: - # assert ( - # gdf[column].loc[index] == + # gdf[column].loc[index] == # new_gdf[column].loc[index] # ) - + assert ( - gdf.loc[(index, column)] == - new_gdf.loc[(index, column)] - ) - + gdf.loc[(index, column)] == 
new_gdf.loc[(index, column)] + ) + elif column in special_columns: - - assert repr( - literal_eval(gdf.loc[(index, column)]) - ) == repr( - new_gdf.loc[(index, column)] - ) - + assert repr(literal_eval(gdf.loc[(index, column)])) == repr( + new_gdf.loc[(index, column)] + ) + else: - - assert repr( - gdf.loc[(index, column)] - ) == repr( - new_gdf.loc[(index, column)] - ) - - #************************************************************************** - #************************************************************************** - + assert repr(gdf.loc[(index, column)]) == repr( + new_gdf.loc[(index, column)] + ) + + # ************************************************************************** + # ************************************************************************** + # TODO: test methods without specifying the columns - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # gdf object with simple index, undeclared - + gdf = GeoDataFrame( - {'id': [1, 2, 3], - 'mymymy': [None,None,1.23], - 'another_id': [53.4,54.4,55.4], - 'not_another_id': ['you','they','us'], - 'column_of_lists': [list([0,1,2]), - list([3,4,5]), - list([6,7,8])], - 'column_of_tuples': [tuple([-1,-2,-3]), - tuple([-4,-5,-6]), - tuple([-7,-8,-9])], - 'column_of_sets': [set([-1,-2,-3]), - set([-4,-5,-6]), - set([-7,-8,-9])], - 'column_of_dicts': [{1:34,6:'a',5:46.32}, - {'a':575,4:[],3:(2,3)}, - {(4,5):3,4:{2:5},3:4}], - 'column_of_strs': ["set([-1,-2,-3])", - "set([-4,-5,-6])", - "set([-7,-8,-9])"], - 'another_id2': ['hello',53.4,None], # requires special handling - 'another_id3': [53.4,'hello',None], # requires special handling - 'yet_another_id': [None,None,None], # requires special handling - }, - geometry=[LineString([(3, 2), (7, 7)]), - LineString([(3, 7), (7, 2)]), - LineString([(6, 2), (6, 6)])] - ) - + { + "id": [1, 2, 3], + "mymymy": [None, None, 1.23], + "another_id": [53.4, 54.4, 55.4], + "not_another_id": ["you", "they", "us"], + "column_of_lists": [list([0, 1, 2]), list([3, 4, 5]), list([6, 7, 8])], + "column_of_tuples": [ + tuple([-1, -2, -3]), + tuple([-4, -5, -6]), + tuple([-7, -8, -9]), + ], + "column_of_sets": [ + set([-1, -2, -3]), + set([-4, -5, -6]), + set([-7, -8, -9]), + ], + "column_of_dicts": [ + {1: 34, 6: "a", 5: 46.32}, + {"a": 575, 4: [], 3: (2, 3)}, + {(4, 5): 3, 4: {2: 5}, 3: 4}, + ], + "column_of_strs": [ + "set([-1,-2,-3])", + "set([-4,-5,-6])", + "set([-7,-8,-9])", + ], + "another_id2": ["hello", 53.4, None], # requires special handling + "another_id3": [53.4, "hello", None], # requires special handling + "yet_another_id": [None, None, None], # requires special handling + }, + geometry=[ + LineString([(3, 2), (7, 7)]), + LineString([(3, 7), (7, 2)]), + LineString([(6, 2), (6, 6)]), + ], + ) + # identify the columns that require special treatment - + if identify_columns: - # find the columns automatically - - special_columns = None # TODO: reach this statement - + + special_columns = None # TODO: reach this statement + else: - special_columns = ( - 'column_of_lists', - 'column_of_tuples', - 'column_of_sets', - 'column_of_dicts', + "column_of_lists", + "column_of_tuples", + "column_of_sets", + "column_of_dicts", #'column_of_strs' # can be omitted - 'another_id2', - 'another_id3', - 'yet_another_id' - ) - + "another_id2", + 
"another_id3", + "yet_another_id", + ) + # find the columns automatically - + set_packable_columns = gis_utils.find_gpkg_packable_columns(gdf) - + # make sure the columns can be identified - + for packable_column in set_packable_columns: - assert packable_column in special_columns - + # write file - + gis_utils.write_gdf_file( gdf=gdf, filename=filename, columns_to_pack=special_columns, - preserve_original=preserve_original_gdf - ) - + preserve_original=preserve_original_gdf, + ) + new_gdf = gis_utils.read_gdf_file( - filename=filename, - packed_columns=special_columns) - + filename=filename, packed_columns=special_columns + ) + # verify conformity - + verify_gdf_conformity(gdf, new_gdf, preserve_original_gdf) - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # gdf object with simple index, declared - + gdf = GeoDataFrame( - {'id': [1, 2, 3], - 'column_of_lists': [list([0,1,2]), - list([3,4,5]), - list([6,7,8])], - 'column_of_tuples': [tuple([-1,-2,-3]), - tuple([-4,-5,-6]), - tuple([-7,-8,-9])], - 'column_of_sets': [set([-1,-2,-3]), - set([-4,-5,-6]), - set([-7,-8,-9])], - 'column_of_dicts': [{1:34,6:'a',5:46.32}, - {'a':575,4:[],3:(2,3)}, - {(4,5):3,4:{2:5},3:4}], - 'column_of_strs': ["set([-1,-2,-3])", - "set([-4,-5,-6])", - "set([-7,-8,-9])"] - }, - geometry=[LineString([(3, 2), (7, 7)]), - LineString([(3, 7), (7, 2)]), - LineString([(6, 2), (6, 6)])], - index=['a','b','c'], # index is declared - ) - + { + "id": [1, 2, 3], + "column_of_lists": [list([0, 1, 2]), list([3, 4, 5]), list([6, 7, 8])], + "column_of_tuples": [ + tuple([-1, -2, -3]), + tuple([-4, -5, -6]), + tuple([-7, -8, -9]), + ], + "column_of_sets": [ + set([-1, -2, -3]), + set([-4, -5, -6]), + set([-7, -8, -9]), + ], + "column_of_dicts": [ + {1: 34, 6: "a", 5: 46.32}, + {"a": 575, 4: [], 3: (2, 3)}, + {(4, 5): 3, 4: {2: 5}, 3: 4}, + ], + "column_of_strs": [ + "set([-1,-2,-3])", + "set([-4,-5,-6])", + "set([-7,-8,-9])", + ], + }, + geometry=[ + LineString([(3, 2), (7, 7)]), + LineString([(3, 7), (7, 2)]), + LineString([(6, 2), (6, 6)]), + ], + index=["a", "b", "c"], # index is declared + ) + # identify the columns that require special treatment - + if identify_columns: - # find the columns automatically - - special_columns = None # TODO: reach this statement - + + special_columns = None # TODO: reach this statement + else: - special_columns = ( - 'column_of_lists', - 'column_of_tuples', - 'column_of_sets', - 'column_of_dicts', - 'column_of_strs' - ) - + "column_of_lists", + "column_of_tuples", + "column_of_sets", + "column_of_dicts", + "column_of_strs", + ) + # find the columns automatically - + set_packable_columns = gis_utils.find_gpkg_packable_columns(gdf) - + # make sure the columns can be identified - + for packable_column in set_packable_columns: - assert packable_column in special_columns - + # write file - + gis_utils.write_gdf_file( gdf=gdf, filename=filename, columns_to_pack=special_columns, - preserve_original=preserve_original_gdf - ) - + preserve_original=preserve_original_gdf, + ) + new_gdf = gis_utils.read_gdf_file( - filename=filename, - packed_columns=special_columns, - index='index') # index has to be specified - + filename=filename, packed_columns=special_columns, index="index" + ) # index has to be specified + # verify conformity - + 
verify_gdf_conformity(gdf, new_gdf, preserve_original_gdf) - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # gdf object with multi-index, declared - + gdf = gis_utils.GeoDataFrame( { - 'other_column': [1,2,3], - 'column_a': ['nadbnppadfb','agasdgnp','adfgdn'], - 'column_b': [12517.4247,0.54673,0.3723], - 'column_c': [(1,2,3,4),(5,6,7,8),(44,1247)], - 'column_d': [{'beans':'cheese','lollipops':'dentist'},{},{1:3}], - 'column_e': [[1,2,3],[4.5,4.6,4.7],[9.0,10.0,11.0]], - 'column_f': [{4,5,6},{5.64435,0.7545,1.4634},{'a','b','c'}], - 'geometry': [Point(12, 55), Point(2,4), Point(3,6)], - }, - index=MultiIndex.from_tuples([('a', 124), - ('b', 754), - ('c', 234)], - names=['index1', 'index2']) - ) - + "other_column": [1, 2, 3], + "column_a": ["nadbnppadfb", "agasdgnp", "adfgdn"], + "column_b": [12517.4247, 0.54673, 0.3723], + "column_c": [(1, 2, 3, 4), (5, 6, 7, 8), (44, 1247)], + "column_d": [{"beans": "cheese", "lollipops": "dentist"}, {}, {1: 3}], + "column_e": [[1, 2, 3], [4.5, 4.6, 4.7], [9.0, 10.0, 11.0]], + "column_f": [{4, 5, 6}, {5.64435, 0.7545, 1.4634}, {"a", "b", "c"}], + "geometry": [Point(12, 55), Point(2, 4), Point(3, 6)], + }, + index=MultiIndex.from_tuples( + [("a", 124), ("b", 754), ("c", 234)], names=["index1", "index2"] + ), + ) + # identify the columns that require special treatment - + if identify_columns: - # find the columns automatically - - special_columns = None # TODO: reach this statement - + + special_columns = None # TODO: reach this statement + else: - - special_columns = ( - 'column_c', - 'column_d', - 'column_e', - 'column_f' - ) - + special_columns = ("column_c", "column_d", "column_e", "column_f") + # find the columns automatically - + set_packable_columns = gis_utils.find_gpkg_packable_columns(gdf) - + # make sure the columns can be identified - + for packable_column in set_packable_columns: - assert packable_column in special_columns - + # write file - + gis_utils.write_gdf_file( gdf=gdf, filename=filename, columns_to_pack=special_columns, - preserve_original=preserve_original_gdf - ) - + preserve_original=preserve_original_gdf, + ) + new_gdf = gis_utils.read_gdf_file( filename=filename, packed_columns=special_columns, - index=['index1', 'index2']) - + index=["index1", "index2"], + ) + # verify conformity - + verify_gdf_conformity(gdf, new_gdf, preserve_original_gdf) - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # gdf with column names matching in lower case (not good for .gpkg files) - + gdf = GeoDataFrame( - {'id': [1, 2, 3], - 'mymymy': [901.1,53.4,None], - 'another_id': [53.4,54.4,55.4], - 'not_another_id': ['you','they','us'], - 'abc': [list([0,1,2]), - list([3,4,5]), - list([6,7,8])], - 'ABC': [tuple([-1,-2,-3]), - tuple([-4,-5,-6]), - tuple([-7,-8,-9])], - 'Abc': ['here', - 'there', - 'nowhere'], - 'aBc': [(1,2,3,4), - [5,6,7,8], - {9,10,11,12}], - 'aBC': [53.643, - {3:6,7:'goodbye'}, - None], - 'ABc': [None, - None, - None], - 'mymymy2': ['hello',53.4,None], # requires special handling - 'yet_another_id': 
[None,None,None], # requires special handling - }, - geometry=[LineString([(3, 2), (7, 7)]), - LineString([(3, 7), (7, 2)]), - LineString([(6, 2), (6, 6)])] - ) - + { + "id": [1, 2, 3], + "mymymy": [901.1, 53.4, None], + "another_id": [53.4, 54.4, 55.4], + "not_another_id": ["you", "they", "us"], + "abc": [list([0, 1, 2]), list([3, 4, 5]), list([6, 7, 8])], + "ABC": [tuple([-1, -2, -3]), tuple([-4, -5, -6]), tuple([-7, -8, -9])], + "Abc": ["here", "there", "nowhere"], + "aBc": [(1, 2, 3, 4), [5, 6, 7, 8], {9, 10, 11, 12}], + "aBC": [53.643, {3: 6, 7: "goodbye"}, None], + "ABc": [None, None, None], + "mymymy2": ["hello", 53.4, None], # requires special handling + "yet_another_id": [None, None, None], # requires special handling + }, + geometry=[ + LineString([(3, 2), (7, 7)]), + LineString([(3, 7), (7, 2)]), + LineString([(6, 2), (6, 6)]), + ], + ) + # identify the columns that require special treatment - + if identify_columns: - # find the columns automatically - - special_columns = None # TODO: reach this statement - + + special_columns = None # TODO: reach this statement + else: - special_columns = ( - 'abc', - 'ABC', - 'Abc', # no containers but has the same lowercase name as others - 'aBc', - 'aBC', - 'ABc', + "abc", + "ABC", + "Abc", # no containers but has the same lowercase name as others + "aBc", + "aBC", + "ABc", # special cases - 'mymymy2', - 'yet_another_id' - ) - + "mymymy2", + "yet_another_id", + ) + # find the columns automatically - + set_packable_columns = gis_utils.find_gpkg_packable_columns(gdf) - + # make sure the columns can be identified - + for packable_column in set_packable_columns: - assert packable_column in special_columns - + # write file - + gis_utils.write_gdf_file( gdf=gdf, filename=filename, columns_to_pack=special_columns, - preserve_original=preserve_original_gdf - ) - + preserve_original=preserve_original_gdf, + ) + new_gdf = gis_utils.read_gdf_file( - filename=filename, - packed_columns=special_columns) - + filename=filename, packed_columns=special_columns + ) + # verify conformity - + verify_gdf_conformity(gdf, new_gdf, preserve_original_gdf) - - #************************************************************************** - #************************************************************************** - + + # ************************************************************************** + # ************************************************************************** + # TODO: force the methods to throw errors with non-primitive types - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_gpkg_write_errors(self): - - filename_gpkg: str = 'test.gpkg' - + filename_gpkg: str = "test.gpkg" + type_status = { int: True, str: True, float: True, - bytes: False, - dict: True, # works but comes out incorrectly + bytes: False, + dict: True, # works but comes out incorrectly set: False, - tuple : False, - list : False, - type(None): True # works but comes out incorrectly - } - + tuple: False, + list: False, + type(None): True, # works but comes out incorrectly + } + for a_type, a_status in type_status.items(): - if a_type == int: - - data = [1,2] - + data = [1, 2] + elif a_type == str: - - data = ['hello','goodbye'] - + data = ["hello", "goodbye"] + elif a_type == float: - - data = [3.4,6.7] - + data = [3.4, 6.7] + 
elif a_type == bytes: - - data = [b'\x04\x00',b'\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00'] - + data = [b"\x04\x00", b"\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00"] + elif a_type == dict: - - data = [{0:1},{'a':53.46}] - + data = [{0: 1}, {"a": 53.46}] + elif a_type == set: - - data = [{0,1},{'a',53.46}] - + data = [{0, 1}, {"a", 53.46}] + elif a_type == tuple: - - data = [(0,1),('a',53.46)] - + data = [(0, 1), ("a", 53.46)] + elif a_type == list: - - data = [list((0,1)),list(('a',53.46))] - + data = [list((0, 1)), list(("a", 53.46))] + elif a_type == type(None): - data = [None, None] - + # create gdf - + gdf = GeoDataFrame( { - 'data': data, - 'geometry': [Point(1, 2), Point(3,4)], - }, - index=['a','b'] - ) - + "data": data, + "geometry": [Point(1, 2), Point(3, 4)], + }, + index=["a", "b"], + ) + # verify the status - + if a_status: - # compatible: no errors are expected - + gdf.to_file(filename_gpkg) - - else: # incompatible: errors are expected - + + else: # incompatible: errors are expected error_triggered = False try: gdf.to_file(filename_gpkg) except Exception: error_triggered = True assert error_triggered - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + def test_discrete_plot_gdf(self): - # ********************************************************************* # ********************************************************************* - + G = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - + truncate_by_edge=True, + ) + # ********************************************************************* # ********************************************************************* - - gdf = ox.utils_graph.graph_to_gdfs(G, edges=False) # nodes only - + + gdf = ox.utils_graph.graph_to_gdfs(G, edges=False) # nodes only + # ********************************************************************* # ********************************************************************* - + # add a random discrete element to gdf - - column = 'discrete_category_column' - - offset = random.randint(0,int(1e3)) - + + column = "discrete_category_column" + + offset = random.randint(0, int(1e3)) + number_options = 10 - + category = { - idx: random.randint(0,number_options-1)+offset - for idx in gdf.index - } - + idx: random.randint(0, number_options - 1) + offset for idx in gdf.index + } + set_categories = set(category.values()) - - category_to_label = { - cat: 'label for '+str(cat) - for cat in set_categories - } - + + category_to_label = {cat: "label for " + str(cat) for cat in set_categories} + # create column - + gdf[column] = Series(data=category, index=gdf.index) - + # ********************************************************************* # ********************************************************************* - + gis_utils.plot_discrete_attributes( - gdf, - column=column, - category_to_label=category_to_label - ) - + gdf, column=column, category_to_label=category_to_label + ) + # ********************************************************************* # ********************************************************************* - + + # ************************************************************************* # 
************************************************************************* - # ************************************************************************* - + def test_convert_edge_path(self): - # create network - + network = nx.MultiDiGraph() - + # define and add edges - + list_edges = [ - (0, 1), (1, 1), (1, 2), (2, 3), - (3, 4), (4, 5), (5, 6), - (7, 6), (8, 7), (9, 8), - (6, 7), (7, 8), (8, 9) + (0, 1), + (1, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 6), + (7, 6), + (8, 7), + (9, 8), + (6, 7), + (7, 8), + (8, 9), ] - + network.add_edges_from(list_edges) - + # ********************************************************************* # ********************************************************************* - + allow_reversed_edges = False - + edge_paths = [ [(0, 1)], [(0, 1), (1, 2)], @@ -1979,49 +1983,47 @@ class TestGisUtils: [(0, 1), (1, 1), (1, 2)], # self loop [(6, 7), (7, 8), (8, 9)], # should work ] - + expected_node_paths = [ [0, 1], [0, 1, 2], [1, 2, 3], [0, 1, 2, 3], [0, 1, 2], - [6, 7, 8, 9] + [6, 7, 8, 9], ] - + for edge_index, edge_path in enumerate(edge_paths): - - assert gis_utils.convert_edge_path( - network, - edge_path, - allow_reversed_edges=allow_reversed_edges) == expected_node_paths[ - edge_index] - + assert ( + gis_utils.convert_edge_path( + network, edge_path, allow_reversed_edges=allow_reversed_edges + ) + == expected_node_paths[edge_index] + ) + # invalid edge paths - + invalid_edge_paths = [ [(7, 6), (8, 7), (9, 8)], # all reversed, should fail [(7, 6), (7, 8), (8, 9)], # first reversed, should fail [(6, 7), (8, 7), (8, 9)], # second reversed, should fail - [(6, 7), (7, 8), (9, 8)] # third reversed, should fail + [(6, 7), (7, 8), (9, 8)], # third reversed, should fail ] for edge_path in invalid_edge_paths: error_raised = False try: gis_utils.convert_edge_path( - network, - edge_path, - allow_reversed_edges=allow_reversed_edges - ) + network, edge_path, allow_reversed_edges=allow_reversed_edges + ) except ValueError: error_raised = True - assert error_raised - + assert error_raised + # ********************************************************************* # ********************************************************************* - + allow_reversed_edges = True - + edge_paths = [ [(0, 1)], [(0, 1), (1, 2)], @@ -2032,9 +2034,9 @@ class TestGisUtils: [(7, 6), (8, 7), (9, 8)], # all reversed, should work [(7, 6), (7, 8), (8, 9)], # first reversed, should work [(6, 7), (8, 7), (8, 9)], # second reversed, should work - [(6, 7), (7, 8), (9, 8)] # third reversed, should work + [(6, 7), (7, 8), (9, 8)], # third reversed, should work ] - + expected_node_paths = [ [0, 1], [0, 1, 2], @@ -2045,42 +2047,41 @@ class TestGisUtils: [6, 7, 8, 9], [6, 7, 8, 9], [6, 7, 8, 9], - [6, 7, 8, 9] + [6, 7, 8, 9], ] - + for edge_index, edge_path in enumerate(edge_paths): - - assert gis_utils.convert_edge_path( - network, - edge_path, - allow_reversed_edges=allow_reversed_edges) == expected_node_paths[ - edge_index] - + assert ( + gis_utils.convert_edge_path( + network, edge_path, allow_reversed_edges=allow_reversed_edges + ) + == expected_node_paths[edge_index] + ) + # ************************************************************************* # ************************************************************************* - + def test_get_directed(self): - # get a network - + network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) - 
truncate_by_edge=True, + ) + # convert to undirected undirected_network = ox.get_undirected(network) - + # convert to directed directed_network = gis_utils.get_directed(undirected_network) - + # make sure the same nodes exist on both objects for node_key in network.nodes(): assert node_key in directed_network.nodes() assert network.number_of_nodes() == directed_network.number_of_nodes() - + # assert that every edge on the directed network exists on the original network assert network.number_of_edges() >= directed_network.number_of_edges() # for each edge in the directed graph object @@ -2091,7 +2092,8 @@ class TestGisUtils: # found that has all the matching attributes and content edge_dict = directed_network.edges[edge_key] for other_edge_key in gis_iden.get_edges_from_a_to_b( - network, edge_key[0], edge_key[1]): + network, edge_key[0], edge_key[1] + ): # check all attributes number_matching_attributes = 0 for edge_attr, edge_data in edge_dict.items(): @@ -2104,30 +2106,30 @@ class TestGisUtils: # the attribute does not exist continue number_matching_attributes += 1 - + if number_matching_attributes == len(edge_dict): # a compatible edge was found, break break assert number_matching_attributes == len(edge_dict) - + # ************************************************************************* # ************************************************************************* - + def test_simplifying_graph(self): # get a network network = ox.graph_from_point( - (55.71654,9.11728), - network_type='drive', + (55.71654, 9.11728), + network_type="drive", custom_filter='["highway"~"residential|tertiary|unclassified|service"]', - truncate_by_edge=True - ) + truncate_by_edge=True, + ) # protect some nodes number_nodes_protected = 4 node_keys = tuple(network.nodes()) protected_nodes = [ - node_keys[random.randint(0,len(node_keys)-1)] + node_keys[random.randint(0, len(node_keys) - 1)] for i in range(number_nodes_protected) - ] + ] # try simplifying it gis_utils.simplify_network(network, protected_nodes) # TODO: verify the changes # for node_key in protected_nodes: # assert network.has_node(node_key) # TODO: check if [335762579, 335762585, 1785975921, 360252989, 335762632, 335762579] is a path - + # ************************************************************************* # ************************************************************************* - + + +# ***************************************************************************** # ***************************************************************************** -# ***************************************************************************** \ No newline at end of file diff --git a/tests/test_solvers.py b/tests/test_solvers.py index 26f17e3..87e2da1 100644 --- a/tests/test_solvers.py +++ b/tests/test_solvers.py @@ -11,77 +11,72 @@ import random # ***************************************************************************** # ***************************************************************************** + class TestSolvers: - # ************************************************************************* # ************************************************************************* - + def test_solver_factory_arguments(self): - # test the solver factory arguments using a single feasible problem - + problem = self.problem_milp_feasible() - + # solver settings - + solver_timelimit = 10 - + solver_abs_mip_gap = 0.001 - + solver_rel_mip_gap = 0.01 - + solver_options = { - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 
'absolute_mip_gap':solver_abs_mip_gap, + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, # special option - 'tee': True - } - - solver_name = 'glpk' - - results, solver_interface = self.optimise( - solver_name, - solver_options, - problem) - + "tee": True, + } + + solver_name = "glpk" + + results, solver_interface = self.optimise(solver_name, solver_options, problem) + # ************************************************************************* # ************************************************************************* - + def test_problems(self): - # test a collection of problems using different solvers - - solver = 'scip' - #scip_exec_path = '/usr/bin/scip' - #solver_options = {'executable': scip_exec_path} + + solver = "scip" + # scip_exec_path = '/usr/bin/scip' + # solver_options = {'executable': scip_exec_path} solver_options = {} - + # solver = 'cplex' # # cplex_exec_path = '/home/pmlpm/Software/CPLEX/cplex/bin/x86-64_linux/cplex' # cplex_exec_path = '/home/pmlpm/CPLEX/cplex/bin/x86-64_linux/cplex' # #solver_options = {} # solver_options = {'executable':cplex_exec_path} - + list_solvers = [ - 'fake_solver', - 'cbc', - 'glpk', - 'scip', + "fake_solver", + "cbc", + "glpk", + "scip", #'cplex' - ] - + ] + list_solver_options = [ - None, # fake - None, # cbc - {'tee': False}, # glpk - None, # scip + None, # fake + None, # cbc + {"tee": False}, # glpk + None, # scip # cplex - #{'executable': cplex_exec_path}, - ] - + # {'executable': cplex_exec_path}, + ] + # list of problems - + list_concrete_models = [ self.problem_qp_optimal(), self.problem_qp_infeasible(), @@ -92,12 +87,12 @@ class TestSolvers: self.problem_milp_infeasible(), self.problem_milp_optimal(), self.problem_milp_feasible(), - self.problem_milp_feasible(15,64), - self.problem_milp_feasible(10,46) - ] - + self.problem_milp_feasible(15, 64), + self.problem_milp_feasible(10, 46), + ] + # list of problem types - + list_problem_types = [ SolverInterface.PROBLEM_QP, SolverInterface.PROBLEM_QP, @@ -109,11 +104,11 @@ class TestSolvers: SolverInterface.PROBLEM_MILP, SolverInterface.PROBLEM_MILP, SolverInterface.PROBLEM_MILP, - 'unknown_problem_type' - ] - - # expected - + "unknown_problem_type", + ] + + # expected + list_problem_termination_conditions = [ TerminationCondition.optimal, TerminationCondition.infeasible, @@ -123,11 +118,11 @@ class TestSolvers: TerminationCondition.unbounded, TerminationCondition.infeasible, TerminationCondition.optimal, - None, # if we don't know what to expect, - None, # if we don't know what to expect, - None, # if we don't know what to expect - ] - + None, # if we don't know what to expect, + None, # if we don't know what to expect, + None, # if we don't know what to expect + ] + list_problem_optimisation_sucess = [ True, True, @@ -139,598 +134,585 @@ class TestSolvers: True, True, True, - True - ] - + True, + ] + # list of solvers - - list_solvers = [ - 'fake_solver', - 'cbc', - 'glpk', - 'scip', - 'cplex'] - + + list_solvers = ["fake_solver", "cbc", "glpk", "scip", "cplex"] + # solver settings - + solver_timelimit = 10 - + solver_abs_mip_gap = 0.001 - + solver_rel_mip_gap = 0.01 - - for solver_name, solver_options in zip( - list_solvers, list_solver_options): - + + for solver_name, solver_options in zip(list_solvers, list_solver_options): if type(solver_options) == dict: - - solver_options.update({ - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - }) - + 
solver_options.update( + { + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + ) + else: - solver_options = { - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - } - + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + for problem_index, problem in enumerate(list_concrete_models): - try: - # check problem and solver compatibility - + problem_type = list_problem_types[problem_index] - - if SolverInterface.problem_and_solver_are_compatible( - solver_name, - problem_type) == False: - + + if ( + SolverInterface.problem_and_solver_are_compatible( + solver_name, problem_type + ) + == False + ): continue - + # optimise - + results, solver_interface = self.optimise( - solver_name, - solver_options, - problem, - print_solver_output=False - ) - + solver_name, solver_options, problem, print_solver_output=False + ) + except SolverInterface.UnknownSolverError: - continue - + except SolverInterface.UnknownProblemTypeError: - continue - + # ************************************************************* # ************************************************************* - + # termination condition - + exp_term_cond = list_problem_termination_conditions[problem_index] - + term_cond = results.solver.termination_condition - - if (exp_term_cond == None or - (solver_name == 'glpk' and - exp_term_cond == TerminationCondition.unbounded) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.unbounded) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.optimal) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.infeasible) - ): - + + if ( + exp_term_cond == None + or ( + solver_name == "glpk" + and exp_term_cond == TerminationCondition.unbounded + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.unbounded + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.optimal + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.infeasible + ) + ): # exceptions in need of correction - + pass - + else: # print(solver_name) # print(results) assert exp_term_cond == term_cond - + # ************************************************************* # ************************************************************* - + # solver status - - if ((solver_name == 'glpk' and - term_cond == TerminationCondition.infeasible) or - (solver_name == 'cplex' and - term_cond == TerminationCondition.unknown) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.unbounded) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.infeasible) - ): - + + if ( + ( + solver_name == "glpk" + and term_cond == TerminationCondition.infeasible + ) + or ( + solver_name == "cplex" + and term_cond == TerminationCondition.unknown + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.unbounded + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.infeasible + ) + ): pass - + else: - # check if the solver status matches the one we would expect # if the termination condition was correct - + assert ( - TerminationCondition.to_solver_status( - term_cond - ) == results.solver.status - ) - + TerminationCondition.to_solver_status(term_cond) + == results.solver.status + ) + # if valid, it means the results object is coherent - + # 
************************************************************* # ************************************************************* - - if (exp_term_cond == None or - (solver_name == 'glpk' and - exp_term_cond == TerminationCondition.unbounded) or - (solver_name == 'glpk' and - exp_term_cond == TerminationCondition.infeasible) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.unknown) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.unbounded) or - (solver_name == 'cplex' and - exp_term_cond == TerminationCondition.infeasible) - ): - + + if ( + exp_term_cond == None + or ( + solver_name == "glpk" + and exp_term_cond == TerminationCondition.unbounded + ) + or ( + solver_name == "glpk" + and exp_term_cond == TerminationCondition.infeasible + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.unknown + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.unbounded + ) + or ( + solver_name == "cplex" + and exp_term_cond == TerminationCondition.infeasible + ) + ): pass - + else: - # check if the solver status matches the one we would expect # if the predicted termination condition was obtained - + assert ( - TerminationCondition.to_solver_status( - exp_term_cond - ) == results.solver.status - ) - + TerminationCondition.to_solver_status(exp_term_cond) + == results.solver.status + ) + # if valid, the solver status is correct despite other issues - + # ************************************************************* # ************************************************************* - + # make sure the optimisation went as expected - + exp_optim_result = list_problem_optimisation_sucess[problem_index] - - if TerminationCondition.to_solver_status( + + if ( + TerminationCondition.to_solver_status( results.solver.termination_condition - ) != results.solver.status: - + ) + != results.solver.status + ): # this can be removed once the aforementioned issues have # been fixed (e.g. for the cplex and glpk solvers) - + pass - + else: - optim_result = solver_interface.was_optimisation_sucessful( - results, - problem_type) - + results, problem_type + ) + # ************************************************************* # ************************************************************* - - if (TerminationCondition.to_solver_status( + + if ( + TerminationCondition.to_solver_status( results.solver.termination_condition - ) != results.solver.status or - exp_term_cond == TerminationCondition.unbounded): - + ) + != results.solver.status + or exp_term_cond == TerminationCondition.unbounded + ): # this can be removed once the aforementioned issues have # been fixed (e.g. 
for the cplex and glpk solvers) - + pass - + else: - assert optim_result == exp_optim_result - + # ************************************************************* # ************************************************************* - + # test additional scenarios - + if optim_result == False: - continue - + # force unknown solver status error - - results.solver.status = 'false_solver_status' - + + results.solver.status = "false_solver_status" + try: - _ = solver_interface.was_optimisation_sucessful( - results, - problem_type) - + results, problem_type + ) + except solver_interface.UnknownSolverStatusError: - assert True - + # force unknown termination condition error - - results.solver.termination_condition = 'false_termin_condition' - + + results.solver.termination_condition = "false_termin_condition" + try: - _ = solver_interface.was_optimisation_sucessful( - results, - problem_type) - + results, problem_type + ) + except solver_interface.UnknownTerminationConditionError: - assert True - + # force an InconsistentSolverStatusError - + results.solver.termination_condition = TerminationCondition.optimal - + results.solver.status = TerminationCondition.to_solver_status( - results.solver.termination_condition) - + results.solver.termination_condition + ) + results.solver.termination_condition = TerminationCondition.unknown - + try: - _ = solver_interface.was_optimisation_sucessful( - results, - problem_type) - + results, problem_type + ) + except solver_interface.InconsistentSolverStatusError: - assert True - + # force an InconsistentProblemTypeAndSolverError - - if (problem_type == SolverInterface.PROBLEM_LP and - solver_name == 'glpk'): - + + if problem_type == SolverInterface.PROBLEM_LP and solver_name == "glpk": problem_type = SolverInterface.PROBLEM_QP - + try: - _ = solver_interface.was_optimisation_sucessful( - results, - problem_type) - + results, problem_type + ) + except solver_interface.InconsistentProblemTypeAndSolverError: - assert True - + # ********************************************************************* # ********************************************************************* - + # ************************************************************************* # ************************************************************************* - + # carry out optimisations - + def optimise( - self, - solver_name: str, - solver_options: dict, - #solver_interface: SolverInterface, - problem: pyo.ConcreteModel, - print_solver_output: bool = True): - + self, + solver_name: str, + solver_options: dict, + # solver_interface: SolverInterface, + problem: pyo.ConcreteModel, + print_solver_output: bool = True, + ): # configure common solver interface - - solver_interface = SolverInterface( - solver_name=solver_name, - **solver_options) - + + solver_interface = SolverInterface(solver_name=solver_name, **solver_options) + # get the solver handler - - solver_handler = solver_interface.get_solver_handler( - **solver_options) - + + solver_handler = solver_interface.get_solver_handler(**solver_options) + # solve - - if 'tee' not in solver_options: - - results = solver_handler.solve( - problem, - tee=print_solver_output) - + + if "tee" not in solver_options: + results = solver_handler.solve(problem, tee=print_solver_output) + else: - results = solver_handler.solve(problem) - + # return - + return results, solver_interface - + # ************************************************************************* # ************************************************************************* - + def problem_qp_optimal(self): - - 
model = pyo.ConcreteModel('qp_optimal') - + model = pyo.ConcreteModel("qp_optimal") + model.x = pyo.Var(within=pyo.NonNegativeReals) model.y = pyo.Var(within=pyo.NonNegativeReals) - + def constraint_rule(model): return model.x + model.y >= 10 + model.constraint = pyo.Constraint(rule=constraint_rule) - + def objective_rule(model): - return model.x + model.y + 0.5 * (model.x * model.x + 4 * model.x * model.y + 7 * model.y * model.y) + return ( + model.x + + model.y + + 0.5 + * (model.x * model.x + 4 * model.x * model.y + 7 * model.y * model.y) + ) + model.objective = pyo.Objective(rule=objective_rule, sense=pyo.minimize) - + return model - + # ************************************************************************* # ************************************************************************* - + def problem_qp_infeasible(self): - - model = pyo.ConcreteModel('qp_infeasible') - + model = pyo.ConcreteModel("qp_infeasible") + # model.x = pyo.Var(within=pyo.NonNegativeReals, bounds=(0,5)) # model.y = pyo.Var(within=pyo.NonNegativeReals, bounds=(0,4)) - - model.x = pyo.Var(bounds=(0,5)) - model.y = pyo.Var(bounds=(0,4)) - + + model.x = pyo.Var(bounds=(0, 5)) + model.y = pyo.Var(bounds=(0, 4)) + def constraint_rule(model): return model.x + model.y >= 10 + model.constraint = pyo.Constraint(rule=constraint_rule) - + def objective_rule(model): - return model.x + model.y + 0.5 * (model.x * model.x + 4 * model.x * model.y + 7 * model.y * model.y) + return ( + model.x + + model.y + + 0.5 + * (model.x * model.x + 4 * model.x * model.y + 7 * model.y * model.y) + ) + model.objective = pyo.Objective(rule=objective_rule, sense=pyo.minimize) - + return model - + # ************************************************************************* # ************************************************************************* - + def problem_lp_optimal(self): - - model = pyo.ConcreteModel('lp_optimal') - - model.x = pyo.Var([1,2], domain=pyo.NonNegativeReals) - - model.OBJ = pyo.Objective(expr = 2*model.x[1] + 3*model.x[2]) - - model.Constraint1 = pyo.Constraint(expr = 3*model.x[1] + 4*model.x[2] >= 1) - + model = pyo.ConcreteModel("lp_optimal") + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + return model - + # ************************************************************************* # ************************************************************************* - + def problem_lp_infeasible(self): - - model = pyo.ConcreteModel('lp_infeasible') - - model.x = pyo.Var([1,2], domain=pyo.NonNegativeReals) - - model.OBJ = pyo.Objective(expr = 2*model.x[1] + 3*model.x[2]) - - model.Constraint1 = pyo.Constraint(expr = 3*model.x[1] + 4*model.x[2] <= -1) - + model = pyo.ConcreteModel("lp_infeasible") + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] <= -1) + return model - + # ************************************************************************* # ************************************************************************* - + def problem_lp_unbounded(self): - - model = pyo.ConcreteModel('lp_unbounded') - - model.x = pyo.Var([1,2], domain=pyo.NonNegativeReals) - - model.OBJ = pyo.Objective(expr = 2*model.x[1] + 3*model.x[2], - sense=pyo.maximize) - - model.Constraint1 = pyo.Constraint(expr = 3*model.x[1] + 4*model.x[2] >= 1) - 
+ model = pyo.ConcreteModel("lp_unbounded") + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.OBJ = pyo.Objective( + expr=2 * model.x[1] + 3 * model.x[2], sense=pyo.maximize + ) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + return model - + # ************************************************************************* # ************************************************************************* - + def problem_milp_optimal(self): - - model = pyo.ConcreteModel('milp_optimal') - - model.x = pyo.Var([1,2], domain=pyo.Binary) - - model.OBJ = pyo.Objective(expr = 2.15*model.x[1] + 3.8*model.x[2]) - - model.Constraint1 = pyo.Constraint(expr = 3*model.x[1] + 4*model.x[2] >= 1) - + model = pyo.ConcreteModel("milp_optimal") + + model.x = pyo.Var([1, 2], domain=pyo.Binary) + + model.OBJ = pyo.Objective(expr=2.15 * model.x[1] + 3.8 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + return model - + # ************************************************************************* # ************************************************************************* - + def problem_milp_infeasible(self): - - model = pyo.ConcreteModel('milp_infeasible') - - model.x = pyo.Var([1,2], domain=pyo.Binary) - - model.OBJ = pyo.Objective(expr = 2*model.x[1] + 3*model.x[2]) - - model.Constraint1 = pyo.Constraint(expr = 3*model.x[1] + 4*model.x[2] <= -1) - + model = pyo.ConcreteModel("milp_infeasible") + + model.x = pyo.Var([1, 2], domain=pyo.Binary) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] <= -1) + return model - + # ************************************************************************* # ************************************************************************* - + def problem_milp_unbounded(self): - - model = pyo.ConcreteModel('milp_unbounded') - - model.x = pyo.Var([1,2], domain=pyo.NonNegativeReals) - + model = pyo.ConcreteModel("milp_unbounded") + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + model.y = pyo.Var(domain=pyo.Binary) - - model.OBJ = pyo.Objective(expr = 2*model.x[1] + 3*model.x[2] + model.y, - sense=pyo.maximize) - - model.Constraint1 = pyo.Constraint(expr = 3*model.x[1] + 4*model.x[2] >= 1) - + + model.OBJ = pyo.Objective( + expr=2 * model.x[1] + 3 * model.x[2] + model.y, sense=pyo.maximize + ) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + return model - + # ************************************************************************* # ************************************************************************* - - def problem_milp_feasible( - self, - number_binary_variables = 25, - seed_number = None): - + + def problem_milp_feasible(self, number_binary_variables=25, seed_number=None): if seed_number != None: - random.seed(seed_number) - - model = pyo.ConcreteModel('milp_feasible') - + + model = pyo.ConcreteModel("milp_feasible") + # a knapsack-type problem - + model.Y = pyo.RangeSet(number_binary_variables) - - model.y = pyo.Var(model.Y, - domain=pyo.Binary) - + + model.y = pyo.Var(model.Y, domain=pyo.Binary) + model.OBJ = pyo.Objective( - expr = sum(model.y[j]*random.random() - for j in model.Y), - sense=pyo.maximize - ) - + expr=sum(model.y[j] * random.random() for j in model.Y), sense=pyo.maximize + ) + model.Constraint1 = pyo.Constraint( - expr = sum(model.y[j]*random.random() - for j in model.Y) <= round(number_binary_variables/5) - ) - + expr=sum(model.y[j] * 
random.random() for j in model.Y) + <= round(number_binary_variables / 5) + ) + def rule_c1(m, i): return ( - sum(model.y[j]*(random.random()-0.5) + sum( + model.y[j] * (random.random() - 0.5) for j in model.Y if j != i - if random.randint(0,1) - ) <= round(number_binary_variables/5)*model.y[i] + if random.randint(0, 1) ) - model.constr_c1 = pyo.Constraint( - model.Y, - rule=rule_c1) - + <= round(number_binary_variables / 5) * model.y[i] + ) + + model.constr_c1 = pyo.Constraint(model.Y, rule=rule_c1) + return model - + # ************************************************************************* # ************************************************************************* - + def test_inexistent_solver(self): - - fake_solver = 'fake_solver' - good_solver = 'glpk' + fake_solver = "fake_solver" + good_solver = "glpk" # solver_options: dict = None - + # try using a fake solver and a problem incompatible with another solver - + # list of problems: one compatible, one incompatible - + list_problems = [ self.problem_milp_feasible(20, seed_number=50), self.problem_lp_optimal(), self.problem_qp_optimal(), - self.problem_qp_optimal() - ] - + self.problem_qp_optimal(), + ] + # problem types - + list_problem_types = [ SolverInterface.PROBLEM_LP, SolverInterface.PROBLEM_LP, SolverInterface.PROBLEM_QP, - 'fake_problem_type' - ] - + "fake_problem_type", + ] + # list of solvers: one fake, one real - - list_solvers = [ - fake_solver, - good_solver] - + + list_solvers = [fake_solver, good_solver] + # solver settings - + solver_timelimit = 30 - + solver_abs_mip_gap = 0 - + solver_rel_mip_gap = 0.01 - + solver_options = { - 'time_limit':solver_timelimit, - 'relative_mip_gap':solver_rel_mip_gap, - 'absolute_mip_gap':solver_abs_mip_gap - } - + "time_limit": solver_timelimit, + "relative_mip_gap": solver_rel_mip_gap, + "absolute_mip_gap": solver_abs_mip_gap, + } + # ********************************************************************* # ********************************************************************* - + for solver_name in list_solvers: - for index, problem in enumerate(list_problems): - # optimise - + try: - # test problem-solver compatibility - + problem_type = list_problem_types[index] - - if SolverInterface.problem_and_solver_are_compatible( - solver_name, - problem_type) == False: - + + if ( + SolverInterface.problem_and_solver_are_compatible( + solver_name, problem_type + ) + == False + ): continue - + except SolverInterface.UnknownSolverError: - assert True - + except SolverInterface.UnknownProblemTypeError: - assert True - + # test the solver interface - + try: - # configure common solver interface - - _ = SolverInterface( - solver_name=solver_name, - **solver_options) - + + _ = SolverInterface(solver_name=solver_name, **solver_options) + except SolverInterface.UnknownSolverError: - assert True - #************************************************************************** - #************************************************************************** - -#****************************************************************************** -#****************************************************************************** \ No newline at end of file + # ************************************************************************** + # ************************************************************************** + + +# ****************************************************************************** +# ****************************************************************************** -- GitLab
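Note (illustrative, not part of the patch; placed below the signature delimiter so it does not affect applying the diff): the test_gis_utils.py hunks above exercise a GeoPackage round-trip in which container-valued columns (lists, tuples, sets, dicts) are packed before writing and unpacked after reading, since .gpkg files only store primitive types. A minimal sketch of that workflow, assuming the topupopt.data.gis.utils helpers behave exactly as the tests use them (find_gpkg_packable_columns, write_gdf_file, read_gdf_file); the file name is arbitrary:

    from geopandas import GeoDataFrame
    from shapely.geometry import Point
    from topupopt.data.gis import utils as gis_utils

    # one .gpkg-compatible column, one container column that needs packing
    gdf = GeoDataFrame(
        {
            "plain": [1, 2],
            "packed": [[0, 1, 2], {"a": 3}],
        },
        geometry=[Point(0, 0), Point(1, 1)],
    )

    # identify the columns that cannot be stored natively in a .gpkg file
    special_columns = tuple(gis_utils.find_gpkg_packable_columns(gdf))

    # pack those columns and write the file
    gis_utils.write_gdf_file(
        gdf=gdf, filename="example.gpkg", columns_to_pack=special_columns
    )

    # read the file back, unpacking the same columns
    new_gdf = gis_utils.read_gdf_file(
        filename="example.gpkg", packed_columns=special_columns
    )

As the tests show, packing is also needed for columns with mixed or all-None values and for column names that collide in lower case (abc/ABC/Abc), not just for container-valued columns.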
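Note (illustrative, not part of the patch): the test_solvers.py hunks drive Pyomo models through the SolverInterface wrapper. A minimal sketch of the call sequence used by TestSolvers.optimise, assuming the import path topupopt.solvers.interface and the names exactly as they appear in this diff (including the 'sucessful' spelling); glpk must be installed for this to run:

    import pyomo.environ as pyo
    from topupopt.solvers.interface import SolverInterface

    # a trivial LP, mirroring problem_lp_optimal above
    model = pyo.ConcreteModel("lp_optimal")
    model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals)
    model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2])
    model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1)

    # factory arguments, as in test_solver_factory_arguments
    solver_options = {
        "time_limit": 10,
        "relative_mip_gap": 0.01,
        "absolute_mip_gap": 0.001,
    }

    # configure the common interface, get a handler, and solve
    solver_interface = SolverInterface(solver_name="glpk", **solver_options)
    solver_handler = solver_interface.get_solver_handler(**solver_options)
    results = solver_handler.solve(model, tee=False)

    # check the outcome for the given problem type
    assert solver_interface.was_optimisation_sucessful(
        results, SolverInterface.PROBLEM_LP
    )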