diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 62c21ab2c..5cc3422c0 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -20,24 +20,26 @@ E.g. if a new rule becomes available describe how to use it `make test` and in o * Integrate RDIR into sector rules to store intermediate data in scenario folders `PR #1154 `__ -**Minor Changes and bug-fixing** - -* The default configuration for `electricity:estimate_renewable_capacities:year` was updated from 2020 to 2023. `PR #1106 `__ - * Include a dedicated cutout for North America in bundle_config.yaml `PR #1121 `__ * Include a dedicated cutout for Europe in bundle_config.yaml `PR #1125 `__ +* Include a dedicated cutout for Oceania in bundle_config.yaml `PR #1157 `__ + +* Use BASE_DIR in rules and `_helpers.py` script for facilitate module import in subworkflow `PR #1137 `__ + +* Enable sector rules import in subworkflow `PR #1178 `__ + +**Minor Changes and bug-fixing** + +* The default configuration for `electricity:estimate_renewable_capacities:year` was updated from 2020 to 2023. `PR #1106 `__ + * Fix the mismatch between buses and x, y locations while creating H2 Stores `PR #1134 `__ * Enable configfile specification for mock_snakemake `PR #1135 `__ * Fix pre-commit docformatter python issue. 
`PR #1153 `__ -* Use BASE_DIR in rules and `_helpers.py` script for facilitate module import in subworkflow `PR #1137 `__ - -* Include a dedicated cutout for Oceania in bundle_config.yaml `PR #1157 `__ - * Drop duplicate entries in `AL_production.csv` data used in `build_industry_demand` rule `PR #1143 `__ * The computation of `hydro_profile.nc` in `build_renewable_profiles.py` is not differentiated whether alternative clustering is applied or not; the indexing of the different power plants in `add_electricity.py` is performed according to the bus either in case alternative clustering is applied or not and a `hydro_inflow_factor` is computed prior to the computation of `inflow_t` to split the inflow according to the capacity of each different unit of each power plant (if more units are present). `PR #1119 `__ diff --git a/scripts/build_base_energy_totals.py b/scripts/build_base_energy_totals.py index e16f44898..4ac1f7889 100644 --- a/scripts/build_base_energy_totals.py +++ b/scripts/build_base_energy_totals.py @@ -19,7 +19,7 @@ import pandas as pd import py7zr import requests -from _helpers import aggregate_fuels, get_conv_factors +from _helpers import BASE_DIR, aggregate_fuels, get_conv_factors _logger = logging.getLogger(__name__) @@ -375,7 +375,7 @@ def calc_sector(sector): if snakemake.params.update_data: # Delete and existing files to avoid duplication and double counting - files = glob.glob("data/demand/unsd/data/*.txt") + files = glob.glob(os.path.join(BASE_DIR, "data/demand/unsd/data/*.txt")) for f in files: os.remove(f) @@ -385,12 +385,14 @@ def calc_sector(sector): with urlopen(zipurl) as zipresp: with ZipFile(BytesIO(zipresp.read())) as zfile: - zfile.extractall("data/demand/unsd/data") + zfile.extractall(os.path.join(BASE_DIR, "data/demand/unsd/data")) - path = "data/demand/unsd/data" + path = os.path.join(BASE_DIR, "data/demand/unsd/data") # Get the files from the path provided in the OP - all_files = list(Path("data/demand/unsd/data").glob("*.txt")) + 
all_files = list( + Path(os.path.join(BASE_DIR, "data/demand/unsd/data")).glob("*.txt") + ) # Create a dataframe from all downloaded files df = pd.concat( @@ -433,7 +435,9 @@ def calc_sector(sector): df_yr = df_yr[df_yr.country.isin(countries)] # Create an empty dataframe for energy_totals_base - energy_totals_cols = pd.read_csv("data/energy_totals_DF_2030.csv").columns + energy_totals_cols = pd.read_csv( + os.path.join(BASE_DIR, "data/energy_totals_DF_2030.csv") + ).columns energy_totals_base = pd.DataFrame(columns=energy_totals_cols, index=countries) # Lists that combine the different fuels in the dataset to the model's carriers diff --git a/scripts/build_industry_demand.py b/scripts/build_industry_demand.py index 7074e5e89..8617fb466 100644 --- a/scripts/build_industry_demand.py +++ b/scripts/build_industry_demand.py @@ -13,7 +13,7 @@ from itertools import product import pandas as pd -from _helpers import mock_snakemake, read_csv_nafix +from _helpers import BASE_DIR, mock_snakemake, read_csv_nafix _logger = logging.getLogger(__name__) @@ -69,8 +69,12 @@ def country_to_nodal(industrial_production, keys): ) industry_demand = pd.read_csv( - "data/custom/industry_demand_{0}_{1}.csv".format( - snakemake.wildcards["demand"], snakemake.wildcards["planning_horizons"] + os.path.join( + BASE_DIR, + "data/custom/industry_demand_{0}_{1}.csv".format( + snakemake.wildcards["demand"], + snakemake.wildcards["planning_horizons"], + ), ), index_col=[0, 1], ) @@ -204,7 +208,9 @@ def match_technology(df): geo_locs = match_technology(geo_locs).loc[countries_geo] aluminium_year = snakemake.params.aluminium_year - AL = read_csv_nafix("data/AL_production.csv", index_col=0) + AL = read_csv_nafix( + os.path.join(BASE_DIR, "data/AL_production.csv"), index_col=0 + ) # Filter data for the given year and countries AL_prod_tom = AL.query("Year == @aluminium_year and index in @countries_geo")[ "production[ktons/a]" diff --git a/scripts/copy_config.py b/scripts/copy_config.py index 
780511d81..b7073c9fa 100644 --- a/scripts/copy_config.py +++ b/scripts/copy_config.py @@ -5,11 +5,15 @@ import os from shutil import copy +from _helpers import BASE_DIR + files_to_copy = { - "./config.yaml": "config.yaml", - "./Snakefile": "Snakefile", - "./scripts/solve_network.py": "solve_network.py", - "./scripts/prepare_sector_network.py": "prepare_sector_network.py", + os.path.join(BASE_DIR, "./config.yaml"): "config.yaml", + os.path.join(BASE_DIR, "./Snakefile"): "Snakefile", + os.path.join(BASE_DIR, "./scripts/solve_network.py"): "solve_network.py", + os.path.join( + BASE_DIR, "./scripts/prepare_sector_network.py" + ): "prepare_sector_network.py", } if __name__ == "__main__": diff --git a/scripts/prepare_energy_totals.py b/scripts/prepare_energy_totals.py index 119083f02..708420b27 100644 --- a/scripts/prepare_energy_totals.py +++ b/scripts/prepare_energy_totals.py @@ -17,7 +17,7 @@ import pandas as pd import py7zr import requests -from _helpers import read_csv_nafix, three_2_two_digits_country +from _helpers import BASE_DIR, read_csv_nafix, three_2_two_digits_country _logger = logging.getLogger(__name__) @@ -53,7 +53,9 @@ def calculate_end_values(df): investment_year = int(snakemake.wildcards.planning_horizons) demand_sc = snakemake.wildcards.demand # loading the demand scenrario wildcard - base_energy_totals = read_csv_nafix("data/energy_totals_base.csv", index_col=0) + base_energy_totals = read_csv_nafix( + os.path.join(BASE_DIR, "data/energy_totals_base.csv"), index_col=0 + ) growth_factors_cagr = read_csv_nafix( snakemake.input.growth_factors_cagr, index_col=0 ) diff --git a/scripts/prepare_gas_network.py b/scripts/prepare_gas_network.py index 59078803e..cbdcd120a 100644 --- a/scripts/prepare_gas_network.py +++ b/scripts/prepare_gas_network.py @@ -19,7 +19,12 @@ import matplotlib.colors as colors import matplotlib.pyplot as plt import pandas as pd -from _helpers import content_retrieve, progress_retrieve, two_2_three_digits_country +from _helpers import 
( + BASE_DIR, + content_retrieve, + progress_retrieve, + two_2_three_digits_country, +) from build_shapes import gadm from matplotlib.lines import Line2D from pyproj import CRS @@ -58,8 +63,8 @@ def download_IGGIELGN_gas_network(): url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip" # Save locations - zip_fn = Path("IGGIELGN.zip") - to_fn = Path("data/gas_network/scigrid-gas") + zip_fn = Path(os.path.join(BASE_DIR, "IGGIELGN.zip")) + to_fn = Path(os.path.join(BASE_DIR, "data/gas_network/scigrid-gas")) logger.info(f"Downloading databundle from '{url}'.") progress_retrieve(url, zip_fn) @@ -344,6 +349,7 @@ def download_GADM(country_code, update=False, out_logging=False): GADM_filename = get_GADM_filename(country_code) GADM_inputfile_gpkg = os.path.join( + BASE_DIR, "data", "gadm", GADM_filename, @@ -887,7 +893,9 @@ def check_existence(row): elif snakemake.params.gas_config["network_data"] == "IGGIELGN": download_IGGIELGN_gas_network() - gas_network = "data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson" + gas_network = os.path.join( + BASE_DIR, "data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson" + ) pipelines = load_IGGIELGN_data(gas_network) pipelines = prepare_IGGIELGN_data(pipelines) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index ac125b0f9..9a731fa53 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -16,6 +16,7 @@ import ruamel.yaml import xarray as xr from _helpers import ( + BASE_DIR, create_dummy_data, create_network_topology, cycling_shift, @@ -322,8 +323,11 @@ def add_hydrogen(n, costs): if snakemake.config["sector"]["hydrogen"]["underground_storage"]: if snakemake.config["custom_data"]["h2_underground"]: custom_cavern = pd.read_csv( - "data/custom/h2_underground_{0}_{1}.csv".format( - demand_sc, investment_year + os.path.join( + BASE_DIR, + "data/custom/h2_underground_{0}_{1}.csv".format( + demand_sc, investment_year + ), ) ) # countries = 
n.buses.country.unique().to_list() @@ -2661,9 +2665,12 @@ def add_residential(n, costs): def add_custom_water_cost(n): for country in countries: water_costs = pd.read_csv( - "resources/custom_data/{}_water_costs.csv".format(country), - sep=",", - index_col=0, + os.path.join( + BASE_DIR, + "resources/custom_data/{}_water_costs.csv".format(country), + ), + sep=",", + index_col=0, ) water_costs = water_costs.filter(like=country, axis=0).loc[spatial.nodes] electrolysis_links = n.links.filter(like=country, axis=0).filter( diff --git a/scripts/prepare_transport_data_input.py b/scripts/prepare_transport_data_input.py index cffa163e2..a15608adc 100644 --- a/scripts/prepare_transport_data_input.py +++ b/scripts/prepare_transport_data_input.py @@ -10,9 +10,7 @@ import country_converter as coco import numpy as np import pandas as pd - -# from _helpers import configure_logging - +from _helpers import BASE_DIR # logger = logging.getLogger(__name__) @@ -127,7 +125,7 @@ def download_CO2_emissions(): if vehicles_csv.empty or CO2_emissions_csv.empty: # In case one of the urls is not working, we can use the hard-coded data - src = os.getcwd() + "/data/temp_hard_coded/transport_data.csv" + src = BASE_DIR + "/data/temp_hard_coded/transport_data.csv" dest = snakemake.output.transport_data_input shutil.copy(src, dest) else: