diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 386255cd1..00884b873 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,25 +8,19 @@ repos: rev: v5.0.0 hooks: - id: check-merge-conflict - - id: end-of-file-fixer - - id: fix-encoding-pragma - - id: mixed-line-ending - - id: trailing-whitespace - id: check-added-large-files args: ["--maxkb=2000"] - # Sort package imports alphabetically -- repo: https://github.com/PyCQA/isort - rev: 5.13.2 +# Run ruff to lint and format +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.8.1 hooks: - - id: isort - args: ["--profile", "black", "--filter-files"] - - # Convert relative imports to absolute imports -- repo: https://github.com/MarcoGorelli/absolufy-imports - rev: v0.3.1 - hooks: - - id: absolufy-imports + # Run the linter. + - id: ruff + args: [--fix] + # Run the formatter. + - id: ruff-format # Find common spelling mistakes in comments and docstrings - repo: https://github.com/codespell-project/codespell @@ -37,37 +31,15 @@ repos: types_or: [python, rst, markdown] files: ^(scripts|doc)/ -# Make docstrings PEP 257 compliant -# Broken for pre-commit<=4.0.0 -# See https://github.com/PyCQA/docformatter/issues/293 -# - repo: https://github.com/PyCQA/docformatter -# rev: v1.7.5 -# hooks: -# - id: docformatter -# args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"] - -- repo: https://github.com/keewis/blackdoc - rev: v0.3.9 - hooks: - - id: blackdoc - - # Formatting with "black" coding style -- repo: https://github.com/psf/black-pre-commit-mirror - rev: 24.10.0 - hooks: - # Format Python files - - id: black - # Format Jupyter Python notebooks - - id: black-jupyter - - # Remove output from Jupyter notebooks +# Remove output from Jupyter notebooks - repo: https://github.com/aflc/pre-commit-jupyter rev: v1.2.1 hooks: - id: jupyter-notebook-cleanup - args: ["--remove-kernel-metadata"] + args: ['--remove-kernel-metadata'] + exclude: 
examples/solve-on-remote.ipynb - # Do YAML formatting (before the linter checks it for misses) + # YAML formatting - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks rev: v2.14.0 hooks: @@ -81,13 +53,6 @@ repos: hooks: - id: snakefmt - # For cleaning jupyter notebooks -- repo: https://github.com/aflc/pre-commit-jupyter - rev: v1.2.1 - hooks: - - id: jupyter-notebook-cleanup - exclude: examples/solve-on-remote.ipynb - # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool rev: v5.0.2 diff --git a/config/create_scenarios.py b/config/create_scenarios.py index cccc29bc7..c45c7522f 100644 --- a/config/create_scenarios.py +++ b/config/create_scenarios.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -7,7 +6,7 @@ # You can modify the template to your needs and define all possible combinations of config values that should be considered. if "snakemake" in globals(): - filename = snakemake.output[0] + filename = snakemake.output[0] # noqa: F821 else: filename = "../config/scenarios.yaml" diff --git a/data/custom_extra_functionality.py b/data/custom_extra_functionality.py index e7a9df0fc..c6e6a8a0c 100644 --- a/data/custom_extra_functionality.py +++ b/data/custom_extra_functionality.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: : 2023- The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT diff --git a/doc/conf.py b/doc/conf.py index 764a2bf85..13b73422d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/doc/release_notes.rst b/doc/release_notes.rst index bcfba5855..e931365ce 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -109,7 +109,9 @@ Upcoming Release * Bugfix: Align the naming convention for the CO2 network configuration (from `co2network` to 
`co2_network`). This may be a small breaking change. -* Feature: The installation via `make install` now prioritizes mamba over conda for faster installation. Conda is still used as a fallback. The command `make install` now also supports passing the name of the environment, e.g. `make install name=my-project`. +* Development: The installation via `make install` now prioritizes mamba over conda for faster installation. Conda is still used as a fallback. The command `make install` now also supports passing the name of the environment, e.g. `make install name=my-project`. + +* Development: Ruff is now used for linting and formatting. It is used in the pre-commit, so no changes are needed. But you might wanna set it up in your IDE. * Update locations and capacities of ammonia plants. diff --git a/envs/environment.yaml b/envs/environment.yaml index ec2803505..bee59b65f 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -48,7 +48,6 @@ dependencies: - jpype1 - pyxlsb - graphviz -- pre-commit - geojson # Keep in conda environment when calling ipython @@ -60,6 +59,10 @@ dependencies: - rasterio==1.4.1 - libgdal-core<3.10.0 # rasterio>=1.4.2 needed for GDAL 3.10 +# Development dependencies +- pre-commit +- ruff + - pip: - gurobipy - highspy<1.8 diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 000000000..2d75d2025 --- /dev/null +++ b/ruff.toml @@ -0,0 +1,53 @@ +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +extend-include = ['*.ipynb'] + +[lint] +select = [ + 'F', # pyflakes + 'E', # pycodestyle: Error + 'W', # pycodestyle: Warning + 'I', # isort + 'D', # pydocstyle + 'UP', # pyupgrade + # 'ANN', # flake-8 annotations + 'TID', # flake8-tidy-imports + # 'NPY', # numpy + # 'RUF', # ruff +] + +ignore = [ + 'ANN401', # Dynamically typed expressions are forbidden + 'E712', # comparison to False should be 'if cond is False:' or 'if not cond:' + 'E741', # ambiguous variable names + 'D203', # 1 blank 
line required before class docstring + 'D212', # Multi-line docstring summary should start at the second line + 'D401', # First line should be in imperative mood + ] + + +[lint.per-file-ignores] +# pydocstyle ignores, which could be enabled in future when existing +# issues are fixed +"!**/{xxx.py}" = [ + 'E501', # line too long + 'D100', # Missing docstring in public module + 'D101', # Missing docstring in public class + 'D102', # Missing docstring in public method + 'D103', # Missing docstring in public function + 'D104', # Missing docstring in public package + 'D105', # Missing docstring in magic method + 'D107', # Missing docstring in __init__ + 'D200', # One-line docstring should fit on one line with quotes + 'D202', # No blank lines allowed after function docstring + 'D205', # 1 blank line required between summary line and description + 'D400', # First line should end with a period + 'D404', # First word of the docstring should not be "This + 'D413', # Missing blank line after last section + 'D415', # First line should end with a period, question mark, or exclamation point + 'D417', # Missing argument descriptions in the docstring + # Include once available + # https://github.com/astral-sh/ruff/issues/2310 + ] \ No newline at end of file diff --git a/scripts/__init__.py b/scripts/__init__.py index a0dd0632c..1769785c2 100644 --- a/scripts/__init__.py +++ b/scripts/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/_benchmark.py b/scripts/_benchmark.py index 31dc20125..a49774f50 100644 --- a/scripts/_benchmark.py +++ b/scripts/_benchmark.py @@ -1,10 +1,7 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT -""" """ -from __future__ import absolute_import, print_function import logging import os @@ -42,7 +39,7 @@ def __init__( self.timestamps = kw.pop("timestamps", True) self.include_children = 
kw.pop("include_children", True) - super(MemTimer, self).__init__(*args, **kw) + super().__init__(*args, **kw) def run(self): # get baseline memory usage @@ -58,7 +55,7 @@ def run(self): if self.filename is not None: stream = open(self.filename, "w") - stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem)) + stream.write("MEM {:.6f} {:.4f}\n".format(*cur_mem)) stream.flush() else: stream = None @@ -74,7 +71,7 @@ def run(self): ) if stream is not None: - stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem)) + stream.write("MEM {:.6f} {:.4f}\n".format(*cur_mem)) stream.flush() n_measurements += 1 @@ -95,7 +92,7 @@ def run(self): self.pipe.send(n_measurements) -class memory_logger(object): +class memory_logger: """ Context manager for taking and reporting memory measurements at fixed intervals from a separate process, for the duration of a context. @@ -185,7 +182,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): return False -class timer(object): +class timer: level = 0 opened = False @@ -211,14 +208,14 @@ def __enter__(self): def print_usec(self, usec): if usec < 1000: - print("%.1f usec" % usec) + print(f"{usec:.1f} usec") else: msec = usec / 1000 if msec < 1000: - print("%.1f msec" % msec) + print(f"{msec:.1f} msec") else: sec = msec / 1000 - print("%.1f sec" % sec) + print(f"{sec:.1f} sec") def __exit__(self, exc_type, exc_val, exc_tb): if not self.opened and self.verbose: @@ -239,7 +236,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): return False -class optional(object): +class optional: def __init__(self, variable, contextman): self.variable = variable self.contextman = contextman diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 598726f9b..4d0bdf905 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -10,7 +9,6 @@ import os import re import time -import urllib from functools import partial, wraps from os.path 
import exists from pathlib import Path @@ -189,13 +187,13 @@ def set_scenario_config(snakemake): scenario = snakemake.config["run"].get("scenarios", {}) if scenario.get("enable") and "run" in snakemake.wildcards.keys(): try: - with open(scenario["file"], "r") as f: + with open(scenario["file"]) as f: scenario_config = yaml.safe_load(f) except FileNotFoundError: # fallback for mock_snakemake script_dir = Path(__file__).parent.resolve() root_dir = script_dir.parent - with open(root_dir / scenario["file"], "r") as f: + with open(root_dir / scenario["file"]) as f: scenario_config = yaml.safe_load(f) update_config(snakemake.config, scenario_config[snakemake.wildcards.run]) @@ -748,9 +746,9 @@ def update_config_from_wildcards(config, w, inplace=True): if dg_enable: config["sector"]["electricity_distribution_grid"] = True if dg_factor is not None: - config["sector"][ - "electricity_distribution_grid_cost_factor" - ] = dg_factor + config["sector"]["electricity_distribution_grid_cost_factor"] = ( + dg_factor + ) if "biomasstransport" in opts: config["sector"]["biomass_transport"] = True @@ -883,12 +881,12 @@ def rename_techs(label: str) -> str: Removes some prefixes and renames if certain conditions defined in function body are met. 
- Parameters: + Parameters ---------- label: str Technology label to be renamed - Returns: + Returns ------- str Renamed label diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index 831786372..d7f66439a 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -19,7 +18,6 @@ update_config_from_wildcards, ) from add_existing_baseyear import add_build_year_to_new_assets -from pypsa.clustering.spatial import normed_or_uniform logger = logging.getLogger(__name__) idx = pd.IndexSlice diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 6fb93c129..5a23e4d74 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT + """ Adds existing electrical generators, hydro-electric plants as well as greenfield and battery and hydrogen storage to the clustered network. @@ -111,8 +111,6 @@ """ import logging -from pathlib import Path -from typing import Dict, List import numpy as np import pandas as pd @@ -502,9 +500,7 @@ def attach_wind_and_solar( + connection_cost ) logger.info( - "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( - connection_cost.min(), connection_cost.max(), car - ) + f"Added connection cost of {connection_cost.min():0.0f}-{connection_cost.max():0.0f} Eur/MW/a to {car}" ) else: capital_cost = costs.at[car, "capital_cost"] @@ -746,7 +742,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par ) -def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) -> None: +def attach_OPSD_renewables(n: pypsa.Network, tech_map: dict[str, list[str]]) -> None: """ Attach renewable capacities from the OPSD dataset to the network. 
diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index d54e659ca..edc997436 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -22,9 +21,7 @@ update_config_from_wildcards, ) from add_electricity import sanitize_carriers -from definitions.heat_sector import HeatSector from definitions.heat_system import HeatSystem -from definitions.heat_system_type import HeatSystemType from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs logger = logging.getLogger(__name__) @@ -380,8 +377,8 @@ def get_efficiency(heat_system, carrier, nodes, heating_efficiencies, costs): Computes the heating system efficiency based on the sector and carrier type. - Parameters: - ----------- + Parameters + ---------- heat_system : object carrier : str The type of fuel or energy carrier (e.g., 'gas', 'oil'). @@ -392,14 +389,14 @@ def get_efficiency(heat_system, carrier, nodes, heating_efficiencies, costs): costs : pandas.DataFrame A DataFrame containing boiler cost and efficiency data for different heating systems. - Returns: - -------- + Returns + ------- efficiency : pandas.Series or float A pandas Series mapping the efficiencies based on nodes for residential and services sectors, or a single efficiency value for other heating systems (e.g., urban central). - Notes: - ------ + Notes + ----- - For residential and services sectors, efficiency is mapped based on the nodes. - For other sectors, the default boiler efficiency is retrieved from the `costs` database. """ @@ -627,9 +624,12 @@ def set_defaults(n): """ Set default values for missing values in the network. - Parameters: + Parameters + ---------- n (pypsa.Network): The network object. 
- Returns: + + Returns + ------- None """ if "Link" in n.components: diff --git a/scripts/add_transmission_projects_and_dlr.py b/scripts/add_transmission_projects_and_dlr.py index cedb98ce8..f6ccafd32 100644 --- a/scripts/add_transmission_projects_and_dlr.py +++ b/scripts/add_transmission_projects_and_dlr.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/base_network.py b/scripts/base_network.py index 3c8d0e542..20d5df5ca 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -590,15 +589,13 @@ def prefer_voltage(x, which): ) assert ( not df.empty - ), "No buses with defined country within 200km of bus `{}`".format(b) + ), f"No buses with defined country within 200km of bus `{b}`" n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"] logger.warning( - "{} buses are not in any country or offshore shape," - " {} have been assigned from the tag of the entsoe map," - " the rest from the next bus in terms of pathlength.".format( - c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum() - ) + f"{c_nan_b.sum()} buses are not in any country or offshore shape," + f" {c_nan_b.sum() - c_tag_nan_b.sum()} have been assigned from the tag of the entsoe map," + " the rest from the next bus in terms of pathlength." 
) return buses @@ -628,9 +625,7 @@ def findforeignbus(G, i): comp, line = next(iter(G[b0][b1])) if comp != "Line": logger.warning( - "Unable to replace B2B `{}` expected a Line, but found a {}".format( - i, comp - ) + f"Unable to replace B2B `{i}` expected a Line, but found a {comp}" ) continue @@ -646,9 +641,7 @@ def findforeignbus(G, i): n.remove("Bus", b0) logger.info( - "Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}".format( - i, b0, line, linkcntry.at[i], buscntry.at[b1] - ) + f"Replacing B2B converter `{i}` together with bus `{b0}` and line `{line}` by an HVDC tie-line {linkcntry.at[i]}-{buscntry.at[b1]}" ) @@ -727,11 +720,14 @@ def base_network( ): base_network = config["electricity"].get("base_network") osm_prebuilt_version = config["electricity"].get("osm-prebuilt-version") - assert base_network in { - "entsoegridkit", - "osm-raw", - "osm-prebuilt", - }, f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'" + assert ( + base_network + in { + "entsoegridkit", + "osm-raw", + "osm-prebuilt", + } + ), f"base_network must be either 'entsoegridkit', 'osm-raw' or 'osm-prebuilt', but got '{base_network}'" if base_network == "entsoegridkit": warnings.warn( "The 'entsoegridkit' base network is deprecated and will be removed in future versions. Please use 'osm-raw' or 'osm-prebuilt' instead.", @@ -952,12 +948,14 @@ def append_bus_shapes(n, shapes, type): Append shapes to the network. If shapes with the same component and type already exist, they will be removed. - Parameters: + Parameters + ---------- n (pypsa.Network): The network to which the shapes will be appended. shapes (geopandas.GeoDataFrame): The shapes to be appended. **kwargs: Additional keyword arguments used in `n.add`. 
- Returns: + Returns + ------- None """ remove = n.shapes.query("component == 'Bus' and type == @type").index diff --git a/scripts/build_ammonia_production.py b/scripts/build_ammonia_production.py index a5c53c69b..48b0a538f 100644 --- a/scripts/build_ammonia_production.py +++ b/scripts/build_ammonia_production.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index 58242247c..8a90c4e70 100755 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_biomass_transport_costs.py b/scripts/build_biomass_transport_costs.py index bc0233c86..3248fd61d 100644 --- a/scripts/build_biomass_transport_costs.py +++ b/scripts/build_biomass_transport_costs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -41,8 +40,8 @@ def get_cost_per_tkm(pdf, datapage, countrypage): import platform import tabula as tbl - except: - ImportError("Please install tabula-py and platform") + except ImportError as e: + raise ImportError("Please install tabula-py and platform") from e system = platform.system() encoding = "cp1252" if system == "Windows" else "utf-8" diff --git a/scripts/build_central_heating_temperature_profiles/central_heating_temperature_approximator.py b/scripts/build_central_heating_temperature_profiles/central_heating_temperature_approximator.py index 590c7cf7a..0d39fca35 100644 --- a/scripts/build_central_heating_temperature_profiles/central_heating_temperature_approximator.py +++ b/scripts/build_central_heating_temperature_profiles/central_heating_temperature_approximator.py @@ -1,9 +1,7 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to 
PyPSA-Eur # # SPDX-License-Identifier: MIT -import pandas as pd import xarray as xr @@ -160,30 +158,6 @@ def _approximate_return_temperature(self) -> float: """ return self.fixed_return_temperature - @property - def forward_temperature(self) -> xr.DataArray: - """ - Property to get dynamic forward temperature. - - Returns - ------- - xr.DataArray - Dynamic forward temperatures. - """ - return self._approximate_forward_temperature() - - @property - def return_temperature(self) -> float: - """ - Property to get return temperature. - - Returns - ------- - float - Return temperature. - """ - return self._approximate_return_temperature() - def _approximate_forward_temperature(self) -> xr.DataArray: """ Approximate dynamic forward temperature. diff --git a/scripts/build_central_heating_temperature_profiles/run.py b/scripts/build_central_heating_temperature_profiles/run.py index 339b776de..c5d6749da 100644 --- a/scripts/build_central_heating_temperature_profiles/run.py +++ b/scripts/build_central_heating_temperature_profiles/run.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -35,8 +34,6 @@ - AGFW (2022): "Hauptbericht 2022" (https://www.agfw.de/zahlen-und-statistiken/agfw-hauptbericht) """ -import sys - import geopandas as gpd import numpy as np import pandas as pd @@ -53,11 +50,13 @@ def extrapolate_missing_supply_temperatures_by_country( """ Extrapolates missing supply temperatures by country. - Parameters: + Parameters + ---------- extrapolate_from (dict): A dictionary containing supply temperatures to extrapolate from. Should contain all countries. extrapolate_to (dict): A dictionary containing supply temperatures to extrapolate to. Where `country` is present, average ratio between `extrapolate_to[country]` and `extrapolate_from[country]` is applied to all countries for which `country` is not present in `extrapolate_from.keys()` to infer ratio for extrapolation. 
- Returns: + Returns + ------- xr.DataArray: A DataArray containing the extrapolated supply temperatures. """ @@ -85,10 +84,12 @@ def get_country_from_node_name(node_name: str) -> str: """ Extracts the country code from a given node name. - Parameters: + Parameters + ---------- node_name (str): The name of the node. - Returns: + Returns + ------- str: The country code extracted from the node name. """ return node_name[:2] @@ -103,14 +104,14 @@ def map_temperature_dict_to_onshore_regions( Missing values are replaced by the mean of all values. - Parameters: + Parameters ---------- supply_temperature_by_country : dictionary Dictionary with temperatures as values and country keys as keys. regions_onshore : pd.Index Names of onshore regions - Returns: + Returns ------- xr.DataArray The dictionary values mapped to onshore regions with onshore regions as coordinates. diff --git a/scripts/build_clustered_population_layouts.py b/scripts/build_clustered_population_layouts.py index ccabb6566..bacffd2da 100644 --- a/scripts/build_clustered_population_layouts.py +++ b/scripts/build_clustered_population_layouts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_cop_profiles/BaseCopApproximator.py b/scripts/build_cop_profiles/BaseCopApproximator.py index def76e4b7..e127ee31a 100644 --- a/scripts/build_cop_profiles/BaseCopApproximator.py +++ b/scripts/build_cop_profiles/BaseCopApproximator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -15,14 +14,14 @@ class BaseCopApproximator(ABC): Abstract class for approximating the coefficient of performance (COP) of a heat pump. - Attributes: + Attributes ---------- forward_temperature_celsius : Union[xr.DataArray, np.array] The forward temperature in Celsius. 
source_inlet_temperature_celsius : Union[xr.DataArray, np.array] The source inlet temperature in Celsius. - Methods: + Methods ------- __init__(self, forward_temperature_celsius, source_inlet_temperature_celsius) Initialize CopApproximator. @@ -42,7 +41,7 @@ def __init__( """ Initialize CopApproximator. - Parameters: + Parameters ---------- forward_temperature_celsius : Union[xr.DataArray, np.array] The forward temperature in Celsius. @@ -56,7 +55,7 @@ def approximate_cop(self) -> Union[xr.DataArray, np.array]: """ Approximate heat pump coefficient of performance (COP). - Returns: + Returns ------- Union[xr.DataArray, np.array] The calculated COP values. @@ -70,12 +69,12 @@ def celsius_to_kelvin( """ Convert temperature from Celsius to Kelvin. - Parameters: + Parameters ---------- t_celsius : Union[float, xr.DataArray, np.array] Temperature in Celsius. - Returns: + Returns ------- Union[float, xr.DataArray, np.array] Temperature in Kelvin. @@ -94,14 +93,14 @@ def logarithmic_mean( """ Calculate the logarithmic mean temperature difference. - Parameters: + Parameters ---------- t_hot : Union[float, xr.DataArray, np.ndarray] Hot temperature. t_cold : Union[float, xr.DataArray, np.ndarray] Cold temperature. - Returns: + Returns ------- Union[float, xr.DataArray, np.ndarray] Logarithmic mean temperature difference. diff --git a/scripts/build_cop_profiles/CentralHeatingCopApproximator.py b/scripts/build_cop_profiles/CentralHeatingCopApproximator.py index 89eb0f572..713da3a2b 100644 --- a/scripts/build_cop_profiles/CentralHeatingCopApproximator.py +++ b/scripts/build_cop_profiles/CentralHeatingCopApproximator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -21,7 +20,7 @@ class CentralHeatingCopApproximator(BaseCopApproximator): a thermodynamic heat pump model with some hard-to-know parameters being approximated. 
- Attributes: + Attributes ---------- forward_temperature_celsius : Union[xr.DataArray, np.array] The forward temperature in Celsius. @@ -38,7 +37,7 @@ class CentralHeatingCopApproximator(BaseCopApproximator): heat_loss : float, optional The heat loss, by default 0.0. - Methods: + Methods ------- __init__( forward_temperature_celsius: Union[xr.DataArray, np.array], @@ -105,7 +104,7 @@ def __init__( """ Initializes the CentralHeatingCopApproximator object. - Parameters: + Parameters ---------- forward_temperature_celsius : Union[xr.DataArray, np.array] The forward temperature in Celsius. @@ -144,12 +143,12 @@ def approximate_cop(self) -> Union[xr.DataArray, np.array]: """ Calculate the coefficient of performance (COP) for the system. - Notes: - ------ + Notes + ----- Returns 0 where the source inlet temperature is greater than the sink outlet temperature. - Returns: - -------- + Returns + ------- Union[xr.DataArray, np.array]: The calculated COP values. """ return xr.where( @@ -319,7 +318,7 @@ def _approximate_delta_t_refrigerant_sink( Approximates the temperature difference between the refrigerant and heat sink. - Parameters: + Parameters ---------- refrigerant : str, optional The refrigerant used in the system. Either 'isobutane' or 'ammonia. Default is 'ammonia'. @@ -330,13 +329,13 @@ def _approximate_delta_t_refrigerant_sink( c : float, optional Constant term, default is 0.016. - Returns: + Returns ------- Union[xr.DataArray, np.array] The approximate temperature difference between the refrigerant and heat sink. - Notes: - ------ + Notes + ----- This function assumes ammonia as the refrigerant. The approximate temperature difference at the refrigerant sink is calculated using the following formula: @@ -363,7 +362,7 @@ def _ratio_evaporation_compression_work_approximation( """ Calculate the ratio of evaporation to compression work approximation. - Parameters: + Parameters ---------- refrigerant : str, optional The refrigerant used in the system. 
Either 'isobutane' or 'ammonia. Default is 'ammonia'. @@ -374,13 +373,13 @@ def _ratio_evaporation_compression_work_approximation( c : float, optional Coefficient 'c' in the approximation equation. Default is 0.039. - Returns: + Returns ------- Union[xr.DataArray, np.array] The approximated ratio of evaporation to compression work. - Notes: - ------ + Notes + ----- This function assumes ammonia as the refrigerant. The approximation equation used is: diff --git a/scripts/build_cop_profiles/DecentralHeatingCopApproximator.py b/scripts/build_cop_profiles/DecentralHeatingCopApproximator.py index e7622a0cb..1f7543827 100644 --- a/scripts/build_cop_profiles/DecentralHeatingCopApproximator.py +++ b/scripts/build_cop_profiles/DecentralHeatingCopApproximator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_cop_profiles/run.py b/scripts/build_cop_profiles/run.py index 5462f3bca..4398bf4fb 100644 --- a/scripts/build_cop_profiles/run.py +++ b/scripts/build_cop_profiles/run.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -37,10 +36,6 @@ - `resources//cop_profiles.nc`: Heat pump coefficient-of-performance (COP) profiles """ -import sys - -import geopandas as gpd -import numpy as np import pandas as pd import xarray as xr from _helpers import set_scenario_config diff --git a/scripts/build_cross_border_flows.py b/scripts/build_cross_border_flows.py index 7427c09db..81b267e88 100644 --- a/scripts/build_cross_border_flows.py +++ b/scripts/build_cross_border_flows.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 6155b7ba3..58b896fa6 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -1,4 +1,3 @@ -# -*- 
coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_daily_heat_demand.py b/scripts/build_daily_heat_demand.py index 4f135a67b..83a9539f3 100644 --- a/scripts/build_daily_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_direct_heat_source_utilisation_profiles.py b/scripts/build_direct_heat_source_utilisation_profiles.py index 342a1c439..cfe3bc5a5 100644 --- a/scripts/build_direct_heat_source_utilisation_profiles.py +++ b/scripts/build_direct_heat_source_utilisation_profiles.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -25,8 +24,6 @@ - `resources//direct_heat_source_utilisation_profiles_base_s_{clusters}_{planning_horizons}.nc`: Direct heat source utilisation profiles """ -from typing import List - import xarray as xr from _helpers import set_scenario_config @@ -36,17 +33,17 @@ def get_source_temperature(heat_source_key: str): Get the constant temperature of a heat source. Args: - ----- + ---- heat_source_key: str The key (name) of the heat source. Returns: - -------- + ------- float The constant temperature of the heat source in degrees Celsius. Raises: - ------- + ------ ValueError If the heat source is unknown (not in `config`). """ @@ -57,8 +54,8 @@ def get_source_temperature(heat_source_key: str): ] else: raise ValueError( - f"Unknown heat source {heat_source_key}. Must be one of { - snakemake.params.heat_sources.keys()}." + f"Unknown heat source {heat_source_key}. Must be one of " + f"{snakemake.params.heat_sources.keys()}." ) @@ -69,14 +66,14 @@ def get_profile( Get the direct heat source utilisation profile. Args: - ----- + ---- source_temperature: float | xr.DataArray The constant temperature of the heat source in degrees Celsius. 
If `xarray`, indexed by `time` and `region`. If a float, it is broadcasted to the shape of `forward_temperature`. forward_temperature: xr.DataArray The central heating forward temperature profiles. If `xarray`, indexed by `time` and `region`. If a float, it is broadcasted to the shape of `return_temperature`. Returns: - -------- + ------- xr.DataArray | float The direct heat source utilisation profile. @@ -95,7 +92,7 @@ def get_profile( set_scenario_config(snakemake) - direct_utilisation_heat_sources: List[str] = ( + direct_utilisation_heat_sources: list[str] = ( snakemake.params.direct_utilisation_heat_sources ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 9e2a5616b..95c1666cf 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -22,8 +21,8 @@ energy: energy_totals_year: -Notes: ------- +Notes +----- - The district heat share is calculated as the share of urban population at each node, multiplied by the share of district heating in the respective country. - The `sector.district_heating.potential` setting defines the max. district heating share. - The max. share of district heating is increased by a progress factor, depending on the investment year (See `sector.district_heating.progress` setting). 
diff --git a/scripts/build_egs_potentials.py b/scripts/build_egs_potentials.py index db72387c6..ca1374677 100644 --- a/scripts/build_egs_potentials.py +++ b/scripts/build_egs_potentials.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -16,11 +15,8 @@ This scripts overlays that map with the network's regions, and builds a csv with CAPEX, OPEX and p_nom_max """ -import logging - -logger = logging.getLogger(__name__) - import json +import logging import geopandas as gpd import numpy as np @@ -28,6 +24,8 @@ import xarray as xr from shapely.geometry import Polygon +logger = logging.getLogger(__name__) + def prepare_egs_data(egs_file): """ @@ -110,7 +108,7 @@ def prepare_capex(prepared_data): year_data = prepared_data[year].groupby("geometry").mean().reset_index() for g in year_data.geometry: - if not g in year_data.geometry.tolist(): + if g not in year_data.geometry.tolist(): # weird but apparently necessary continue diff --git a/scripts/build_electricity_demand.py b/scripts/build_electricity_demand.py index 746bf200e..7949d9026 100755 --- a/scripts/build_electricity_demand.py +++ b/scripts/build_electricity_demand.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -163,15 +162,15 @@ def manual_adjustment(load, fn_load, countries): electricity consumption data from Croatia (HR) for the year 2019, scaled by the factors derived from https://energy.at-site.be/eurostat-2021/ - Parameters - ---------- + Parameters + ---------- load : pd.DataFrame Load time-series with UTC timestamps x ISO-2 countries load_fn: str File name or url location (file format .csv) - Returns - ------- + Returns + ------- load : pd.DataFrame Manual adjusted and interpolated load time-series with UTC timestamps x ISO-2 countries diff --git a/scripts/build_electricity_demand_base.py b/scripts/build_electricity_demand_base.py index 
392a8660a..d999d6f3b 100644 --- a/scripts/build_electricity_demand_base.py +++ b/scripts/build_electricity_demand_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_electricity_prices.py b/scripts/build_electricity_prices.py index a16d75527..2b7b9a8b2 100644 --- a/scripts/build_electricity_prices.py +++ b/scripts/build_electricity_prices.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_electricity_production.py b/scripts/build_electricity_production.py index 155bf8d54..6ad4db955 100644 --- a/scripts/build_electricity_production.py +++ b/scripts/build_electricity_production.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index def4555fd..a022e0a00 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -41,7 +40,6 @@ import logging import multiprocessing as mp from functools import partial -from typing import List import country_converter as coco import geopandas as gpd @@ -67,7 +65,7 @@ def cartesian(s1: pd.Series, s2: pd.Series) -> pd.DataFrame: The second pandas Series. Returns - ---------- + ------- pd.DataFrame A DataFrame representing the Cartesian product of s1 and s2. @@ -178,31 +176,31 @@ def eurostat_per_country(input_eurostat: str, country: str) -> pd.DataFrame: def build_eurostat( input_eurostat: str, - countries: List[str], + countries: list[str], nprocesses: int = 1, disable_progressbar: bool = False, ) -> pd.DataFrame: """ Return multi-index for all countries' energy data in TWh/a. 
- Parameters: - ----------- + Parameters + ---------- input_eurostat : str Path to the Eurostat database. - countries : List[str] + countries : list[str] List of countries for which energy data is to be retrieved. nprocesses : int, optional Number of processes to use for parallel execution, by default 1. disable_progressbar : bool, optional Whether to disable the progress bar, by default False. - Returns: - -------- + Returns + ------- pd.DataFrame Multi-index DataFrame containing energy data for all countries in TWh/a. - Notes: - ------ + Notes + ----- - The function first renames the countries in the input list using the `idees_rename` mapping and removes "CH". - It then reads country-wise data using :func:`eurostat_per_country` into a single DataFrame. - The data is reordered, converted to TWh/a, and missing values are filled. @@ -278,7 +276,7 @@ def build_swiss() -> pd.DataFrame: Return a pd.DataFrame of Swiss energy data in TWh/a. Returns - -------- + ------- pd.DataFrame Swiss energy data in TWh/a. @@ -563,14 +561,14 @@ def idees_per_country(ct: str, base_dir: str) -> pd.DataFrame: return pd.DataFrame(ct_totals) -def build_idees(countries: List[str]) -> pd.DataFrame: +def build_idees(countries: list[str]) -> pd.DataFrame: """ Build energy totals from IDEES database for the given list of countries using :func:`idees_per_country`. Parameters ---------- - countries : List[str] + countries : list[str] List of country names for which energy totals need to be built. Returns @@ -652,7 +650,7 @@ def fill_missing_years(fill_values: pd.Series) -> pd.Series: def build_energy_totals( - countries: List[str], + countries: list[str], eurostat: pd.DataFrame, swiss: pd.DataFrame, idees: pd.DataFrame, @@ -663,7 +661,7 @@ def build_energy_totals( Parameters ---------- - countries : List[str] + countries : list[str] List of country codes for which energy totals are to be calculated. eurostat : pd.DataFrame Eurostat energy balances dataframe. 
@@ -923,13 +921,13 @@ def build_energy_totals( return df -def build_district_heat_share(countries: List[str], idees: pd.DataFrame) -> pd.Series: +def build_district_heat_share(countries: list[str], idees: pd.DataFrame) -> pd.Series: """ Calculate the share of district heating for each country. Parameters ---------- - countries : List[str] + countries : list[str] List of country codes for which to calculate district heating share. idees : pd.DataFrame IDEES energy data dataframe. @@ -1014,7 +1012,7 @@ def build_eea_co2( - It drops unneeded columns and converts the emissions to Mt. References - --------- + ---------- - `EEA CO2 data `_ (downloaded 201228, modified by EEA last on 201221) """ @@ -1115,14 +1113,14 @@ def build_eurostat_co2(eurostat: pd.DataFrame, year: int = 1990) -> pd.Series: def build_co2_totals( - countries: List[str], eea_co2: pd.DataFrame, eurostat_co2: pd.DataFrame + countries: list[str], eea_co2: pd.DataFrame, eurostat_co2: pd.DataFrame ) -> pd.DataFrame: """ Combine CO2 emissions data from EEA and Eurostat for a list of countries. Parameters ---------- - countries : List[str] + countries : list[str] List of country codes for which CO2 totals need to be built. eea_co2 : pd.DataFrame DataFrame with EEA CO2 emissions data. @@ -1168,14 +1166,14 @@ def build_co2_totals( def build_transport_data( - countries: List[str], population: pd.DataFrame, idees: pd.DataFrame + countries: list[str], population: pd.DataFrame, idees: pd.DataFrame ) -> pd.DataFrame: """ Build transport data for a set of countries based on IDEES data. Parameters ---------- - countries : List[str] + countries : list[str] List of country codes. population : pd.DataFrame DataFrame with population data. 
@@ -1259,7 +1257,7 @@ def build_transport_data( def rescale_idees_from_eurostat( - idees_countries: List[str], energy: pd.DataFrame, eurostat: pd.DataFrame + idees_countries: list[str], energy: pd.DataFrame, eurostat: pd.DataFrame ) -> pd.DataFrame: """ Takes JRC IDEES data from 2021 and rescales it by the ratio of the Eurostat @@ -1268,7 +1266,7 @@ def rescale_idees_from_eurostat( Parameters ---------- - idees_countries : List[str] + idees_countries : list[str] List of IDEES country codes. energy : pd.DataFrame DataFrame with JRC IDEES data. @@ -1496,7 +1494,7 @@ def update_residential_from_eurostat(energy: pd.DataFrame) -> pd.DataFrame: # convert TJ to TWh col_to_rename = {"geo": "country", "TIME_PERIOD": "year", "OBS_VALUE": nrg_name} idx_to_rename = {v: k for k, v in idees_rename.items()} - drop_geo = ["EU27_2020", "EA20"] + drop_geo = ["EU27_2020", "EA20"] # noqa: F841 nrg_data = eurostat_households.query( "nrg_bal == @code and siec == @siec and geo not in @drop_geo and OBS_VALUE > 0" ).copy() @@ -1525,7 +1523,8 @@ def build_transformation_output_coke(eurostat, fn): it needs to be processed and added separately. The filtered data is saved as a CSV file. - Parameters: + Parameters + ---------- eurostat (pd.DataFrame): A pandas DataFrame containing Eurostat data with a multi-level index fn (str): The file path where the resulting CSV file should be saved. @@ -1540,14 +1539,14 @@ def build_transformation_output_coke(eurostat, fn): def build_heating_efficiencies( - countries: List[str], idees: pd.DataFrame + countries: list[str], idees: pd.DataFrame ) -> pd.DataFrame: """ Build heating efficiencies for a set of countries based on IDEES data. Parameters ---------- - countries : List[str] + countries : list[str] List of country codes. idees : pd.DataFrame DataFrame with IDEES data. @@ -1563,8 +1562,6 @@ def build_heating_efficiencies( - It fills missing data with average data. 
""" - years = np.arange(2000, 2022) - cols = idees.columns[ idees.columns.str.contains("space efficiency") ^ idees.columns.str.contains("water efficiency") diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index ef8aef124..804d96acb 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -28,14 +27,14 @@ sector: existing_capacities: -Notes: ------- +Notes +----- - Data for Albania, Montenegro and Macedonia is not included in input database and assumed 0. - Coal and oil boilers are assimilated to oil boilers. - All ground-source heat pumps are assumed in rural areas and all air-source heat pumps are assumed to be in urban areas. -References: ------------ +References +---------- - "Mapping and analyses of the current and future (2020 - 2030) heating/cooling fuel deployment (fossil/renewables)" (https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en) """ @@ -139,9 +138,9 @@ def build_existing_heating(): ) nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 - nodal_heat_name_tech[ - (f"{sector} urban decentral", "air heat pump") - ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} urban decentral", "air heat pump")] += ( + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + ) nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 diff --git a/scripts/build_gas_input_locations.py b/scripts/build_gas_input_locations.py index 8c960f427..f0084133f 100644 --- a/scripts/build_gas_input_locations.py +++ b/scripts/build_gas_input_locations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # 
SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_gas_network.py b/scripts/build_gas_network.py index dd8fc7343..24a2ba36e 100644 --- a/scripts/build_gas_network.py +++ b/scripts/build_gas_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_gdp_pop_non_nuts3.py b/scripts/build_gdp_pop_non_nuts3.py index 20d92f059..79d141cc7 100644 --- a/scripts/build_gdp_pop_non_nuts3.py +++ b/scripts/build_gdp_pop_non_nuts3.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -29,7 +28,8 @@ def calc_gdp_pop(country, regions, gdp_non_nuts3, pop_non_nuts3): """ Calculate the GDP p.c. and population values for non NUTS3 regions. - Parameters: + Parameters + ---------- country (str): The two-letter country code of the non-NUTS3 region. regions (GeoDataFrame): A GeoDataFrame containing the regions. gdp_non_nuts3 (str): The file path to the dataset containing the GDP p.c values @@ -37,7 +37,8 @@ def calc_gdp_pop(country, regions, gdp_non_nuts3, pop_non_nuts3): pop_non_nuts3 (str): The file path to the dataset containing the POP values for non NUTS3 countries (e.g. MD, UA) - Returns: + Returns + ------- tuple: A tuple containing two GeoDataFrames: - gdp: A GeoDataFrame with the mean GDP p.c. values mapped to each bus. - pop: A GeoDataFrame with the summed POP values mapped to each bus. 
diff --git a/scripts/build_hac_features.py b/scripts/build_hac_features.py index cd2d4d29f..eb8ed47d8 100644 --- a/scripts/build_hac_features.py +++ b/scripts/build_hac_features.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_heat_source_potentials/onshore_region_data.py b/scripts/build_heat_source_potentials/onshore_region_data.py index 0f58e8fac..3357722df 100755 --- a/scripts/build_heat_source_potentials/onshore_region_data.py +++ b/scripts/build_heat_source_potentials/onshore_region_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -6,8 +5,6 @@ Helper class for matching heat source potentials to onshore regions. """ -from typing import List - import geopandas as gpd diff --git a/scripts/build_heat_source_potentials/run.py b/scripts/build_heat_source_potentials/run.py index 9446dd5ea..f5d107609 100644 --- a/scripts/build_heat_source_potentials/run.py +++ b/scripts/build_heat_source_potentials/run.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_heat_totals.py b/scripts/build_heat_totals.py index 673023ebe..4707b1e87 100644 --- a/scripts/build_heat_totals.py +++ b/scripts/build_heat_totals.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py index 5f1a2f394..a82a04c6c 100644 --- a/scripts/build_hourly_heat_demand.py +++ b/scripts/build_hourly_heat_demand.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 6a1ff13de..2813431e8 100644 --- 
a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index 52de2b148..9a4b038cf 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur > # # SPDX-License-Identifier: MIT diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index 64cd3f20c..7b5f800dc 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -253,7 +252,8 @@ def add_coke_ovens(demand, fn, year, factor=0.75): consumption should be attributed to the iron and steel production. The default value of 75% is based on https://doi.org/10.1016/j.erss.2022.102565 - Parameters: + Parameters + ---------- demand (pd.DataFrame): A pandas DataFrame containing energy demand data with a multi-level column index where one of the levels corresponds to "Integrated steelworks". @@ -263,7 +263,8 @@ def add_coke_ovens(demand, fn, year, factor=0.75): factor (float, optional): The proportion of coke ovens energy consumption to add to the integrated steelworks demand. Defaults to 0.75. - Returns: + Returns + ------- pd.DataFrame: The updated `demand` DataFrame with the coke ovens energy consumption added to the integrated steelworks energy demand. 
""" diff --git a/scripts/build_industrial_energy_demand_per_node.py b/scripts/build_industrial_energy_demand_per_node.py index fbd85e062..bdb0a531c 100644 --- a/scripts/build_industrial_energy_demand_per_node.py +++ b/scripts/build_industrial_energy_demand_per_node.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_industrial_energy_demand_per_node_today.py b/scripts/build_industrial_energy_demand_per_node_today.py index 2770cdb84..c8fda2e6d 100644 --- a/scripts/build_industrial_energy_demand_per_node_today.py +++ b/scripts/build_industrial_energy_demand_per_node_today.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur > # # SPDX-License-Identifier: MIT diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index 251a0c7ca..698f55ca6 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_industrial_production_per_country_tomorrow.py b/scripts/build_industrial_production_per_country_tomorrow.py index d916009dc..8d99da57b 100644 --- a/scripts/build_industrial_production_per_country_tomorrow.py +++ b/scripts/build_industrial_production_per_country_tomorrow.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_industrial_production_per_node.py b/scripts/build_industrial_production_per_node.py index a7431beb0..3a5a402ee 100644 --- a/scripts/build_industrial_production_per_node.py +++ b/scripts/build_industrial_production_per_node.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # 
SPDX-License-Identifier: MIT diff --git a/scripts/build_industry_sector_ratios.py b/scripts/build_industry_sector_ratios.py index 30ee392e8..9bd5ec465 100644 --- a/scripts/build_industry_sector_ratios.py +++ b/scripts/build_industry_sector_ratios.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -1393,9 +1392,8 @@ def textiles_and_leather(): df.loc["heat", sector] += s_fec["Low-enthalpy heat"] # Efficiency changes due to electrification - key = "Textiles: Electric drying" # in new JRC data zero assume old data - # eff_elec = s_ued[key] / s_fec[key] + eff_elec = 73.7 / 146.6 df.loc["elec", sector] += s_ued["Textiles: Drying"] / eff_elec diff --git a/scripts/build_industry_sector_ratios_intermediate.py b/scripts/build_industry_sector_ratios_intermediate.py index e4a7b4d5e..ad867dc3c 100644 --- a/scripts/build_industry_sector_ratios_intermediate.py +++ b/scripts/build_industry_sector_ratios_intermediate.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_line_rating.py b/scripts/build_line_rating.py index 146b4bb3a..d57103063 100755 --- a/scripts/build_line_rating.py +++ b/scripts/build_line_rating.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_monthly_prices.py b/scripts/build_monthly_prices.py index 1523f0bd1..12309e877 100644 --- a/scripts/build_monthly_prices.py +++ b/scripts/build_monthly_prices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_osm_network.py b/scripts/build_osm_network.py index 3944be474..1bdf3f551 100644 --- a/scripts/build_osm_network.py +++ b/scripts/build_osm_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors 
to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -82,10 +81,12 @@ def _merge_identical_lines(lines): """ Aggregates lines with identical geometries and voltage levels by merging them into a single line. - Parameters: + Parameters + ---------- - lines (pd.DataFrame): DataFrame containing line information with columns "geometry", "voltage", "line_id", and "circuits". - Returns: + Returns + ------- - pd.DataFrame: DataFrame with aggregated lines, where lines with identical geometries and voltage levels are merged. """ lines_all = lines.copy() @@ -135,17 +136,17 @@ def _add_line_endings(buses, lines, add=0, name="line-end"): This function creates virtual bus endpoints at the boundaries of the given lines' geometries. It ensures that each unique combination of geometry and voltage is represented by a single bus endpoint. - Parameters: + Parameters + ---------- - buses (pd.DataFrame): DataFrame containing bus information. - lines (pd.DataFrame): DataFrame containing line information, including 'voltage' and 'geometry' columns. - add (int, optional): Offset to add to the bus index for generating unique bus IDs. Default is 0. - name (str, optional): Name to assign to the 'contains' column for the virtual buses. Default is "line-end". - Returns: + Returns + ------- - pd.DataFrame: DataFrame containing the virtual bus endpoints with columns 'bus_id', 'voltage', 'geometry', and 'contains'. """ - buses_all = buses.copy() - endpoints0 = lines[["voltage", "geometry"]].copy() endpoints0["geometry"] = endpoints0["geometry"].apply(lambda x: x.boundary.geoms[0]) @@ -168,11 +169,13 @@ def _split_linestring_by_point(linestring, points): """ Splits a LineString geometry by multiple points. - Parameters: + Parameters + ---------- - linestring (LineString): The LineString geometry to be split. - points (list of Point): A list of Point geometries where the LineString should be split. 
- Returns: + Returns + ------- - list of LineString: A list of LineString geometries resulting from the split. """ list_linestrings = [linestring] @@ -192,13 +195,15 @@ def split_overpassing_lines(lines, buses, distance_crs=DISTANCE_CRS, tol=1): Split overpassing lines by splitting them at nodes within a given tolerance, to include the buses being overpassed. - Parameters: + Parameters + ---------- - lines (GeoDataFrame): The lines to be split. - buses (GeoDataFrame): The buses representing nodes. - distance_crs (str): The coordinate reference system (CRS) for distance calculations. - tol (float): The tolerance distance in meters for determining if a bus is within a line. - Returns: + Returns + ------- - lines (GeoDataFrame): The split lines. - buses (GeoDataFrame): The buses representing nodes. """ @@ -286,13 +291,15 @@ def _create_merge_mapping(lines, buses, buses_polygon, geo_crs=GEO_CRS): - Identifies connected components in the graph and merges lines within each component. - Note that only lines that unambigruosly can be merged are considered. - Parameters: + Parameters + ---------- - lines (GeoDataFrame): GeoDataFrame containing line data with columns ["line_id", "geometry", "voltage", "circuits"]. - buses (GeoDataFrame): GeoDataFrame containing bus data with columns ["bus_id", "geometry"]. - buses_polygon (GeoDataFrame): GeoDataFrame containing the polygon data to filter virtual buses. - geo_crs (CRS, optional): Coordinate reference system for the geometries. Defaults to GEO_CRS. - Returns: + Returns + ------- - GeoDataFrame: A GeoDataFrame containing the merged lines with columns ["line_id", "circuits", "voltage", "geometry", "underground", "contains_lines", "contains_buses"]. """ logger.info( @@ -468,13 +475,15 @@ def _merge_lines_over_virtual_buses( """ Merges lines over virtual buses and updates the lines and buses DataFrames accordingly. - Parameters: + Parameters + ---------- - lines (pd.DataFrame): DataFrame containing line information. 
- buses (pd.DataFrame): DataFrame containing bus information. - merged_lines_map (pd.DataFrame): DataFrame mapping virtual buses to the lines they contain. - distance_crs (str, optional): Coordinate reference system for calculating distances. Defaults to DISTANCE_CRS. - Returns: + Returns + ------- - tuple: A tuple containing the updated lines and buses DataFrames. """ lines_merged = lines.copy() @@ -524,7 +533,8 @@ def _create_station_seeds( """ Creates aggregated station seeds (candidates) based on substation polygons and updates their country information. - Parameters: + Parameters + ---------- - buses (GeoDataFrame): GeoDataFrame containing bus information with columns "bus_id" and "geometry". - buses_polygon (GeoDataFrame): GeoDataFrame containing bus polygon information with columns "bus_id" and "geometry". - country_shapes (GeoDataFrame): GeoDataFrame containing country shapes with a "name" column. @@ -532,7 +542,8 @@ def _create_station_seeds( - distance_crs (CRS, optional): Coordinate reference system for distance calculations. Default is DISTANCE_CRS. - geo_crs (CRS, optional): Coordinate reference system for geographic calculations. Default is GEO_CRS. - Returns: + Returns + ------- GeoDataFrame: Aggregated station seeds with updated country information and renamed virtual buses. """ # Drop all buses that have bus_id starting with "way/" or "relation/" prefix @@ -668,17 +679,19 @@ def _merge_buses_to_stations( """ Merges buses with the same voltage level within the same station. - Parameters: + Parameters + ---------- - buses (GeoDataFrame): GeoDataFrame containing bus data with geometries. - stations (GeoDataFrame): GeoDataFrame containing station data with geometries. - distance_crs (CRS, optional): Coordinate reference system for distance calculations. Defaults to DISTANCE_CRS. - geo_crs (CRS, optional): Coordinate reference system for geographic coordinates. Defaults to GEO_CRS. 
- Returns: + Returns + ------- - GeoDataFrame: Updated GeoDataFrame with merged buses and updated geometries. """ # Merge buses with same voltage and within tolerance - logger.info(f"Merging buses of the same substation.") + logger.info("Merging buses of the same substation.") # bus types (AC != DC) buses_all = buses.copy().reset_index(drop=True) stations_all = stations.copy().set_index("station_id") @@ -741,10 +754,12 @@ def _remove_loops_from_multiline(multiline): This function iteratively removes closed loops from a MultiLineString geometry until no closed loops remain or a maximum of 5 iterations is reached. - Parameters: + Parameters + ---------- - multiline (shapely.geometry.MultiLineString or shapely.geometry.LineString): The input geometry which may contain closed loops. - Returns: + Returns + ------- - shapely.geometry.MultiLineString or shapely.geometry.LineString: The geometry with closed loops removed. """ elements_initial = ( @@ -783,14 +798,16 @@ def _identify_linestring_between_polygons( This function takes a MultiLineString and two polygons, and identifies a LineString within the MultiLineString that touches both polygons. If no such LineString is found, the original MultiLineString is returned. - Parameters: + Parameters + ---------- - multiline (shapely.geometry.MultiLineString or shapely.geometry.LineString): The input MultiLineString or LineString. - polygon0 (shapely.geometry.Polygon): The first polygon. - polygon1 (shapely.geometry.Polygon): The second polygon. - geo_crs (str or pyproj.CRS, optional): The geographic coordinate reference system. Default is GEO_CRS. - distance_crs (str or pyproj.CRS, optional): The distance coordinate reference system. Default is DISTANCE_CRS. - Returns: + Returns + ------- - shapely.geometry.LineString or shapely.geometry.MultiLineString: The identified LineString that touches both polygons, or the original MultiLineString if no such LineString is found. 
""" list_lines = ( @@ -833,7 +850,8 @@ def _map_endpoints_to_buses( """ Maps the endpoints of lines to buses based on spatial relationships. - Parameters: + Parameters + ---------- - connection (GeoDataFrame): GeoDataFrame containing the line connections. - buses (GeoDataFrame): GeoDataFrame containing the bus information. - shape (str, optional): The shape type to use for mapping. Default is "station_polygon". @@ -842,7 +860,8 @@ def _map_endpoints_to_buses( - distance_crs (CRS, optional): Coordinate reference system for distance calculations. Default is DISTANCE_CRS. - geo_crs (CRS, optional): Coordinate reference system for geographic data. Default is GEO_CRS. - Returns: + Returns + ------- - GeoDataFrame: Updated GeoDataFrame with mapped endpoints and filtered lines. """ logger.info("Mapping endpoints of lines to buses.") @@ -973,11 +992,13 @@ def _add_point_to_line(linestring, point): Adds the bus coordinate to a linestring by extending the linestring with a new segment. - Parameters: + Parameters + ---------- - linestring (LineString): The original linestring to extend. - point (Point): The shapely.Point of the bus. - Returns: + Returns + ------- - merged (LineString): The extended linestring with the new segment. """ start = linestring.boundary.geoms[0] @@ -1004,11 +1025,13 @@ def _extend_lines_to_buses(connection, buses): at both ends (bus0 and bus1). The resulting DataFrame will have updated geometries that include these bus points. - Parameters: + Parameters + ---------- - connection (pd.DataFrame): DataFrame containing the lines/links with their geometries. - buses (pd.DataFrame): DataFrame containing the bus points with their geometries. - Returns: + Returns + ------- - pd.DataFrame: DataFrame with updated geometries for the lines/links, including the bus points. 
""" lines_all = connection.copy() @@ -1052,13 +1075,15 @@ def _determine_bus_capacity(buses, lines, voltages, line_types): """ Determines the bus capacity based on the sum of connected line capacities. - Parameters: + Parameters + ---------- - buses (pd.DataFrame): DataFrame containing bus information. - lines (pd.DataFrame): DataFrame containing line information. - voltages (list): List of voltage levels based on config file. - line_types (dict): Dictionary mapping voltage levels to line types based on config file. - Returns: + Returns + ------- - buses_all (pd.DataFrame): Containing the updated bus information with calculated capacities. """ logger.info("Determining total capacity of connected lines for each bus.") @@ -1102,11 +1127,13 @@ def _add_transformers(buses, geo_crs=GEO_CRS): - Assigns unique transformer IDs based on station ID and voltage levels. - Calculates the capacity of transformers based on the maximum capacity of connected buses. - Parameters: + Parameters + ---------- - buses (GeoDataFrame): A GeoDataFrame containing bus information with columns including 'bus_id', 'station_id', 'voltage', and 'geometry'. - geo_crs (CRS, optional): Coordinate reference system for the GeoDataFrame. Defaults to GEO_CRS. - Returns: + Returns + ------- - GeoDataFrame: A GeoDataFrame containing the added transformers with columns including 'transformer_id' and TRANSFORMERS_COLUMNS. """ buses_all = buses.copy().set_index("bus_id") @@ -1183,7 +1210,8 @@ def _add_dc_buses( """ Adds DC buses to the network and mapping them to the nearest AC buses. - Parameters: + Parameters + ---------- - converters_polygon (GeoDataFrame): GeoDataFrame containing the polygons of the DC converters. - links (GeoDataFrame): GeoDataFrame containing the links in the network. - buses (GeoDataFrame): GeoDataFrame containing the AC buses in the network. @@ -1191,7 +1219,8 @@ def _add_dc_buses( - distance_crs (CRS, optional): Coordinate reference system for distance calculations. 
Defaults to DISTANCE_CRS. - geo_crs (CRS, optional): Coordinate reference system for geographic calculations. Defaults to GEO_CRS. - Returns: + Returns + ------- - GeoDataFrame: A GeoDataFrame containing the DC buses with their corresponding PoI and mapped to the nearest AC bus. """ dc_buses = converters_polygon.copy() @@ -1261,12 +1290,14 @@ def _map_links_to_dc_buses(links, dc_buses, distance_crs=DISTANCE_CRS): """ Maps links to DC buses based on geographical proximity and updates DC bus attributes. - Parameters: + Parameters + ---------- - links (GeoDataFrame): GeoDataFrame containing link geometries and attributes. - dc_buses (GeoDataFrame): GeoDataFrame containing DC bus geometries and attributes. - distance_crs (CRS, optional): Coordinate reference system to use for distance calculations. Defaults to DISTANCE_CRS. - Returns: + Returns + ------- - tuple: A tuple containing: - links_all (GeoDataFrame): Updated GeoDataFrame of links with mapped DC buses. - dc_buses_all (GeoDataFrame): Updated GeoDataFrame of DC buses with additional attributes. @@ -1338,11 +1369,13 @@ def _add_converter_links(dc_buses, buses): links (converters) between them. It filters out DC buses that do not have an associated AC bus, renames columns for clarity, and constructs geometries for the links. - Parameters: + Parameters + ---------- - dc_buses (pd.DataFrame): DataFrame containing DC bus information. - buses (pd.DataFrame): DataFrame containing AC bus information. - Returns: + Returns + ------- - pd.DataFrame: DataFrame containing the converter links. """ logger.info("Adding converter links between DC buses and AC buses.") @@ -1380,11 +1413,13 @@ def _closest_voltage(voltage, voltage_list): """ Returns the closest voltage from a list of voltages to a given voltage. - Parameters: + Parameters + ---------- - voltage (float): The source voltage. - voltage_list (list): List of voltages to compare against. 
- Returns: + Returns + ------- - float: The closest voltage to the source voltage """ return min(voltage_list, key=lambda x: abs(x - voltage)) @@ -1394,14 +1429,16 @@ def _finalise_network(all_buses, converters, lines, links, transformers): """ Finalises network components and prepares for export. - Parameters: + Parameters + ---------- - buses (pd.DataFrame): DataFrame containing bus information. - converters (pd.DataFrame): DataFrame containing converter information. - lines (pd.DataFrame): DataFrame containing line information. - links (pd.DataFrame): DataFrame containing link information. - transformers (pd.DataFrame): DataFrame containing transformer information. - Returns: + Returns + ------- - tuple: A tuple containing the updated DataFrames for buses, converters, lines, links, and transformers """ logger.info("Finalising network components and preparing for export.") diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py index 723bf290e..3ffe06d1d 100644 --- a/scripts/build_population_layouts.py +++ b/scripts/build_population_layouts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 84792e038..1ccccaa28 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index 6a985220c..a0d4ead8a 100755 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py 
index 69678bc45..873110b76 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -1,7 +1,4 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# SPDX-FileCopyrightText: Contributors to PyPSA-Eur > +# SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT """ diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index 866ccab73..23da7150b 100755 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -134,7 +132,8 @@ def get_average_temperature_during_heating_season(temperature, t_threshold=15): input: temperature : pd.Series(Index=time, values=temperature) t_threshold : threshold temperature for heating degree days (HDD) - returns: + + Returns: average temperature """ t_average_daily = temperature.resample("1D").mean() @@ -367,11 +366,11 @@ def prepare_building_topology(u_values, same_building_topology=True): # get total area of building components for element in building_elements: - elements = ["A_{}_1".format(element), "A_{}_2".format(element)] + elements = [f"A_{element}_1", f"A_{element}_2"] data_tabula = pd.concat( [ data_tabula.drop(elements, axis=1), - data_tabula[elements].sum(axis=1).rename("A_{}".format(element)), + data_tabula[elements].sum(axis=1).rename(f"A_{element}"), ], axis=1, ) @@ -469,7 +468,7 @@ def map_periods(build_year1, build_year2): # total buildings envelope surface [m^2] data_tabula["A_envelope"] = data_tabula[ - ["A_{}".format(element) for element in building_elements] + [f"A_{element}" for element in building_elements] ].sum(axis=1) return data_tabula diff --git a/scripts/build_salt_cavern_potentials.py b/scripts/build_salt_cavern_potentials.py index 74f0ad828..f7a63d15c 100644 --- a/scripts/build_salt_cavern_potentials.py +++ b/scripts/build_salt_cavern_potentials.py @@ -1,4 +1,3 @@ -# 
-*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_sequestration_potentials.py b/scripts/build_sequestration_potentials.py index aefff9f2b..bc83e1a6a 100644 --- a/scripts/build_sequestration_potentials.py +++ b/scripts/build_sequestration_potentials.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 0b284c89a..099e77931 100755 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur > # # SPDX-License-Identifier: MIT @@ -125,8 +124,8 @@ def countries(naturalearth, country_list): def eez(eez, country_list): df = gpd.read_file(eez) - iso3_list = cc.convert(country_list, src="ISO2", to="ISO3") - pol_type = ["200NM", "Overlapping claim"] + iso3_list = cc.convert(country_list, src="ISO2", to="ISO3") # noqa: F841 + pol_type = ["200NM", "Overlapping claim"] # noqa: F841 df = df.query("ISO_TER1 in @iso3_list and POL_TYPE in @pol_type").copy() df["name"] = cc.convert(df.ISO_TER1, src="ISO3", to="ISO2") s = df.set_index("name").geometry.map( diff --git a/scripts/build_ship_raster.py b/scripts/build_ship_raster.py index 37ae0cd63..9c62fff60 100644 --- a/scripts/build_ship_raster.py +++ b/scripts/build_ship_raster.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_shipping_demand.py b/scripts/build_shipping_demand.py index 6b79339ee..da5808d89 100644 --- a/scripts/build_shipping_demand.py +++ b/scripts/build_shipping_demand.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -28,7 +27,7 @@ demand = demand.xs(snakemake.params.energy_totals_year, level=1) # read port data into 
GeoDataFrame - with open(snakemake.input.ports, "r", encoding="latin_1") as f: + with open(snakemake.input.ports, encoding="latin_1") as f: ports = json.load(f) ports = pd.json_normalize(ports, "features", sep="_") coordinates = ports.geometry_coordinates diff --git a/scripts/build_solar_thermal_profiles.py b/scripts/build_solar_thermal_profiles.py index b2ef43b4a..2b910e2cd 100644 --- a/scripts/build_solar_thermal_profiles.py +++ b/scripts/build_solar_thermal_profiles.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_temperature_profiles.py b/scripts/build_temperature_profiles.py index 5eb53df75..d65d9ea5c 100644 --- a/scripts/build_temperature_profiles.py +++ b/scripts/build_temperature_profiles.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/build_transmission_projects.py b/scripts/build_transmission_projects.py index 05d6da76c..dad793835 100644 --- a/scripts/build_transmission_projects.py +++ b/scripts/build_transmission_projects.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -46,7 +45,6 @@ """ import logging -import os from pathlib import Path import geopandas as gpd @@ -163,12 +161,14 @@ def get_branch_coords_from_geometry(linestring, reversed=False): Reduces a linestring to its start and end points. Used to simplify the linestring which can have more than two points. - Parameters: + Parameters + ---------- linestring: Shapely linestring reversed (bool, optional): If True, returns the end and start points instead of the start and end points. Defaults to False. - Returns: + Returns + ------- numpy.ndarray: Flattened array of start and end coordinates. 
""" coords = np.asarray(linestring.coords) @@ -181,12 +181,14 @@ def get_branch_coords_from_buses(line): """ Gets line string for branch component in an pypsa network. - Parameters: + Parameters + ---------- linestring: shapely linestring reversed (bool, optional): If True, returns the end and start points instead of the start and end points. Defaults to False. - Returns: + Returns + ------- numpy.ndarray: Flattened array of start and end coordinates. """ start_coords = n.buses.loc[line.bus0, ["x", "y"]].values @@ -198,11 +200,13 @@ def get_bus_coords_from_port(linestring, port=0): """ Extracts the coordinates of a specified port from a given linestring. - Parameters: + Parameters + ---------- linestring: The shapely linestring. port (int): The index of the port to extract coordinates from. Default is 0. - Returns: + Returns + ------- tuple: The coordinates of the specified port as a tuple (x, y). """ coords = np.asarray(linestring.coords) @@ -216,12 +220,14 @@ def find_closest_lines(lines, new_lines, distance_upper_bound=0.1, type="new"): """ Find the closest lines in the existing set of lines to a set of new lines. - Parameters: + Parameters + ---------- lines (pandas.DataFrame): DataFrame of the existing lines. new_lines (pandas.DataFrame): DataFrame with column geometry containing the new lines. distance_upper_bound (float, optional): Maximum distance to consider a line as a match. Defaults to 0.1 which corresponds to approximately 15 km. - Returns: + Returns + ------- pandas.Series: Series containing with index the new lines and values providing closest existing line. 
""" diff --git a/scripts/build_transport_demand.py b/scripts/build_transport_demand.py index d34e61b48..1b3adcb7a 100644 --- a/scripts/build_transport_demand.py +++ b/scripts/build_transport_demand.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/clean_osm_data.py b/scripts/clean_osm_data.py index 3900607f8..cd38053e7 100644 --- a/scripts/clean_osm_data.py +++ b/scripts/clean_osm_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur > # # SPDX-License-Identifier: MIT @@ -55,11 +54,13 @@ def _create_polygon(row): """ Create a Shapely Polygon from a list of coordinate dictionaries. - Parameters: + Parameters + ---------- coords (list): List of dictionaries with 'lat' and 'lon' keys representing coordinates. - Returns: + Returns + ------- shapely.geometry.Polygon: The constructed polygon object. """ # Extract coordinates as tuples @@ -227,11 +228,13 @@ def _check_voltage(voltage, list_voltages): """ Check if the given voltage is present in the list of allowed voltages. - Parameters: + Parameters + ---------- voltage (str): The voltage to check. list_voltages (list): A list of allowed voltages. - Returns: + Returns + ------- bool: True if the voltage is present in the list of allowed voltages, False otherwise. """ @@ -359,11 +362,13 @@ def _distribute_to_circuits(row): Distributes the number of circuits or cables to individual circuits based on the given row data. - Parameters: + Parameters + ---------- - row: A dictionary representing a row of data containing information about circuits and cables. - Returns: + Returns + ------- - single_circuit: The number of circuits to be assigned to each individual circuit. """ @@ -383,11 +388,13 @@ def _import_lines_and_cables(path_lines): """ Import lines and cables from the given input paths. 
- Parameters: + Parameters + ---------- - path_lines (dict): A dictionary containing the input paths for lines and cables data. - Returns: + Returns + ------- - df_lines (DataFrame): A DataFrame containing the imported lines and cables data. """ @@ -418,7 +425,7 @@ def _import_lines_and_cables(path_lines): logger.info( f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_lines[key])).zfill(2)}: {ip}" ) - with open(ip, "r") as f: + with open(ip) as f: data = json.load(f) df = pd.DataFrame(data["elements"]) @@ -463,7 +470,6 @@ def _import_lines_and_cables(path_lines): def _import_routes_relation(path_relation): - """ """ columns = [ "id", "bounds", @@ -489,7 +495,7 @@ def _import_routes_relation(path_relation): logger.info( f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_relation[key])).zfill(2)}: {ip}" ) - with open(ip, "r") as f: + with open(ip) as f: data = json.load(f) df = pd.DataFrame(data["elements"]) @@ -533,10 +539,12 @@ def _create_single_link(row): """ Create a single link from multiple rows within a OSM link relation. - Parameters: + Parameters + ---------- - row: A row of OSM data containing information about the link. - Returns: + Returns + ------- - single_link: A single LineString representing the link. This function takes a row of OSM data and extracts the relevant information @@ -592,10 +600,12 @@ def _create_line(row): """ Create a line from multiple rows. Drops closed geometries (substations). - Parameters: + Parameters + ---------- - row: A row of OSM data containing information about the relation/line. - Returns: + Returns + ------- - line: LineString/MultiLineString representing the relation/line. """ df = pd.json_normalize(row["members"]) @@ -618,10 +628,12 @@ def _drop_duplicate_lines(df_lines): Drop duplicate lines from the given dataframe. Duplicates are usually lines cross-border lines or slightly outside the country border of focus. 
- Parameters: + Parameters + ---------- - df_lines (pandas.DataFrame): The dataframe containing lines data. - Returns: + Returns + ------- - df_lines (pandas.DataFrame): The dataframe with duplicate lines removed and cleaned data. @@ -662,12 +674,14 @@ def _filter_by_voltage(df, min_voltage=220000): """ Filter rows in the DataFrame based on the voltage in V. - Parameters: + Parameters + ---------- - df (pandas.DataFrame): The DataFrame containing the substations or lines data. - min_voltage (int, optional): The minimum voltage value to filter the rows. Defaults to 220000 [unit: V]. - Returns: + Returns + ------- - filtered df (pandas.DataFrame): The filtered DataFrame containing the lines or substations above min_voltage. - list_voltages (list): A list of unique voltage values above min_voltage. @@ -708,13 +722,15 @@ def _clean_substations(df_substations, list_voltages): - Set remaining invalid frequency values that are not in ['0', '50'] to '50'. - Parameters: + Parameters + ---------- - df_substations (pandas.DataFrame): The input dataframe containing substation data. - list_voltages (list): A list of voltages above min_voltage to filter the substation data. - Returns: + Returns + ------- - df_substations (pandas.DataFrame): The cleaned substation dataframe. """ df_substations = df_substations.copy() @@ -736,10 +752,10 @@ def _clean_substations(df_substations, list_voltages): == df_substations["split_elements"] ) - op_freq = lambda row: row["frequency"].split(";")[row["split_count"] - 1] - df_substations.loc[bool_frequency_len & bool_split, "frequency"] = ( - df_substations.loc[bool_frequency_len & bool_split,].apply(op_freq, axis=1) + df_substations.loc[ + bool_frequency_len & bool_split, + ].apply(lambda row: row["frequency"].split(";")[row["split_count"] - 1], axis=1) ) df_substations = _split_cells(df_substations, cols=["frequency"]) @@ -944,11 +960,13 @@ def _create_substations_geometry(df_substations): """ Creates geometries. 
- Parameters: + Parameters + ---------- df_substations (DataFrame): The input DataFrame containing the substations data. - Returns: + Returns + ------- df_substations (DataFrame): A new DataFrame with the polygons ["polygon"] of the substations geometries. """ @@ -965,11 +983,13 @@ def _create_substations_poi(df_substations, tol=BUS_TOL / 2): """ Creates Pole of Inaccessibility (PoI) from geometries and keeps the original polygons. - Parameters: + Parameters + ---------- df_substations (DataFrame): The input DataFrame containing the substations data. - Returns: + Returns + ------- df_substations (DataFrame): A new DataFrame with the PoI ["geometry"] and polygons ["polygon"] of the substations geometries. """ @@ -990,14 +1010,17 @@ def _create_lines_geometry(df_lines): """ Create line geometry for the given DataFrame of lines. - Parameters: + Parameters + ---------- - df_lines (pandas.DataFrame): DataFrame containing lines data. - Returns: + Returns + ------- - df_lines (pandas.DataFrame): DataFrame with transformed 'geometry' column (type: shapely LineString). - Notes: + Notes + ----- - This function transforms 'geometry' column in the input DataFrame by applying the '_create_linestring' function to each row. - It then drops rows where the geometry has equal start and end points, @@ -1018,11 +1041,13 @@ def _add_bus_poi_to_line(linestring, point): Adds the PoI of a substation to a linestring by extending the linestring with a new segment. - Parameters: + Parameters + ---------- linestring (LineString): The original linestring to extend. point (Point): The PoI of the bus. - Returns: + Returns + ------- merged (LineString): The extended linestring with the new segment. 
""" start = linestring.coords[0] @@ -1237,7 +1262,7 @@ def _import_substations(path_substations): logger.info( f" - Importing {key} {str(idx+1).zfill(2)}/{str(len(path_substations[key])).zfill(2)}: {ip}" ) - with open(ip, "r") as f: + with open(ip) as f: data = json.load(f) df = pd.DataFrame(data["elements"]) @@ -1319,9 +1344,9 @@ def _import_substations(path_substations): .reset_index() ) df_substations_relation_members_grouped["geometry"] = ( - df_substations_relation_members_grouped["linestring"].apply( - lambda x: x.convex_hull - ) + df_substations_relation_members_grouped[ + "linestring" + ].apply(lambda x: x.convex_hull) ) df_substations_relation = ( @@ -1347,13 +1372,15 @@ def _remove_lines_within_substations(gdf_lines, gdf_substations_polygon): GeoDataFrame of lines. These are not needed to create network (e.g. bus bars, switchgear, etc.) - Parameters: + Parameters + ---------- - gdf_lines (GeoDataFrame): A GeoDataFrame containing lines with 'line_id' and 'geometry' columns. - gdf_substations_polygon (GeoDataFrame): A GeoDataFrame containing substation polygons. - Returns: + Returns + ------- GeoDataFrame: A new GeoDataFrame without lines within substation polygons. """ logger.info("Identifying and removing lines within substation polygons...") @@ -1376,11 +1403,13 @@ def _merge_touching_polygons(df): """ Merge touching polygons in a GeoDataFrame. - Parameters: + Parameters + ---------- - df: pandas.DataFrame or geopandas.GeoDataFrame The input DataFrame containing the polygons to be merged. - Returns: + Returns + ------- - gdf: geopandas.GeoDataFrame The GeoDataFrame with merged polygons. """ @@ -1410,11 +1439,13 @@ def _add_endpoints_to_line(linestring, polygon_dict, tol=BUS_TOL / 2): """ Adds endpoints to a line by removing any overlapping areas with polygons. - Parameters: + Parameters + ---------- linestring (LineString): The original line to add endpoints to. 
polygon_dict (dict): A dictionary of polygons, where the keys are bus IDs and the values are the corresponding polygons. - Returns: + Returns + ------- LineString: The modified line with added endpoints. """ if not polygon_dict: @@ -1427,7 +1458,7 @@ def _add_endpoints_to_line(linestring, polygon_dict, tol=BUS_TOL / 2): # difference with polygon linestring_new = linestring.difference(polygon_unary) - if type(linestring_new) == MultiLineString: + if linestring_new is MultiLineString: # keep the longest line in the multilinestring linestring_new = max(linestring_new.geoms, key=lambda x: x.length) @@ -1441,11 +1472,13 @@ def _get_polygons_at_endpoints(linestring, polygon_dict): """ Get the polygons that contain the endpoints of a given linestring. - Parameters: + Parameters + ---------- linestring (LineString): The linestring for which to find the polygons at the endpoints. polygon_dict (dict): A dictionary containing polygons as values, with bus_ids as keys. - Returns: + Returns + ------- dict: A dictionary containing bus_ids as keys and polygons as values, where the polygons contain the endpoints of the linestring. """ # Get the endpoints of the linestring @@ -1468,11 +1501,13 @@ def _extend_lines_to_substations(gdf_lines, gdf_substations_polygon, tol=BUS_TOL the nearest substations represented by the polygons in the `gdf_substations_polygon` GeoDataFrame. - Parameters: + Parameters + ---------- gdf_lines (GeoDataFrame): A GeoDataFrame containing the lines to be extended. gdf_substations_polygon (GeoDataFrame): A GeoDataFrame containing the polygons representing substations. - Returns: + Returns + ------- GeoDataFrame: A new GeoDataFrame with the lines extended to the substations. 
""" logger.info( diff --git a/scripts/cluster_gas_network.py b/scripts/cluster_gas_network.py index 4e2d7ee49..7aac3e1e3 100755 --- a/scripts/cluster_gas_network.py +++ b/scripts/cluster_gas_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index adfa64121..ba3cdb315 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -105,7 +104,6 @@ import pypsa import xarray as xr from _helpers import configure_logging, set_scenario_config -from base_network import append_bus_shapes from packaging.version import Version, parse from pypsa.clustering.spatial import ( busmap_by_greedy_modularity, @@ -320,12 +318,14 @@ def cluster_regions( Cluster regions based on busmaps and save the results to a file and to the network. - Parameters: + Parameters + ---------- - busmaps (list): A list of busmaps used for clustering. - regions (gpd.GeoDataFrame): The regions to cluster. - with_country (bool): Whether to keep country column. 
- Returns: + Returns + ------- None """ busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0]) diff --git a/scripts/definitions/heat_sector.py b/scripts/definitions/heat_sector.py index 2d16d7e39..15cf41ae5 100644 --- a/scripts/definitions/heat_sector.py +++ b/scripts/definitions/heat_sector.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/definitions/heat_system.py b/scripts/definitions/heat_system.py index b8db9d5b9..398efe5de 100644 --- a/scripts/definitions/heat_system.py +++ b/scripts/definitions/heat_system.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/definitions/heat_system_type.py b/scripts/definitions/heat_system_type.py index 305a97b00..d1ce05d1f 100644 --- a/scripts/definitions/heat_system_type.py +++ b/scripts/definitions/heat_system_type.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/determine_availability_matrix.py b/scripts/determine_availability_matrix.py index 46a4107e7..b100f9617 100644 --- a/scripts/determine_availability_matrix.py +++ b/scripts/determine_availability_matrix.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/determine_availability_matrix_MD_UA.py b/scripts/determine_availability_matrix_MD_UA.py index caf81a303..ebbf54029 100644 --- a/scripts/determine_availability_matrix_MD_UA.py +++ b/scripts/determine_availability_matrix_MD_UA.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/make_summary.py b/scripts/make_summary.py index c842131e1..c2c2e6cfa 100644 --- a/scripts/make_summary.py +++ 
b/scripts/make_summary.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/make_summary_perfect.py b/scripts/make_summary_perfect.py index 917580759..fbd60cba0 100644 --- a/scripts/make_summary_perfect.py +++ b/scripts/make_summary_perfect.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -12,10 +11,13 @@ import pandas as pd import pypsa from _helpers import set_scenario_config -from make_summary import calculate_cfs # noqa: F401 -from make_summary import calculate_nodal_cfs # noqa: F401 -from make_summary import calculate_nodal_costs # noqa: F401 -from make_summary import assign_carriers, assign_locations +from make_summary import ( + assign_carriers, + assign_locations, + calculate_cfs, # noqa: F401 + calculate_nodal_cfs, # noqa: F401 + calculate_nodal_costs, # noqa: F401 +) from prepare_sector_network import prepare_costs from pypsa.descriptors import get_active_assets from six import iteritems diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py index 3766d54c8..7f5f053cb 100644 --- a/scripts/plot_gas_network.py +++ b/scripts/plot_gas_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py index 3108fe585..011fad6dd 100644 --- a/scripts/plot_hydrogen_network.py +++ b/scripts/plot_hydrogen_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py index 4ce6b72fc..6ff2c186b 100644 --- a/scripts/plot_power_network.py +++ b/scripts/plot_power_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # 
SPDX-License-Identifier: MIT diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py index 0b313595f..8fe9166ce 100644 --- a/scripts/plot_power_network_clustered.py +++ b/scripts/plot_power_network_clustered.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py index 4b1c81bcb..16ec0bf7c 100644 --- a/scripts/plot_power_network_perfect.py +++ b/scripts/plot_power_network_perfect.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur > # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_statistics.py b/scripts/plot_statistics.py index 9adf2ca08..97d7489ec 100644 --- a/scripts/plot_statistics.py +++ b/scripts/plot_statistics.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 061c8877f..03cfab3a7 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_validation_cross_border_flows.py b/scripts/plot_validation_cross_border_flows.py index 02cebea48..8ab4e99b2 100644 --- a/scripts/plot_validation_cross_border_flows.py +++ b/scripts/plot_validation_cross_border_flows.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur > # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_validation_electricity_prices.py b/scripts/plot_validation_electricity_prices.py index 69107745d..bbac0be47 100644 --- a/scripts/plot_validation_electricity_prices.py +++ b/scripts/plot_validation_electricity_prices.py @@ -1,5 +1,3 @@ -#!/usr/bin/env 
python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/plot_validation_electricity_production.py b/scripts/plot_validation_electricity_production.py index a4390e23d..6a399b46c 100644 --- a/scripts/plot_validation_electricity_production.py +++ b/scripts/plot_validation_electricity_production.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -129,7 +127,7 @@ diff = optimized - historical diff.clip(lower=0).plot.area( - ax=axes[2], **kwargs, title="$\Delta$ (Optimized - Historic)" + ax=axes[2], **kwargs, title=r"$\Delta$ (Optimized - Historic)" ) lim = axes[2].get_ylim()[1] diff.clip(upper=0).plot.area(ax=axes[2], **kwargs) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index 96a41b8ff..91cbb4e61 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/prepare_osm_network_release.py b/scripts/prepare_osm_network_release.py index 069246c75..5d0f457c4 100644 --- a/scripts/prepare_osm_network_release.py +++ b/scripts/prepare_osm_network_release.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -113,11 +112,13 @@ def create_geometries(network, crs=GEO_CRS): """ Create GeoDataFrames for different network components with specified coordinate reference system (CRS). - Parameters: + Parameters + ---------- network (PyPSA Network): The network object containing buses, lines, links, converters, and transformers data. crs (str, optional): Coordinate reference system to be used for the GeoDataFrames. Defaults to GEO_CRS. 
- Returns: + Returns + ------- tuple: A tuple containing the following GeoDataFrames: - buses (GeoDataFrame): GeoDataFrame containing bus data with geometries. - lines (GeoDataFrame): GeoDataFrame containing line data with geometries. diff --git a/scripts/prepare_perfect_foresight.py b/scripts/prepare_perfect_foresight.py index a382c03ad..c351a8f2f 100644 --- a/scripts/prepare_perfect_foresight.py +++ b/scripts/prepare_perfect_foresight.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -7,7 +6,6 @@ """ import logging -from typing import List import numpy as np import pandas as pd @@ -33,6 +31,7 @@ def get_missing(df, n, c): df: pandas DataFrame, static values of pypsa components n : pypsa Network to which new assets should be added c : string, pypsa component.list_name (e.g. "generators") + Return: pd.DataFrame with static values of missing assets """ @@ -487,7 +486,7 @@ def apply_time_segmentation_perfect( return n -def update_heat_pump_efficiency(n: pypsa.Network, years: List[int]): +def update_heat_pump_efficiency(n: pypsa.Network, years: list[int]): """ Update the efficiency of heat pumps from previous years to current year (e.g. 2030 heat pumps receive 2040 heat pump COPs in 2030). @@ -498,7 +497,7 @@ def update_heat_pump_efficiency(n: pypsa.Network, years: List[int]): ---------- n : pypsa.Network The concatenated network. - years : List[int] + years : list[int] List of planning horizon years. 
Returns @@ -515,9 +514,9 @@ def update_heat_pump_efficiency(n: pypsa.Network, years: List[int]): (year, slice(None)), heat_pump_idx.str[:-4] + str(year) ] # in , set the efficiency of all heat pumps to the correct efficiency - n.links_t["efficiency"].loc[ - (year, slice(None)), heat_pump_idx - ] = correct_efficiency.values + n.links_t["efficiency"].loc[(year, slice(None)), heat_pump_idx] = ( + correct_efficiency.values + ) if __name__ == "__main__": diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 1ec46ec43..3eac7ee0e 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -33,7 +32,6 @@ from build_transport_demand import transport_degree_factor from definitions.heat_sector import HeatSector from definitions.heat_system import HeatSystem -from definitions.heat_system_type import HeatSystemType from networkx.algorithms import complement from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation from prepare_network import maybe_adjust_costs_and_potentials @@ -456,9 +454,7 @@ def update_wind_solar_costs( ) logger.info( - "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( - connection_cost.min(), connection_cost.max(), tech - ) + f"Added connection cost of {connection_cost.min():0.0f}-{connection_cost.max():0.0f} Eur/MW/a to {tech}" ) n.generators.loc[n.generators.carrier == tech, "capital_cost"] = ( @@ -2039,12 +2035,14 @@ def add_heat( """ Add heat sector to the network. - Parameters: + Parameters + ---------- n (pypsa.Network): The PyPSA network object. costs (pd.DataFrame): DataFrame containing cost information. cop (xr.DataArray): DataArray containing coefficient of performance (COP) values. 
- Returns: + Returns + ------- None """ logger.info("Add heat sector") @@ -2075,9 +2073,7 @@ def add_heat( # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - for ( - heat_system - ) in ( + for heat_system in ( HeatSystem ): # this loops through all heat systems defined in _entities.HeatSystem overdim_factor = options["overdimension_heat_generators"][ @@ -2182,7 +2178,6 @@ def add_heat( carrier=heat_carrier, ) - costs_name_heat_source = heat_system.heat_source_costs_name(heat_source) if heat_source in snakemake.params.direct_utilisation_heat_sources: capital_cost = ( costs.at[ @@ -4233,7 +4228,8 @@ def cluster_heat_buses(n): """ def define_clustering(attributes, aggregate_dict): - """Define how attributes should be clustered. + """ + Define how attributes should be clustered. Input: attributes : pd.Index() aggregate_dict: dictionary (key: name of attribute, value @@ -4344,7 +4340,7 @@ def set_temporal_aggregation(n, resolution, snapshot_weightings): def lossy_bidirectional_links(n, carrier, efficiencies={}): - "Split bidirectional links into two unidirectional links to include transmission losses." 
+ """Split bidirectional links into two unidirectional links to include transmission losses.""" carrier_i = n.links.query("carrier == @carrier").index diff --git a/scripts/retrieve_cost_data.py b/scripts/retrieve_cost_data.py index 6b03095ca..ad4338c1c 100644 --- a/scripts/retrieve_cost_data.py +++ b/scripts/retrieve_cost_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index b6be09689..5d96165d1 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# Copyright 2019-2024 Fabian Hofmann (TUB, FIAS), Fabian Neumann (TUB) # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_electricity_demand.py b/scripts/retrieve_electricity_demand.py index 34672ffd4..3b8d2fa84 100644 --- a/scripts/retrieve_electricity_demand.py +++ b/scripts/retrieve_electricity_demand.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_eurostat_data.py b/scripts/retrieve_eurostat_data.py index 20325d721..41a7ed512 100644 --- a/scripts/retrieve_eurostat_data.py +++ b/scripts/retrieve_eurostat_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_eurostat_household_data.py b/scripts/retrieve_eurostat_household_data.py index 8057de3eb..beef7e625 100644 --- a/scripts/retrieve_eurostat_household_data.py +++ b/scripts/retrieve_eurostat_household_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py index b5347aa09..60759f10f 
100644 --- a/scripts/retrieve_gas_infrastructure_data.py +++ b/scripts/retrieve_gas_infrastructure_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_heat_source_utilisation_potentials.py b/scripts/retrieve_heat_source_utilisation_potentials.py index cb9f53758..c5a5af59b 100644 --- a/scripts/retrieve_heat_source_utilisation_potentials.py +++ b/scripts/retrieve_heat_source_utilisation_potentials.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_jrc_idees.py b/scripts/retrieve_jrc_idees.py index 1f08d8bdf..79951b740 100644 --- a/scripts/retrieve_jrc_idees.py +++ b/scripts/retrieve_jrc_idees.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -7,9 +6,7 @@ """ import logging -import os import zipfile -from pathlib import Path from _helpers import configure_logging, progress_retrieve, set_scenario_config diff --git a/scripts/retrieve_monthly_fuel_prices.py b/scripts/retrieve_monthly_fuel_prices.py index d862969fc..29fe18b1f 100644 --- a/scripts/retrieve_monthly_fuel_prices.py +++ b/scripts/retrieve_monthly_fuel_prices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/retrieve_osm_data.py b/scripts/retrieve_osm_data.py index a258249a4..5e32da60d 100644 --- a/scripts/retrieve_osm_data.py +++ b/scripts/retrieve_osm_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -98,7 +97,6 @@ def retrieve_osm_data( # Send the request response = requests.post(overpass_url, data=op_query) response.raise_for_status() # Raise HTTPError for bad responses - data = response.json() filepath = output[f] parentfolder 
= os.path.dirname(filepath) diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 29bcc671d..8a2726862 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -68,7 +67,6 @@ import logging from functools import reduce -from typing import Tuple import geopandas as gpd import numpy as np @@ -76,7 +74,6 @@ import pypsa import scipy as sp from _helpers import configure_logging, set_scenario_config -from base_network import append_bus_shapes from cluster_network import cluster_regions from pypsa.clustering.spatial import busmap_by_stubs, get_clustering_from_busmap from scipy.sparse.csgraph import connected_components, dijkstra @@ -86,7 +83,7 @@ def simplify_network_to_380( n: pypsa.Network, linetype_380: str -) -> Tuple[pypsa.Network, pd.Series]: +) -> tuple[pypsa.Network, pd.Series]: """ Fix all lines to a voltage level of 380 kV and remove all transformers. 
@@ -137,7 +134,7 @@ def _remove_clustered_buses_and_branches(n: pypsa.Network, busmap: pd.Series) -> def simplify_links( n: pypsa.Network, p_max_pu: int | float -) -> Tuple[pypsa.Network, pd.Series]: +) -> tuple[pypsa.Network, pd.Series]: ## Complex multi-node links are folded into end-points logger.info("Simplifying connected link components") @@ -217,8 +214,8 @@ def split_links(nodes, added_supernodes): if len(buses) <= 2: continue - logger.debug("nodes = {}".format(labels.index[labels == lbl])) - logger.debug("b = {}\nbuses = {}\nlinks = {}".format(b, buses, links)) + logger.debug(f"nodes = {labels.index[labels == lbl]}") + logger.debug(f"b = {b}\nbuses = {buses}\nlinks = {links}") m = sp.spatial.distance_matrix( n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]] @@ -228,7 +225,7 @@ def split_links(nodes, added_supernodes): all_links = [i for _, i in sum(links, [])] lengths = n.links.loc[all_links, "length"] - name = lengths.idxmax() + "+{}".format(len(links) - 1) + name = lengths.idxmax() + f"+{len(links) - 1}" params = dict( carrier="DC", bus0=b[0], @@ -275,7 +272,7 @@ def split_links(nodes, added_supernodes): def remove_stubs( n: pypsa.Network, simplify_network: dict -) -> Tuple[pypsa.Network, pd.Series]: +) -> tuple[pypsa.Network, pd.Series]: logger.info("Removing stubs") across_borders = simplify_network["remove_stubs_across_borders"] @@ -291,7 +288,7 @@ def aggregate_to_substations( n: pypsa.Network, buses_i: pd.Index | list, aggregation_strategies: dict | None = None, -) -> Tuple[pypsa.Network, pd.Series]: +) -> tuple[pypsa.Network, pd.Series]: # can be used to aggregate a selection of buses to electrically closest neighbors logger.info("Aggregating buses to substations") if aggregation_strategies is None: @@ -341,13 +338,16 @@ def aggregate_to_substations( def find_closest_bus(n, x, y, tol=2000): """ Find the index of the closest bus to the given coordinates within a specified tolerance. 
- Parameters: + + Parameters + ---------- n (pypsa.Network): The network object. x (float): The x-coordinate (longitude) of the target location. y (float): The y-coordinate (latitude) of the target location. tol (float): The distance tolerance in meters. Default is 2000 meters. - Returns: + Returns + ------- int: The index of the closest bus to the target location within the tolerance. Returns None if no bus is within the tolerance. """ @@ -379,10 +379,12 @@ def remove_converters(n: pypsa.Network) -> pypsa.Network: Remove all converters from the network and remap all buses that were originally connected to the converter to the connected AC bus. Preparation step before simplifying links. - Parameters: + Parameters + ---------- n (pypsa.Network): The network object. - Returns: + Returns + ------- n (pypsa.Network): The network object with all converters removed. """ # Extract converters diff --git a/scripts/solve_network.py b/scripts/solve_network.py index e69801e4e..bce09c64e 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT @@ -155,6 +154,7 @@ def add_land_use_constraint(n): def add_solar_potential_constraints(n, config): """ Add constraint to make sure the sum capacity of all solar technologies (fixed, tracking, ets. ) is below the region potential. + Example: ES1 0: total solar potential is 10 GW, meaning: solar potential : 10 GW @@ -562,7 +562,7 @@ def add_EQ_constraints(n, o, scaling=1e-1): each node to produce on average at least 70% of its consumption. """ # TODO: Generalize to cover myopic and other sectors? 
- float_regex = "[0-9]*\.?[0-9]+" + float_regex = r"[0-9]*\.?[0-9]+" level = float(re.findall(float_regex, o)[0]) if o[-1] == "c": ggrouper = n.generators.bus.map(n.buses.country) diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index 068c58c40..7c078ae51 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/scripts/time_aggregation.py b/scripts/time_aggregation.py index ec43afd7a..03230403c 100644 --- a/scripts/time_aggregation.py +++ b/scripts/time_aggregation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/test/__init__.py b/test/__init__.py index 24cfe8914..d57164a38 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT diff --git a/test/conftest.py b/test/conftest.py index 7031adf98..b3bd33bed 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,10 +1,7 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT -# coding: utf-8 - import pathlib import pandas as pd @@ -26,7 +23,7 @@ def ac_dc_network(): @pytest.fixture(scope="session") def config(): path_config = pathlib.Path(pathlib.Path.cwd(), "config", "config.default.yaml") - with open(path_config, "r") as file: + with open(path_config) as file: config_dict = yaml.safe_load(file) return config_dict diff --git a/test/test_base_network.py b/test/test_base_network.py index 84e473954..c7a5d5c96 100644 --- a/test/test_base_network.py +++ b/test/test_base_network.py @@ -1,9 +1,7 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT -# coding: utf-8 """ Tests the 
functionalities of scripts/base_network.py. """ @@ -34,7 +32,6 @@ _set_electrical_parameters_lines_eg, _set_electrical_parameters_lines_osm, _set_electrical_parameters_links_osm, - _set_electrical_parameters_transformers, ) path_cwd = pathlib.Path.cwd() diff --git a/test/test_build_powerplants.py b/test/test_build_powerplants.py index 4481e2040..bbca5f953 100644 --- a/test/test_build_powerplants.py +++ b/test/test_build_powerplants.py @@ -1,9 +1,7 @@ -# -*- coding: utf-8 -*- # SPDX-FileCopyrightText: Contributors to PyPSA-Eur # # SPDX-License-Identifier: MIT -# coding: utf-8 """ Tests the functionalities of scripts/build_powerplants.py. """