diff --git a/eedl/google_cloud.py b/eedl/google_cloud.py
index 7d00db6..bb98db1 100644
--- a/eedl/google_cloud.py
+++ b/eedl/google_cloud.py
@@ -1,7 +1,7 @@
 import os
 import re
 from pathlib import Path
-from typing import Union
+from typing import List, Union

 import requests
@@ -9,16 +9,20 @@
 from google.cloud import storage  # type: ignore


-def get_public_export_urls(bucket_name: str, prefix: str = ""):
+def get_public_export_urls(bucket_name: str, prefix: str = "") -> List[str]:
     """
-    Downloads items from a *public* Google Storage bucket without using a GCloud login. Filters only to files
-    with the specified prefix
-    :param bucket_name:
-    :param prefix: A prefix to use to filter items in the bucket - only URLs where the path matches this prefix will be returned - defaults to all files
-    :return: list of urls
+    Gets the URLs of items in a *public* Google Cloud Storage bucket without using a GCloud login. Filters only to files
+    with the specified prefix.
+
+    :param bucket_name: Name of the Google Cloud Storage bucket to pull data from.
+    :type bucket_name: str
+    :param prefix: A prefix to use to filter items in the bucket - only URLs where the path matches this prefix will be returned. Defaults to all files.
+    :type prefix: str
+    :return: A list of URLs.
+    :rtype: List[str]
     """
-    base_url = "http://storage.googleapis.com/"
+    base_url = "https://storage.googleapis.com/"
     request_url = f"{base_url}{bucket_name}/"

     # get the content of the bucket (it needs to be public
@@ -33,7 +37,17 @@ def get_public_export_urls(bucket_name: str, prefix: str = ""):
     return filtered


-def download_public_export(bucket_name: str, output_folder: Union[str, Path], prefix: str = ""):
+def download_public_export(bucket_name: str, output_folder: Union[str, Path], prefix: str = "") -> None:
+    """
+    Downloads items with the specified prefix from a public bucket to a local folder.
+
+    :param bucket_name: Name of the Google Cloud Storage bucket to pull data from.
+    :type bucket_name: str
+    :param output_folder: Destination folder for exported data.
+    :type output_folder: Union[str, Path]
+    :param prefix: A prefix to use to filter items in the bucket - only URLs where the path matches this prefix will be returned. Defaults to all files.
+    :type prefix: str
+    :return: None
+    """
     # get the urls of items in the bucket with the specified prefix
     urls = get_public_export_urls(bucket_name, prefix)
@@ -49,13 +63,26 @@
 def download_export(bucket_name: str,
                     output_folder: Union[str, Path],
                     prefix: str,
                     delimiter: str = "/",
-                    autodelete: bool = True):
+                    autodelete: bool = True) -> None:
+    """
+    Downloads a blob from the bucket. Modified from Google Cloud sample documentation at
+    https://cloud.google.com/storage/docs/samples/storage-download-file#storage_download_file-python and
+    https://cloud.google.com/storage/docs/samples/storage-list-files-with-prefix
+
+    :param bucket_name: Name of the Google Cloud Storage bucket to pull data from.
+    :type bucket_name: str
+    :param output_folder: Destination folder for exported data.
+    :type output_folder: Union[str, Path]
+    :param prefix: A prefix to use to filter items in the bucket - only URLs where the path matches this prefix will be returned. Defaults to all files.
+    :type prefix: str
+    :param delimiter: Delimiter used for getting the list of blobs in the Google Cloud Storage bucket. Defaults to "/".
+    :type delimiter: str
+    :param autodelete: Whether to delete blobs from the bucket once their contents have been downloaded. Defaults to True.
+    :type autodelete: bool
+    :return: None
+    """

     # The ID of your GCS bucket
     # bucket_name = "your-bucket-name"
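As a quick usage sketch of the functions above (the bucket name, prefix, and output folder here are hypothetical placeholders):

    from eedl.google_cloud import download_public_export

    download_public_export(
        bucket_name="my-public-eedl-exports",  # illustrative bucket name
        output_folder="/tmp/eedl_downloads",
        prefix="valley_et_2018_",  # only files whose paths start with this prefix
    )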
diff --git a/eedl/image.py b/eedl/image.py
index 880309f..cbda025 100644
--- a/eedl/image.py
+++ b/eedl/image.py
@@ -22,9 +22,12 @@
 def _get_fiona_args(polygon_path: Union[str, Path]) -> Dict[str, Union[str, Path]]:
     """
     A simple utility that detects if, maybe, we're dealing with an Esri File Geodatabase. This is the wrong way
-    to do this, but it'll work in many situations
-    :param polygon_path:
-    :return:
+    to do this, but it'll work in many situations.
+
+    :param polygon_path: File location of polygons.
+    :type polygon_path: Union[str, Path]
+    :return: Returns the full path and, depending on the file format, the file name in a dictionary.
+    :rtype: Dict[str, Union[str, Path]]
     """

     parts = os.path.split(polygon_path)
@@ -38,10 +41,14 @@
 def download_images_in_folder(source_location: Union[str, Path], download_location: Union[str, Path], prefix: str) -> None:
     """
     Handles pulling data from Google Drive over to a local location, filtering by a filename prefix and folder
-    :param source_location:
-    :param download_location:
-    :param prefix:
-    :return:
+
+    :param source_location: Directory to search for files.
+    :type source_location: Union[str, Path]
+    :param download_location: Destination for files with the specified prefix.
+    :type download_location: Union[str, Path]
+    :param prefix: A prefix to use to filter items in the folder - only files where the name matches this prefix will be moved.
+    :type prefix: str
+    :return: None
     """
     folder_search_path: Union[str, Path] = source_location
     files = [filename for filename in os.listdir(folder_search_path) if filename.startswith(prefix)]
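A short sketch of pulling a finished Drive export into a working directory (both paths are hypothetical):

    from eedl.image import download_images_in_folder

    download_images_in_folder(
        source_location="G:/My Drive/ee_exports",  # local Google Drive sync folder
        download_location="C:/data/ee_downloads",
        prefix="valley_et_2018",  # only files starting with this name are moved
    )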
+ """ INCOMPLETE_STATUSES = ("READY", "UNSUBMITTED", "RUNNING") COMPLETE_STATUSES = ["COMPLETED"] FAILED_STATUSES = ["CANCEL_REQUESTED", "CANCELLED", "FAILED"] def __init__(self) -> None: + """ + Initialized the TaskRegistry class and defaults images to "[]" and the callback function to "None" + :return: None + """ self.images: List[Image] = [] self.callback: Optional[str] = None - def add(self, image) -> None: + def add(self, image: ee.image.Image) -> None: + """ + Adds an Earth Engine image to the list of Earth Engine images + + :param image: Earth Engine image to be added to the list of images + :type image: ee.image.Image + :return: None + """ self.images.append(image) @property def incomplete_tasks(self) -> List[ee.image.Image]: - initial_tasks = [image for image in self.images if image._last_task_status['state'] in self.INCOMPLETE_STATUSES] + """ + List of Earth Engine images that have not been completed yet + + :return: List of Earth Engine images that have not been completed yet + :rtype: List[ee.image.Image] + """ + initial_tasks = [image for image in self.images if image.last_task_status['state'] in self.INCOMPLETE_STATUSES] for image in initial_tasks: # update anything that's currently running or waiting first image._check_task_status() - return [image for image in self.images if image._last_task_status['state'] in self.INCOMPLETE_STATUSES] + return [image for image in self.images if image.last_task_status['state'] in self.INCOMPLETE_STATUSES] @property def complete_tasks(self) -> List[ee.image.Image]: - return [image for image in self.images if image._last_task_status['state'] in self.COMPLETE_STATUSES + self.FAILED_STATUSES] + """ + List of Earth Engine images + + :return: List of Earth Engine images + :rtype: List[ee.image.Image] + """ + return [image for image in self.images if image.last_task_status['state'] in self.COMPLETE_STATUSES + self.FAILED_STATUSES] @property def failed_tasks(self) -> List[ee.image.Image]: @@ -82,9 +115,20 @@ def failed_tasks(self) -> List[ee.image.Image]: @property def downloadable_tasks(self) -> List[ee.image.Image]: - return [image for image in self.complete_tasks if image.task_data_downloaded is False and image._last_task_status['state'] not in self.FAILED_STATUSES] + """ + List of Earth Engine images that have successfully been downloaded + :return: List of Earth Engine images that have successfully been downloaded + :rtype: List[ee.image.Image] + """ + return [image for image in self.complete_tasks if image.task_data_downloaded is False and image.last_task_status['state'] not in self.FAILED_STATUSES] def download_ready_images(self, download_location: Union[str, Path]) -> None: + """ + + :param download_location: Destination for downloaded files + :type download_location: Union[str, Path] + :return: None + """ for image in self.downloadable_tasks: print(f"{image.filename} is ready for download") image.download_results(download_location=download_location, callback=self.callback) @@ -95,6 +139,19 @@ def wait_for_images(self, callback: Optional[str] = None, try_again_disk_full: bool = True, on_failure="raise") -> None: + """ + Blocker until there are no more incomplete or downloadable tasks left. + + :param download_location: Destination for downloaded files. + :type download_location: Union[str, Path] + :param sleep_time: Time between checking if the disk is full in seconds. Defaults to 10 seconds. + :type sleep_time: int + :param callback: Optional callback function. Executed after image has been downloaded. 
@@ -131,17 +188,20 @@ class Image:
     directly to the class and override any defaults. Options include:

     :param crs: Coordinate Reference System to use for exports in a format Earth Engine understands, such as "EPSG:3310"
+    :type crs: Optional[str]
     :param tile_size: the number of pixels per side of tiles to export
+    :type tile_size: Optional[int]
     :param export_folder: the name of the folder in the chosen export location that will be created for the export
+    :type export_folder: Optional[Union[str, Path]]

     This docstring needs to be checked to ensure it's in a standard format that Sphinx will render
     """

     def __init__(self, **kwargs) -> None:
-        # TODO: We shouldn't define a default drive root folder. This should always be provided by the user,
-        # but we need to figure out where in the workflow this happens.
+        """
-        # Check if the path is valid before we do anything else
+        :return: None
+        """
         self.drive_root_folder: Optional[Union[str, Path]] = None

         self.crs: Optional[str] = None
@@ -168,12 +228,23 @@ def __init__(self, **kwargs) -> None:
         self.filename_description = ""

-    def _set_names(self, filename_prefix: str = "") -> None:
-        self.description = filename_prefix
-        self.filename = f"{self.filename_description}_{filename_prefix}"
+    def _set_names(self, filename_suffix: str = "") -> None:
+        """
+        Sets the image's description and filename from the provided suffix.
+
+        :param filename_suffix: Suffix used to later identify files.
+        :type filename_suffix: str
+        :return: None
+        """
+        self.description = filename_suffix
+        self.filename = f"{self.filename_description}_{filename_suffix}"

     @staticmethod
     def _initialize() -> None:
+        """
+        Handles the initialization and potentially the authentication of Earth Engine.
+
+        :return: None
+        """
         try:  # try just a basic discardable operation used in their docs so that we don't initialize if we don't need to
             _ = ee.Image("NASA/NASADEM_HGT/001")
         except EEException:  # if it fails, try just running initialize
@@ -183,31 +254,66 @@ def _initialize() -> None:
             ee.Authenticate()
             ee.Initialize()

+    @property
+    def last_task_status(self) -> Dict[str, str]:
+        """
+        Allows reading the private variable "_last_task_status".
+
+        :return: The value of the private variable "_last_task_status".
+        :rtype: Dict[str, str]
+        """
+        return self._last_task_status

+    @last_task_status.setter
+    def last_task_status(self, new_status: Dict[str, str]) -> None:
+        """
+        Sets the value of the private variable "_last_task_status" to a specified value.
+
+        :param new_status: Updated status.
+        :type new_status: Dict[str, str]
+        :return: None
+        """
+        self._last_task_status = new_status

     def export(self,
                image: ee.image.Image,
-               filename_prefix: str,
-               export_type: str = "Drive",
+               filename_suffix: str,
+               export_type: str = "drive",
                clip: Optional[ee.geometry.Geometry] = None,
                drive_root_folder: Optional[Union[str, Path]] = None,
                **export_kwargs) -> None:
+        """
+        Handles the exporting of an image.
+
+        :param image: Image for export.
+        :type image: ee.image.Image
+        :param filename_suffix: The unique identifier used internally to identify images.
+        :type filename_suffix: str
+        :param export_type: Specifies how the image should be exported. Either "cloud" or "drive". Defaults to "drive".
+        :type export_type: str
+        :param clip: Geometry the image should be clipped to before export.
+        :type clip: Optional[ee.geometry.Geometry]
+        :param drive_root_folder: The folder for exporting if "drive" is selected.
+        :type drive_root_folder: Optional[Union[str, Path]]
+        :return: None
+        """

-        # If image does not have a clip attribute, the error message is not very helpful. This allows for a custom error message:
+        # If "image" does not have a clip attribute, the error message is not very helpful. This allows for a custom error message:
         if not isinstance(image, ee.image.Image):
             raise ValueError("Invalid image provided for export")

-        if export_type == "Drive" and (drive_root_folder is None or not os.path.exists(drive_root_folder)):
+        if export_type.lower() == "drive" and (drive_root_folder is None or not os.path.exists(drive_root_folder)):
             raise NotADirectoryError("The provided path for the Google Drive export folder is not a valid directory but"
                                      " Drive export was specified. Either change the export type to use Google Cloud"
                                      " and set that up properly (with a bucket, etc), or set the drive_root_folder"
                                      " to a valid folder")
-        elif export_type == "Drive":
+        elif export_type.lower() == "drive":
             self.drive_root_folder = drive_root_folder

         self._initialize()

         self._ee_image = image

-        self._set_names(filename_prefix)
+        self._set_names(filename_suffix)

         ee_kwargs = {
             'description': self.description,
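A sketch of a Drive export using the renamed arguments above (the asset, suffix, and folder are hypothetical):

    import ee
    from eedl.image import Image

    ee_image = ee.Image("NASA/NASADEM_HGT/001")  # any Earth Engine image

    image = Image(crs="EPSG:3310")  # kwargs override the class defaults
    image.export(
        ee_image,
        filename_suffix="statewide_2018",
        export_type="drive",             # matching is now case-insensitive
        drive_root_folder="G:/My Drive"  # must be an existing local folder
    )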
@@ -251,7 +357,11 @@ def export(self,

     def download_results(self, download_location: Union[str, Path], callback: Optional[str] = None, drive_wait: int = 15) -> None:
         """
-        :return:
+        :param download_location: The directory where the results should be downloaded to.
+        :type download_location: Union[str, Path]
+        :param callback: The callback function called once the image is downloaded.
+        :type callback: Optional[str]
+        :return: None
         """
         # need an event loop that checks self.task.status(), which
         # will get the current state of the task
@@ -280,13 +390,18 @@
             callback_func()

     def mosaic(self) -> None:
+        """
+        Mosaics the individual downloaded images into the full image.
+
+        :return: None
+        """
         self.mosaic_image = os.path.join(str(self.output_folder), f"{self.filename}_mosaic.tif")
         mosaic_rasters.mosaic_folder(str(self.output_folder), self.mosaic_image, prefix=self.filename)

     def zonal_stats(self,
                     polygons: Union[str, Path],
-                    keep_fields: Tuple = ("UniqueID", "CLASS2"),
-                    stats: Tuple = ('min', 'max', 'mean', 'median', 'std', 'count', 'percentile_10', 'percentile_90'),
+                    keep_fields: Tuple[str, ...] = ("UniqueID", "CLASS2"),
+                    stats: Tuple[str, ...] = ('min', 'max', 'mean', 'median', 'std', 'count', 'percentile_10', 'percentile_90'),
                     report_threshold: int = 1000,
                     write_batch_size: int = 2000,
                     use_points: bool = False,
@@ -294,13 +409,19 @@
         """
         :param polygons:
+        :type polygons: Union[str, Path]
         :param keep_fields:
+        :type keep_fields: Tuple[str, ...]
         :param stats:
+        :type stats: Tuple[str, ...]
         :param report_threshold: After how many iterations should it print out the feature number it's on. Defaults to 1000.
-            Set to None to disable
-        :param write_batch_size: How many zones should we store up before writing to the disk?
+            Set to None to disable.
+        :type report_threshold: int
+        :param write_batch_size: How many zones should we store up before writing to the disk? Defaults to 2000.
+        :type write_batch_size: int
         :param use_points:
-        :return:
+        :type use_points: bool
+        :return: None
         """
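For instance, assuming `image` is an eedl Image whose export results have already been downloaded, the mosaic and zonal statistics steps might look like this (the polygon path is hypothetical):

    image.mosaic()  # stitch the downloaded tiles into a single raster first
    image.zonal_stats(
        polygons="C:/data/fields.gdb/field_boundaries",
        keep_fields=("UniqueID", "CLASS2"),
        stats=("mean", "min", "max"),
    )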
@@ -315,6 +436,12 @@ def zonal_stats,
                                 use_points=use_points)

     def _check_task_status(self) -> Dict[str, Union[Dict[str, str], bool]]:
+        """
+        Updates the status if it needs to be changed.
+
+        :return: Returns a dictionary of the most up-to-date status and whether it was changed.
+        :rtype: Dict[str, Union[Dict[str, str], bool]]
+        """

         if self.task is None:
             raise ValueError('Error checking task status. Task is None. It likely means that the export task was not'
@@ -323,8 +450,8 @@ def _check_task_status(self) -> Dict[str, Union[Dict[str, str], bool]]:
         new_status = self.task.status()

         changed = False
-        if self._last_task_status != new_status:
+        if self.last_task_status != new_status:
             changed = True
-            self._last_task_status = new_status
+            self.last_task_status = new_status

-        return {'status': self._last_task_status, 'changed': changed}
+        return {'status': self.last_task_status, 'changed': changed}
diff --git a/eedl/merge.py b/eedl/merge.py
index 62f26a6..6870473 100644
--- a/eedl/merge.py
+++ b/eedl/merge.py
@@ -14,12 +14,18 @@ def merge_outputs(file_mapping,
                   sqlite_table: Optional[str] = None) -> pandas.DataFrame:
     """
     Makes output zonal stats files into a data frame and adds a datetime field. Merges all inputs into one DF, and
-    can optionally insert into a sqlite database
-    :param file_mapping: a set of tuples with a path to a file and a time value (string or datetime) to associate with it.
-    :param date_field:
-    :param sqlite_db:
-    :param sqlite_table:
-    :return: pandas data frame with all file data and times
+    can optionally insert into a sqlite database.
+
+    :param file_mapping: A set of tuples with a path to a file and a time value (string or datetime) to associate with it.
+    :param date_field: Name of the datetime field that gets added to the data frame. Defaults to "et_date".
+    :type date_field: str
+    :param sqlite_db: Name of a sqlite database.
+    :type sqlite_db: Optional[str]
+    :param sqlite_table: Name of a table in the database.
+    :type sqlite_table: Optional[str]
+    :return: Pandas data frame with all file and time data.
+    :rtype: pandas.DataFrame
     """

     dfs = []
@@ -42,7 +48,20 @@ def merge_outputs(file_mapping,
     return final_df


-def plot_merged(df: pandas.DataFrame, et_field, date_field: str = "et_date", uniqueid: str = "UniqueID") -> so.Plot:
+def plot_merged(df: pandas.DataFrame, et_field: str, date_field: str = "et_date", uniqueid: str = "UniqueID") -> so.Plot:
+    """
+    Plots the merged data frame as a time series.
+
+    :param df: Data source for the plot.
+    :type df: pandas.DataFrame
+    :param et_field: Name of the variable on the y-axis.
+    :type et_field: str
+    :param date_field: Name of the variable on the x-axis. Default is "et_date".
+    :type date_field: str
+    :param uniqueid: Defines additional data subsets that transforms should operate on independently. Default is "UniqueID".
+    :type uniqueid: str
+    :return: Returns a seaborn object plot.
+    :rtype: so.Plot
+    """
     return (
         so.Plot(df,
                 x=date_field,
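A small sketch of merging two zonal stats outputs and plotting them (the paths, dates, and database name are hypothetical):

    from eedl.merge import merge_outputs, plot_merged

    file_mapping = [
        ("C:/data/outputs/et_june_zonal_stats.csv", "2018-06-01"),
        ("C:/data/outputs/et_july_zonal_stats.csv", "2018-07-01"),
    ]

    df = merge_outputs(file_mapping, sqlite_db="et_results.db", sqlite_table="et_2018")
    plot = plot_merged(df, et_field="mean")  # "mean" is one of the computed stats fields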
diff --git a/eedl/mosaic_rasters.py b/eedl/mosaic_rasters.py
index 129da3a..7cb7f46 100644
--- a/eedl/mosaic_rasters.py
+++ b/eedl/mosaic_rasters.py
@@ -7,6 +7,16 @@
 def mosaic_folder(folder_path: Union[str, Path], output_path: Union[str, Path], prefix: str = "") -> None:
+    """
+    Mosaics all GeoTIFFs in a folder whose names match the prefix into a single output raster.
+
+    :param folder_path: Location of the folder with the rasters to mosaic.
+    :type folder_path: Union[str, Path]
+    :param output_path: Output destination.
+    :type output_path: Union[str, Path]
+    :param prefix: Used to find the files of interest.
+    :type prefix: str
+    :return: None
+    """
     tifs = [os.path.join(folder_path, filename) for filename in os.listdir(folder_path) if filename.endswith(".tif") and filename.startswith(prefix)]
     mosaic_rasters(tifs, output_path)
@@ -17,10 +27,14 @@ def mosaic_rasters(raster_paths: Sequence[Union[str, Path]],
     """
     Adapted from https://gis.stackexchange.com/a/314580/1955 and https://www.gislite.com/tutorial/k8024 along with
     other basic lookups on GDAL Python bindings
-    :param raster_paths:
-    :param output_path:
+
+    :param raster_paths: Locations of the rasters to mosaic.
+    :type raster_paths: Sequence[Union[str, Path]]
+    :param output_path: Output destination.
+    :type output_path: Union[str, Path]
     :param add_overviews:
-    :return:
+    :type add_overviews: bool
+    :return: None
     """

     # gdal.SetConfigOption("GTIFF_SRC_SOURCE", "GEOKEYS")
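A sketch of mosaicking a folder of downloaded tiles directly (the paths and prefix are hypothetical):

    from eedl.mosaic_rasters import mosaic_folder

    mosaic_folder(
        folder_path="C:/data/ee_downloads",
        output_path="C:/data/ee_downloads/valley_et_2018_mosaic.tif",
        prefix="valley_et_2018",  # only tiles from this export are stitched
    )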
diff --git a/eedl/zonal.py b/eedl/zonal.py
index c455210..a910343 100644
--- a/eedl/zonal.py
+++ b/eedl/zonal.py
@@ -10,9 +10,12 @@
 def _get_fiona_args(polygon_path: Union[str, Path]) -> Dict[str, Union[str, Path]]:
     """
     A simple utility that detects if, maybe, we're dealing with an Esri File Geodatabase. This is the wrong way
-    to do this, but it'll work in many situations
-    :param polygon_path:
-    :return:
+    to do this, but it'll work in many situations.
+
+    :param polygon_path: File location of polygons.
+    :type polygon_path: Union[str, Path]
+    :return: Returns the full path and, depending on the file format, the file name in a dictionary.
+    :rtype: Dict[str, Union[str, Path]]
     """

     parts = os.path.split(polygon_path)
@@ -38,27 +41,37 @@
     """

-    :param features:
-    :param raster:
-    :param output_folder:
-    :param filename:
-    :param keep_fields:
-    :param stats:
-    :param report_threshold: After how many iterations should it print out the feature number it's on. Defaults to 1000. Set to None to disable
-    :param write_batch_size: How many zones should we store up before writing to the disk?
-    :param use_points: Switches rasterstats to extract using gen_point_query instead of gen_zonal_stats. See rasterstats
-        package documentation for complete information. get_point_query will get the values of a raster at all vertex
+    :param features: Location of the features.
+    :type features: Union[str, Path]
+    :param raster: Location of the raster.
+    :type raster: Union[str, Path, None]
+    :param output_folder: Output destination.
+    :type output_folder: Union[str, Path, None]
+    :param filename: Name of the output file.
+    :type filename: str
+    :param keep_fields: Fields from the features that will be kept in the output.
+    :type keep_fields: Iterable[str]
+    :param stats: The various statistical measurements to be computed.
+    :type stats: Iterable[str]
+    :param report_threshold: The number of iterations before it prints out the feature number it's on. Default is 1000. Set to None to disable.
+    :type report_threshold: int
+    :param write_batch_size: The number of zones that should be stored up before writing to disk.
+    :type write_batch_size: int
+    :param use_points: Switches rasterstats to extract using gen_point_query instead of gen_zonal_stats. See the rasterstats
+        package documentation for complete information. gen_point_query will get the values of a raster at all vertex
         locations when provided with a polygon or line. If provided points, it will extract those point values. We set
-        interpolation to nearest to perform an exact extraction of the cell values. In this codebase's usage, it's
-        assumed that the "features" paramter to this function will be a points dataset (still in the same CRS as the raster)
+        interpolation to "nearest" to perform an exact extraction of the cell values. In this codebase's usage, it's
+        assumed that the "features" parameter to this function will be a points dataset (still in the same CRS as the raster)
         when use_points is True. Additionally, when this is True, the `stats` argument to this function is ignored
-        as only a single value will be extracted as the attribute `value` in the output CSV. default is False.
-    :param kwargs: passed through to rasterstats
+        as only a single value will be extracted as the attribute `value` in the output CSV. Default is False.
+    :type use_points: bool
+    :param kwargs: Passed through to rasterstats.
     :return:
+    :rtype: Union[str, Path, None]
     """

-    # note use of gen_zonal_stats, which uses a generator. That should mean that until we coerce it to a list on the
+    # Note the use of gen_zonal_stats, which uses a generator. That should mean that until we coerce it to a list on the
     # next line, each item isn't evaluated, which should prevent us from needing to store a geojson representation of
-    # all the polygons at one time since we'll strip it off (it'd be reallllly bad to try to keep all of it
+    # all the polygons at one time since we'll strip it off (it'd be bad to try to keep all of it in memory)

     # A silly hack to get fiona to open GDB data by splitting it only if the input is a gdb data item, then providing
     # anything else as kwargs. But fiona requires the main item to be an arg, not a kwarg
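To make the point-query path concrete, a hedged sketch of extracting values at points (the paths are hypothetical; `stats` is ignored in this mode):

    from eedl.zonal import zonal_stats

    zonal_stats(
        features="C:/data/sample_points.shp",  # points in the same CRS as the raster
        raster="C:/data/valley_et_2018_mosaic.tif",
        output_folder="C:/data/outputs",
        filename="valley_et_2018",
        use_points=True,  # one "value" per point via gen_point_query
    )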
@@ -70,25 +83,25 @@
     with fiona.open(main_file_path, **kwargs) as feats_open:

-        if not use_points:  # if we want to do zonal, open a zonal stats generator
+        if not use_points:  # If we want to do zonal, open a zonal stats generator
             zstats_results_geo = rasterstats.gen_zonal_stats(feats_open,
                                                              raster,
                                                              stats=stats,
                                                              geojson_out=True,
                                                              nodata=-9999,
                                                              **kwargs)
             fieldnames = (*stats, *keep_fields)
             filesuffix = "zonal_stats"
-        else:  # otherwise open a point query generator
+        else:  # Otherwise, open a point query generator.
             # TODO: Need to make it convert the polygons to points here, otherwise it'll get the vertex data
             zstats_results_geo = rasterstats.gen_point_query(feats_open,
                                                              raster,
-                                                             geojson_out=True,  # need this to get extra attributes back
+                                                             geojson_out=True,  # Need this to get extra attributes back
                                                              nodata=-9999,
-                                                             interpolate="nearest",  # we need this or else rasterstats uses a mix of nearby cells, even for single points
+                                                             interpolate="nearest",  # Need this or else rasterstats uses a mix of nearby cells, even for single points
                                                              **kwargs)
-            fieldnames = ("value", *keep_fields,)  # when doing point queries, we get a field called "value" back with the raster value
+            fieldnames = ("value", *keep_fields,)  # When doing point queries, we get a field called "value" back with the raster value
             filesuffix = "point_query"

-        # here's a first approach that still stores a lot in memory - it's commented out because we're instead
+        # Here's a first approach that still stores a lot in memory - it's commented out because we're instead
         # going to just generate them one by one and write them to a file directly.
         #
-        # ok, so this next line is doing a lot of work. It's a dictionary comprehension inside a list comprehension -
+        # This next line is doing a lot of work. It's a dictionary comprehension inside a list comprehension -
         # we're going through each item in the results, then accessing just the properties key and constructing a new
         # dictionary just for the keys we want to keep - the keep fields (the key and a class field by defaiult) and
         # the stats fields
         zstats_results = [{key: poly['properties'][key] for key in fieldnames} for poly in
@@ -100,8 +113,7 @@
             writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
             writer.writeheader()
             results = []
-            for poly in zstats_results_geo:  # get the result for the polygon, then filter the keys with the
-                # dictionary comprehension below
+            for poly in zstats_results_geo:  # Get the result for the polygon, then filter the keys with the dictionary comprehension below
                 result = {key: poly['properties'][key] for key in fieldnames}

                 for key in result:  # truncate the floats
@@ -111,13 +123,13 @@
                 i += 1
                 results.append(result)
                 if i % write_batch_size == 0:
-                    writer.writerows(results)  # then write the lone result out one at a time to not store it all in RAM
+                    writer.writerows(results)  # Then write the batch of results out so we don't store them all in RAM
                     results = []

                 if report_threshold and i % report_threshold == 0:
                     print(i)

-        if len(results) > 0:  # clear out any remaining items at the end
+        if len(results) > 0:  # Clear out any remaining items at the end
             writer.writerows(results)
             print(i)

@@ -125,6 +137,11 @@
 def run_data_2018_baseline() -> None:
+    """
+    :return: None
+    """
     datasets = [
         # dict(
         # 	name="cv_water_balance",
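The batched-write pattern in zonal_stats above generalizes beyond zonal statistics; a standalone sketch of the same idea (all names here are illustrative):

    import csv

    def write_in_batches(rows, csv_path, fieldnames, batch_size=2000):
        # Accumulate dicts from a generator and flush every batch_size rows,
        # so the full result set never has to sit in RAM at once.
        with open(csv_path, 'w', newline='') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            writer.writeheader()
            batch = []
            for i, row in enumerate(rows, start=1):
                batch.append(row)
                if i % batch_size == 0:
                    writer.writerows(batch)
                    batch = []
            if batch:  # flush whatever is left at the end
                writer.writerows(batch)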