From 3ba5715c3a565046534281bc5f04410b46a09986 Mon Sep 17 00:00:00 2001
From: Roman Donchenko
Date: Tue, 24 Dec 2024 11:22:56 +0200
Subject: [PATCH] Remove all remaining deprecated imports (#8861)

This concludes the series started in #8626.
---
 dev/update_version.py          |  3 ++-
 site/build_docs.py             |  4 ++--
 site/process_sdk_docs.py       |  8 ++++----
 utils/dataset_manifest/core.py | 28 ++++++++++++++++------------
 4 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/dev/update_version.py b/dev/update_version.py
index ed8d08a40f42..bc175aa16dd0 100755
--- a/dev/update_version.py
+++ b/dev/update_version.py
@@ -6,7 +6,8 @@
 import sys
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, Match, Pattern
+from re import Match, Pattern
+from typing import Callable
 
 
 SUCCESS_CHAR = "\u2714"
diff --git a/site/build_docs.py b/site/build_docs.py
index 2eca3a941330..a01c437ae64c 100755
--- a/site/build_docs.py
+++ b/site/build_docs.py
@@ -10,7 +10,7 @@
 import subprocess
 import tempfile
 from pathlib import Path
-from typing import Dict, Optional
+from typing import Optional
 
 import git
 import toml
@@ -98,7 +98,7 @@ def run_npm_install():
 def run_hugo(
     destination_dir: os.PathLike,
     *,
-    extra_env_vars: Dict[str, str] = None,
+    extra_env_vars: dict[str, str] = None,
     executable: Optional[str] = "hugo",
 ):
     extra_kwargs = {}
diff --git a/site/process_sdk_docs.py b/site/process_sdk_docs.py
index 03324aea691b..4fb911b69718 100755
--- a/site/process_sdk_docs.py
+++ b/site/process_sdk_docs.py
@@ -12,13 +12,13 @@
 import sys
 import textwrap
 from glob import iglob
-from typing import Callable, List
+from typing import Callable
 
 from inflection import underscore
 
 
 class Processor:
-    _reference_files: List[str]
+    _reference_files: list[str]
 
     def __init__(self, *, input_dir: str, site_root: str) -> None:
         self._input_dir = input_dir
@@ -29,7 +29,7 @@ def __init__(self, *, input_dir: str, site_root: str) -> None:
         self._templates_dir = osp.join(self._site_root, "templates")
 
     @staticmethod
-    def _copy_files(src_dir: str, glob_pattern: str, dst_dir: str) -> List[str]:
+    def _copy_files(src_dir: str, glob_pattern: str, dst_dir: str) -> list[str]:
         copied_files = []
 
         for src_path in iglob(osp.join(src_dir, glob_pattern), recursive=True):
@@ -140,7 +140,7 @@ def _fix_page_links_and_references(self):
             with open(p, "w") as f:
                 f.write(contents)
 
-    def _process_non_code_blocks(self, text: str, handlers: List[Callable[[str], str]]) -> str:
+    def _process_non_code_blocks(self, text: str, handlers: list[Callable[[str], str]]) -> str:
         """
         Allows to process Markdown documents with passed callbacks.
         Callbacks are only executed outside code blocks.
diff --git a/utils/dataset_manifest/core.py b/utils/dataset_manifest/core.py
index 6a7c9d92f0d6..449e70d64098 100644
--- a/utils/dataset_manifest/core.py
+++ b/utils/dataset_manifest/core.py
@@ -9,7 +9,8 @@
 
 import json
 import os
-from abc import ABC, abstractmethod, abstractproperty, abstractstaticmethod
+from abc import ABC, abstractmethod
+from collections.abc import Iterator
 from contextlib import closing
 from itertools import islice
 from PIL import Image
@@ -20,7 +21,7 @@
 from .utils import SortingMethod, md5_hash, rotate_image, sort
 from .types import NamedBytesIO
 
-from typing import Any, Dict, List, Union, Optional, Iterator, Tuple, Callable
+from typing import Any, Union, Optional, Callable
 
 
 class VideoStreamReader:
@@ -78,7 +79,7 @@ def validate_key_frame(self, container, video_stream, key_frame):
                 return False
         return True
 
-    def __iter__(self) -> Iterator[Union[int, Tuple[int, int, str]]]:
+    def __iter__(self) -> Iterator[Union[int, tuple[int, int, str]]]:
         """
         Iterate over video frames and yield key frames or indexes.
 
@@ -143,12 +144,12 @@ def __iter__(self) -> Iterator[Union[int, Tuple[int, int, str]]]:
 
 class DatasetImagesReader:
     def __init__(self,
-        sources: Union[List[str], Iterator[NamedBytesIO]],
+        sources: Union[list[str], Iterator[NamedBytesIO]],
         *,
         start: int = 0,
         step: int = 1,
         stop: Optional[int] = None,
-        meta: Optional[Dict[str, List[str]]] = None,
+        meta: Optional[dict[str, list[str]]] = None,
         sorting_method: SortingMethod = SortingMethod.PREDEFINED,
         use_image_hash: bool = False,
         **kwargs
@@ -196,7 +197,7 @@ def step(self):
     def step(self, value):
         self._step = int(value)
 
-    def _get_img_properties(self, image: Union[str, NamedBytesIO]) -> Dict[str, Any]:
+    def _get_img_properties(self, image: Union[str, NamedBytesIO]) -> dict[str, Any]:
         img = Image.open(image, mode='r')
         if self._data_dir:
             img_name = os.path.relpath(image, self._data_dir)
@@ -469,7 +470,8 @@ def __getitem__(self, item):
     def index(self):
         return self._index
 
-    @abstractproperty
+    @property
+    @abstractmethod
     def data(self):
         ...
 
@@ -665,7 +667,7 @@ def emulate_hierarchical_structure(
         prefix: str = "",
         default_prefix: Optional[str] = None,
         start_index: Optional[int] = None,
-    ) -> Dict:
+    ) -> dict:
         if default_prefix and prefix and not (default_prefix.startswith(prefix) or prefix.startswith(default_prefix)):
             return {
@@ -727,12 +729,12 @@ def emulate_hierarchical_structure(
             'next': next_start_index,
         }
 
-    def reorder(self, reordered_images: List[str]) -> None:
+    def reorder(self, reordered_images: list[str]) -> None:
         """
         The method takes a list of image names and reorders its content based on this new list.
         Due to the implementation of Honeypots, the reordered list of image names may contain duplicates.
         """
-        unique_images: Dict[str, Any] = {}
+        unique_images: dict[str, Any] = {}
         for _, image_details in self:
             if image_details.full_name not in unique_images:
                 unique_images[image_details.full_name] = image_details
@@ -766,11 +768,13 @@ def _validate_type(self, _dict):
         if not _dict['type'] == self.TYPE:
             raise InvalidManifestError('Incorrect type field')
 
-    @abstractproperty
+    @property
+    @abstractmethod
     def validators(self):
         pass
 
-    @abstractstaticmethod
+    @staticmethod
+    @abstractmethod
     def _validate_first_item(_dict):
         pass
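
Note: the snippet below is an illustrative, self-contained sketch of the replacement patterns applied throughout this patch, not code from the CVAT sources; ManifestSource, FileSource, and their members are invented names. It shows builtin generics (dict/list/tuple) and re.Pattern in place of the deprecated typing aliases, collections.abc.Iterator for annotations, and stacked @property / @staticmethod plus @abstractmethod in place of abc.abstractproperty and abc.abstractstaticmethod. It requires Python 3.9 or newer.

    from abc import ABC, abstractmethod
    from collections.abc import Iterator        # instead of typing.Iterator
    from re import Pattern                      # instead of typing.Pattern
    from typing import Optional                 # typing is still used where no builtin form exists


    class ManifestSource(ABC):
        """Hypothetical base class demonstrating the decorator replacements."""

        @property                               # replaces @abstractproperty
        @abstractmethod
        def names(self) -> list[str]:           # builtin list instead of typing.List
            ...

        @staticmethod                           # replaces @abstractstaticmethod
        @abstractmethod
        def is_valid(record: dict[str, str]) -> bool:   # builtin dict instead of typing.Dict
            ...


    class FileSource(ManifestSource):
        def __init__(self, names: list[str], mask: Optional[Pattern[str]] = None) -> None:
            # keep only the names that match the optional regular expression
            self._names = [n for n in names if mask is None or mask.fullmatch(n)]

        @property
        def names(self) -> list[str]:
            return list(self._names)

        @staticmethod
        def is_valid(record: dict[str, str]) -> bool:
            return bool(record.get("name"))

        def __iter__(self) -> Iterator[tuple[int, str]]:   # builtin tuple instead of typing.Tuple
            yield from enumerate(self._names)


    if __name__ == "__main__":
        import re

        source = FileSource(["a.png", "b.jpg"], mask=re.compile(r".*\.png"))
        print(source.names)   # ['a.png']
        print(list(source))   # [(0, 'a.png')]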