engine: remove deprecated imports
This is a continuation of cvat-ai#8626.
SpecLad committed Dec 20, 2024
1 parent 54534e8 commit 8c63d5e
Showing 23 changed files with 127 additions and 128 deletions.
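The hunks below all apply one mechanical change: typing aliases deprecated since Python 3.9 (Dict, List, Tuple, Type, and the collection ABCs such as Collection, Iterable, Iterator, Sequence, Generator) are replaced with the built-in generics introduced by PEP 585 and with their collections.abc counterparts. A minimal before/after sketch of the pattern, assuming Python 3.9+ and using a hypothetical load_chunks function rather than code from the diff:

# Before: deprecated typing aliases
from typing import Dict, Iterator, List, Tuple

def load_chunks(paths: List[str]) -> Iterator[Tuple[str, Dict[str, int]]]:
    ...

# After: built-in generics (PEP 585) and collections.abc
from collections.abc import Iterator

def load_chunks(paths: list[str]) -> Iterator[tuple[str, dict[str, int]]]:
    ...

Names such as Optional, Union, and Any stay on typing, which is why those import lines are trimmed rather than removed.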
6 changes: 3 additions & 3 deletions cvat/apps/engine/background.py
@@ -7,7 +7,7 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Callable, Dict, Optional, Union
from typing import Any, Callable, Optional, Union

import django_rq
from attrs.converters import to_bool
@@ -170,7 +170,7 @@ class ExportArgs:
format: str
filename: str
save_images: bool
location_config: Dict[str, Any]
location_config: dict[str, Any]

@property
def location(self) -> Location:
@@ -515,7 +515,7 @@ class BackupExportManager(_ResourceExportManager):
@dataclass
class ExportArgs:
filename: str
location_config: Dict[str, Any]
location_config: dict[str, Any]

@property
def location(self) -> Location:
5 changes: 3 additions & 2 deletions cvat/apps/engine/backup.py
@@ -10,10 +10,11 @@
import shutil
import tempfile
import uuid
from collections.abc import Collection, Iterable
from enum import Enum
from logging import Logger
from tempfile import NamedTemporaryFile
from typing import Any, Collection, Dict, Iterable, Optional, Union
from typing import Any, Optional, Union
from zipfile import ZipFile

import django_rq
@@ -650,7 +651,7 @@ def _calculate_segment_size(jobs):
return segment_size, overlap

@staticmethod
def _parse_segment_frames(*, jobs: Dict[str, Any]) -> JobFileMapping:
def _parse_segment_frames(*, jobs: dict[str, Any]) -> JobFileMapping:
segments = []

for i, segment in enumerate(jobs):
27 changes: 8 additions & 19 deletions cvat/apps/engine/cache.py
@@ -13,22 +13,11 @@
import time
import zipfile
import zlib
from collections.abc import Collection, Generator, Iterator, Sequence
from contextlib import ExitStack, closing
from datetime import datetime, timezone
from itertools import groupby, pairwise
from typing import (
Any,
Callable,
Collection,
Generator,
Iterator,
Optional,
Sequence,
Tuple,
Type,
Union,
overload,
)
from typing import Any, Callable, Optional, Union, overload

import attrs
import av
@@ -76,8 +65,8 @@
slogger = ServerLogManager(__name__)


DataWithMime = Tuple[io.BytesIO, str]
_CacheItem = Tuple[io.BytesIO, str, int, Union[datetime, None]]
DataWithMime = tuple[io.BytesIO, str]
_CacheItem = tuple[io.BytesIO, str, int, Union[datetime, None]]


def enqueue_create_chunk_job(
@@ -636,7 +625,7 @@ def _read_raw_images(
@staticmethod
def _read_raw_frames(
db_task: Union[models.Task, int], frame_ids: Sequence[int]
) -> Generator[Tuple[Union[av.VideoFrame, PIL.Image.Image], str, str], None, None]:
) -> Generator[tuple[Union[av.VideoFrame, PIL.Image.Image], str, str], None, None]:
if isinstance(db_task, int):
db_task = models.Task.objects.get(pk=db_task)

@@ -962,7 +951,7 @@ def prepare_preview_image(image: PIL.Image.Image) -> DataWithMime:


def prepare_chunk(
task_chunk_frames: Iterator[Tuple[Any, str, int]],
task_chunk_frames: Iterator[tuple[Any, str, int]],
*,
quality: FrameQuality,
db_task: models.Task,
@@ -972,7 +961,7 @@ def prepare_chunk(

db_data = db_task.data

writer_classes: dict[FrameQuality, Type[IChunkWriter]] = {
writer_classes: dict[FrameQuality, type[IChunkWriter]] = {
FrameQuality.COMPRESSED: (
Mpeg4CompressedChunkWriter
if db_data.compressed_chunk_type == models.DataChoice.VIDEO
@@ -1005,7 +994,7 @@ def prepare_chunk(
return buffer, get_chunk_mime_type_for_writer(writer_class)


def get_chunk_mime_type_for_writer(writer: Union[IChunkWriter, Type[IChunkWriter]]) -> str:
def get_chunk_mime_type_for_writer(writer: Union[IChunkWriter, type[IChunkWriter]]) -> str:
if isinstance(writer, IChunkWriter):
writer_class = type(writer)
else:
31 changes: 17 additions & 14 deletions cvat/apps/engine/cloud_provider.py
@@ -7,11 +7,12 @@
import json
import os
import math
from abc import ABC, abstractmethod, abstractproperty
from abc import ABC, abstractmethod
from collections.abc import Iterator
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION
from enum import Enum
from io import BytesIO
from typing import Dict, List, Optional, Any, Callable, TypeVar, Iterator
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_EXCEPTION
from typing import Optional, Any, Callable, TypeVar

import boto3
from azure.core.exceptions import HttpResponseError, ResourceExistsError
@@ -135,7 +136,8 @@ class _CloudStorage(ABC):
def __init__(self, prefix: Optional[str] = None):
self.prefix = prefix

@abstractproperty
@property
@abstractmethod
def name(self):
pass

@@ -232,7 +234,7 @@ def optimally_image_download(self, key: str, chunk_size: int = 65536) -> NamedBy

def bulk_download_to_memory(
self,
files: List[str],
files: list[str],
*,
threads_number: Optional[int] = None,
_use_optimal_downloading: bool = True,
@@ -246,7 +248,7 @@ def bulk_download_to_memory(

def bulk_download_to_dir(
self,
files: List[str],
files: list[str],
upload_dir: str,
*,
threads_number: Optional[int] = None,
@@ -274,7 +276,7 @@ def _list_raw_content_on_one_page(
prefix: str = "",
next_token: Optional[str] = None,
page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE,
) -> Dict:
) -> dict:
pass

def list_files_on_one_page(
@@ -284,7 +286,7 @@ def list_files_on_one_page(
page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE,
_use_flat_listing: bool = False,
_use_sort: bool = False,
) -> Dict:
) -> dict:

if self.prefix and prefix and not (self.prefix.startswith(prefix) or prefix.startswith(self.prefix)):
return {
@@ -337,7 +339,7 @@ def list_files(
self,
prefix: str = "",
_use_flat_listing: bool = False,
) -> List[str]:
) -> list[str]:
all_files = []
next_token = None
while True:
@@ -349,7 +351,8 @@

return all_files

@abstractproperty
@property
@abstractmethod
def supported_actions(self):
pass

@@ -365,7 +368,7 @@ def get_cloud_storage_instance(
cloud_provider: CloudProviderChoice,
resource: str,
credentials: str,
specific_attributes: Optional[Dict[str, Any]] = None,
specific_attributes: Optional[dict[str, Any]] = None,
):
instance = None
if cloud_provider == CloudProviderChoice.AWS_S3:
@@ -529,7 +532,7 @@ def _list_raw_content_on_one_page(
prefix: str = "",
next_token: Optional[str] = None,
page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE,
) -> Dict:
) -> dict:
# The structure of response looks like this:
# {
# 'CommonPrefixes': [{'Prefix': 'sub/'}],
@@ -736,7 +739,7 @@ def _list_raw_content_on_one_page(
prefix: str = "",
next_token: Optional[str] = None,
page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE,
) -> Dict:
) -> dict:
page = self._client.walk_blobs(
maxresults=page_size, results_per_page=page_size, delimiter='/',
**({'name_starts_with': prefix} if prefix else {})
@@ -852,7 +855,7 @@ def _list_raw_content_on_one_page(
prefix: str = "",
next_token: Optional[str] = None,
page_size: int = settings.BUCKET_CONTENT_MAX_PAGE_SIZE,
) -> Dict:
) -> dict:
iterator = self._client.list_blobs(
bucket_or_name=self.name, max_results=page_size, page_size=page_size,
fields='items(name),nextPageToken,prefixes', # https://cloud.google.com/storage/docs/json_api/v1/parameters#fields
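In addition to the typing changes, cloud_provider.py drops abc.abstractproperty, which has been deprecated since Python 3.3. As the hunks above show, the modern equivalent is to stack @property on top of @abstractmethod. A short sketch with a hypothetical Storage base class, standing in for _CloudStorage:

from abc import ABC, abstractmethod

class Storage(ABC):
    # Deprecated spelling: @abstractproperty
    # Current spelling: @property stacked over @abstractmethod
    @property
    @abstractmethod
    def name(self) -> str:
        ...

Concrete providers still override name as an ordinary @property; only the declaration in the abstract base changes.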
3 changes: 2 additions & 1 deletion cvat/apps/engine/field_validation.py
@@ -2,7 +2,8 @@
#
# SPDX-License-Identifier: MIT

from typing import Any, Sequence
from collections.abc import Sequence
from typing import Any

from rest_framework import serializers

13 changes: 7 additions & 6 deletions cvat/apps/engine/filters.py
@@ -3,8 +3,9 @@
#
# SPDX-License-Identifier: MIT

from typing import Any, Dict, Tuple, List, Iterator, Optional, Iterable
from collections.abc import Iterator, Iterable
from functools import reduce
from typing import Any, Optional
import operator
import json

@@ -25,7 +26,7 @@
DEFAULT_FILTER_FIELDS_ATTR = 'filter_fields'
DEFAULT_LOOKUP_MAP_ATTR = 'lookup_fields'

def get_lookup_fields(view, fields: Optional[Iterator[str]] = None) -> Dict[str, str]:
def get_lookup_fields(view, fields: Optional[Iterator[str]] = None) -> dict[str, str]:
if fields is None:
fields = getattr(view, DEFAULT_FILTER_FIELDS_ATTR, None) or []

@@ -134,7 +135,7 @@ def get_schema_operation_parameters(self, view):
}] if ordering_fields else []

class JsonLogicFilter(filters.BaseFilterBackend):
Rules = Dict[str, Any]
Rules = dict[str, Any]
filter_param = 'filter'
filter_title = _('Filter')
filter_description = _(dedent("""
@@ -191,7 +192,7 @@ def _parse_query(self, json_rules: str) -> Rules:
return rules

def apply_filter(self,
queryset: QuerySet, parsed_rules: Rules, *, lookup_fields: Dict[str, Any]
queryset: QuerySet, parsed_rules: Rules, *, lookup_fields: dict[str, Any]
) -> QuerySet:
try:
q_object = self._build_Q(parsed_rules, lookup_fields)
@@ -362,7 +363,7 @@ class DotDict(dict):
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__

def __init__(self, dct: Dict):
def __init__(self, dct: dict):
for key, value in dct.items():
if isinstance(value, dict):
value = self.__class__(value)
@@ -454,7 +455,7 @@ class NonModelOrderingFilter(OrderingFilter, _NestedAttributeHandler):
?sort=-field1,-field2
"""

def get_ordering(self, request, queryset, view) -> Tuple[List[str], bool]:
def get_ordering(self, request, queryset, view) -> tuple[list[str], bool]:
ordering = super().get_ordering(request, queryset, view)
result, reverse = [], False
for field in ordering:
21 changes: 9 additions & 12 deletions cvat/apps/engine/frame_provider.py
@@ -11,18 +11,15 @@
from abc import ABCMeta, abstractmethod
from bisect import bisect
from collections import OrderedDict
from collections.abc import Iterator, Sequence
from dataclasses import dataclass
from enum import Enum, auto
from io import BytesIO
from typing import (
Any,
Callable,
Generic,
Iterator,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
@@ -53,7 +50,7 @@
class _ChunkLoader(metaclass=ABCMeta):
def __init__(
self,
reader_class: Type[IMediaReader],
reader_class: type[IMediaReader],
*,
reader_params: Optional[dict] = None,
) -> None:
@@ -62,7 +59,7 @@ def __init__(
self.reader_class = reader_class
self.reader_params = reader_params

def load(self, chunk_id: int) -> RandomAccessIterator[Tuple[Any, str, int]]:
def load(self, chunk_id: int) -> RandomAccessIterator[tuple[Any, str, int]]:
if self.chunk_id != chunk_id:
self.unload()

@@ -88,7 +85,7 @@ def read_chunk(self, chunk_id: int) -> DataWithMime: ...
class _FileChunkLoader(_ChunkLoader):
def __init__(
self,
reader_class: Type[IMediaReader],
reader_class: type[IMediaReader],
get_chunk_path_callback: Callable[[int], str],
*,
reader_params: Optional[dict] = None,
@@ -108,7 +105,7 @@ def read_chunk(self, chunk_id: int) -> DataWithMime:
class _BufferChunkLoader(_ChunkLoader):
def __init__(
self,
reader_class: Type[IMediaReader],
reader_class: type[IMediaReader],
get_chunk_callback: Callable[[int], DataWithMime],
*,
reader_params: Optional[dict] = None,
@@ -154,7 +151,7 @@ def _av_frame_to_png_bytes(cls, av_frame: av.VideoFrame) -> BytesIO:
return BytesIO(result.tobytes())

def _convert_frame(
self, frame: Any, reader_class: Type[IMediaReader], out_type: FrameOutputType
self, frame: Any, reader_class: type[IMediaReader], out_type: FrameOutputType
) -> AnyFrame:
if out_type == FrameOutputType.BUFFER:
return (
@@ -451,7 +448,7 @@ def __init__(self, db_segment: models.Segment) -> None:

db_data = db_segment.task.data

reader_class: dict[models.DataChoice, Tuple[Type[IMediaReader], Optional[dict]]] = {
reader_class: dict[models.DataChoice, tuple[type[IMediaReader], Optional[dict]]] = {
models.DataChoice.IMAGESET: (ZipReader, None),
models.DataChoice.VIDEO: (
VideoReader,
@@ -523,7 +520,7 @@ def get_frame_index(self, frame_number: int) -> Optional[int]:

return frame_index

def validate_frame_number(self, frame_number: int) -> Tuple[int, int, int]:
def validate_frame_number(self, frame_number: int) -> tuple[int, int, int]:
frame_index = self.get_frame_index(frame_number)
if frame_index is None:
raise ValidationError(f"Incorrect requested frame number: {frame_number}")
@@ -576,7 +573,7 @@ def _get_raw_frame(
frame_number: int,
*,
quality: FrameQuality = FrameQuality.ORIGINAL,
) -> Tuple[Any, str, Type[IMediaReader]]:
) -> tuple[Any, str, type[IMediaReader]]:
_, chunk_number, frame_offset = self.validate_frame_number(frame_number)
loader = self._loaders[quality]
chunk_reader = loader.load(chunk_number)
(Diffs for the remaining changed files are not shown on this page.)
