diff --git a/flash/core/data/utils.py b/flash/core/data/utils.py
index 4e5c69ed0d0..fc16c03c92f 100644
--- a/flash/core/data/utils.py
+++ b/flash/core/data/utils.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 import os.path
-import zipfile
 import tarfile
+import zipfile
 from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Set, Type
 
 import requests
diff --git a/flash/image/__init__.py b/flash/image/__init__.py
index 6c7480ed6bb..95dfcf49d72 100644
--- a/flash/image/__init__.py
+++ b/flash/image/__init__.py
@@ -4,14 +4,9 @@
     ImageClassificationPreprocess,
     ImageClassifier,
 )
-from flash.image.detection import (  # noqa: F401
-    ObjectDetectionData,
-    ObjectDetector,
-)
-from flash.image.face_detection import (  # noqa: F401
-    FaceDetector,
-)
+from flash.image.detection import ObjectDetectionData, ObjectDetector  # noqa: F401
 from flash.image.embedding import ImageEmbedder  # noqa: F401
+from flash.image.face_detection import FaceDetector  # noqa: F401
 from flash.image.segmentation import (  # noqa: F401
     SemanticSegmentation,
     SemanticSegmentationData,
diff --git a/flash/image/face_detection/data.py b/flash/image/face_detection/data.py
index 4b3e7816718..be8e8e9cd89 100644
--- a/flash/image/face_detection/data.py
+++ b/flash/image/face_detection/data.py
@@ -13,14 +13,14 @@
 # limitations under the License.
 from typing import Any, Callable, Dict, Optional, Sequence, Tuple
 
+from torch.utils.data import Dataset
+
 from flash.core.data.data_source import DataSource, DefaultDataKeys, DefaultDataSources
 from flash.core.data.process import Preprocess
 from flash.core.utilities.imports import _TORCHVISION_AVAILABLE
 from flash.image.data import ImagePathsDataSource
 from flash.image.detection.transforms import default_transforms
 
-from torch.utils.data import Dataset
-
 if _TORCHVISION_AVAILABLE:
     from torchvision.datasets.folder import default_loader
diff --git a/flash/image/face_detection/model.py b/flash/image/face_detection/model.py
index 5468c8ae291..ef978142b1c 100644
--- a/flash/image/face_detection/model.py
+++ b/flash/image/face_detection/model.py
@@ -21,7 +21,6 @@
 from flash.core.data.process import Preprocess, Serializer
 from flash.core.model import Task
 from flash.core.utilities.imports import _FASTFACE_AVAILABLE
-
 from flash.image.detection.finetuning import ObjectDetectionFineTuning
 from flash.image.detection.serialization import DetectionLabels
 from flash.image.face_detection.data import FaceDetectionPreprocess
@@ -62,9 +61,7 @@ def __init__(
         self.save_hyperparameters()
 
         if model in ff.list_pretrained_models():
-            model = FaceDetector.get_model(
-                model, pretrained, **kwargs
-            )
+            model = FaceDetector.get_model(model, pretrained, **kwargs)
         else:
             ValueError(f"{model} is not supported yet.")
 
diff --git a/flash_examples/face_detection.py b/flash_examples/face_detection.py
index 5b2601db345..e8a14f7096e 100644
--- a/flash_examples/face_detection.py
+++ b/flash_examples/face_detection.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import flash
+from flash.core.data.data_module import DataModule
 from flash.core.utilities.imports import _FASTFACE_AVAILABLE
 from flash.image import FaceDetector
 from flash.image.face_detection.data import FaceDetectionPreprocess
-from flash.core.data.data_module import DataModule
 
 if _FASTFACE_AVAILABLE:
     import fastface as ff
@@ -27,19 +27,14 @@
 val_dataset = ff.dataset.FDDBDataset(source_dir="data/", phase="val")
 
 datamodule = DataModule.from_data_source(
-    "fastface",
-    train_data=train_dataset,
-    val_data=val_dataset,
-    preprocess=FaceDetectionPreprocess()
+    "fastface", train_data=train_dataset, val_data=val_dataset, preprocess=FaceDetectionPreprocess()
 )
 
 # 2. Build the task
 model = FaceDetector(model="lffd_slim")
 
 # 3. Create the trainer and finetune the model
-trainer = flash.Trainer(max_epochs=3,
-                        limit_train_batches=0.1,
-                        limit_val_batches=0.1)
+trainer = flash.Trainer(max_epochs=3, limit_train_batches=0.1, limit_val_batches=0.1)
 trainer.finetune(model, datamodule=datamodule, strategy="freeze")
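
Not part of the diff: a short, hedged follow-up sketch showing how the finetuned task from flash_examples/face_detection.py could be saved and reloaded. It continues directly from the `trainer` and `model` built in that example and relies only on the standard Lightning checkpoint API that flash.Trainer inherits; the checkpoint filename is a placeholder chosen for illustration.

# 4. Save the finetuned model (sketch; the filename is a hypothetical placeholder)
trainer.save_checkpoint("face_detection_model.pt")

# Reload the task later through the usual LightningModule classmethod.
model = FaceDetector.load_from_checkpoint("face_detection_model.pt")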