From de049da2e5b3dba3f90f857ec4148e5c2e8a78b2 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 16:59:02 +0100 Subject: [PATCH 01/14] update --- docs/source/api/image.rst | 6 +++--- docs/source/integrations/fiftyone.rst | 4 ++-- flash/core/data/output.py | 2 +- flash/image/detection/model.py | 4 ++-- flash/image/detection/output.py | 4 ++-- flash/image/face_detection/model.py | 4 ++-- flash/image/instance_segmentation/model.py | 4 ++-- flash/image/keypoint_detection/model.py | 4 ++-- flash/image/segmentation/data.py | 12 ++++++------ flash/image/segmentation/model.py | 4 ++-- flash/image/segmentation/output.py | 4 ++-- .../integrations/fiftyone/object_detection.py | 4 ++-- .../semantic_segmentation/inference_server.py | 4 ++-- tests/image/detection/test_output.py | 14 +++++++------- tests/image/segmentation/test_output.py | 14 +++++++------- 15 files changed, 44 insertions(+), 44 deletions(-) diff --git a/docs/source/api/image.rst b/docs/source/api/image.rst index b8db9c6ad9..d737e1862f 100644 --- a/docs/source/api/image.rst +++ b/docs/source/api/image.rst @@ -44,7 +44,7 @@ ________________ detection.data.FiftyOneParser detection.data.ObjectDetectionFiftyOneInput - detection.output.FiftyOneDetectionLabels + detection.output.FiftyOneDetectionLabelsOutput detection.data.ObjectDetectionInputTransform Keypoint Detection @@ -102,8 +102,8 @@ ____________ segmentation.data.SemanticSegmentationFiftyOneInput segmentation.data.SemanticSegmentationDeserializer segmentation.model.SemanticSegmentationOutputTransform - segmentation.output.FiftyOneSegmentationLabels - segmentation.output.SegmentationLabels + segmentation.output.FiftyOneSegmentationLabelsOutput + segmentation.output.SegmentationLabelsOutput .. autosummary:: :toctree: generated/ diff --git a/docs/source/integrations/fiftyone.rst b/docs/source/integrations/fiftyone.rst index 89a390a8b3..6cd23dbd3b 100644 --- a/docs/source/integrations/fiftyone.rst +++ b/docs/source/integrations/fiftyone.rst @@ -47,8 +47,8 @@ semantic segmentation tasks. 
Doing so is as easy as updating your model to use one of the following outputs: * :class:`FiftyOneLabels(return_filepath=True)` -* :class:`FiftyOneSegmentationLabels(return_filepath=True)` -* :class:`FiftyOneDetectionLabels(return_filepath=True)` +* :class:`FiftyOneSegmentationLabelsOutput(return_filepath=True)` +* :class:`FiftyOneDetectionLabelsOutput(return_filepath=True)` The :func:`~flash.core.integrations.fiftyone.visualize` function then lets you visualize your predictions in the diff --git a/flash/core/data/output.py b/flash/core/data/output.py index 644c778f8c..337386b14f 100644 --- a/flash/core/data/output.py +++ b/flash/core/data/output.py @@ -17,7 +17,7 @@ from flash.core.data.io.output import Output -class Preds(Output): +class PredsOutput(Output): """A :class:`~flash.core.data.io.output.Output` which returns the "preds" from the model outputs.""" def transform(self, sample: Any) -> Union[int, List[int]]: diff --git a/flash/image/detection/model.py b/flash/image/detection/model.py index 94905f81e5..269c2442e5 100644 --- a/flash/image/detection/model.py +++ b/flash/image/detection/model.py @@ -14,7 +14,7 @@ from typing import Any, Dict, List, Optional from flash.core.adapter import AdapterTask -from flash.core.data.output import Preds +from flash.core.data.output import PredsOutput from flash.core.registry import FlashRegistry from flash.core.utilities.types import LR_SCHEDULER_TYPE, OPTIMIZER_TYPE, OUTPUT_TYPE from flash.image.detection.backbones import OBJECT_DETECTION_HEADS @@ -73,7 +73,7 @@ def __init__( learning_rate=learning_rate, optimizer=optimizer, lr_scheduler=lr_scheduler, - output=output or Preds(), + output=output or PredsOutput(), ) def _ci_benchmark_fn(self, history: List[Dict[str, Any]]) -> None: diff --git a/flash/image/detection/output.py b/flash/image/detection/output.py index b52c24cbe4..cbd060704c 100644 --- a/flash/image/detection/output.py +++ b/flash/image/detection/output.py @@ -28,7 +28,7 @@ fo = None -class FiftyOneDetectionLabels(Output): +class FiftyOneDetectionLabelsOutput(Output): """A :class:`.Output` which converts model outputs to FiftyOne detection format. 
Args: @@ -57,7 +57,7 @@ def __init__( def transform(self, sample: Dict[str, Any]) -> Union[Detections, Dict[str, Any]]: if DataKeys.METADATA not in sample: - raise ValueError("sample requires DefaultDataKeys.METADATA to use a FiftyOneDetectionLabels output.") + raise ValueError("sample requires DefaultDataKeys.METADATA to use a FiftyOneDetectionLabelsOutput output.") labels = None if self._labels is not None: diff --git a/flash/image/face_detection/model.py b/flash/image/face_detection/model.py index d42b23b41c..28cbe3a488 100644 --- a/flash/image/face_detection/model.py +++ b/flash/image/face_detection/model.py @@ -35,7 +35,7 @@ import fastface as ff -class DetectionLabels(Output): +class DetectionLabelsOutput(Output): """A :class:`.Output` which extracts predictions from sample dict.""" def transform(self, sample: Any) -> Dict[str, Any]: @@ -89,7 +89,7 @@ def __init__( learning_rate=learning_rate, optimizer=optimizer, lr_scheduler=lr_scheduler, - output=output or DetectionLabels(), + output=output or DetectionLabelsOutput(), input_transform=input_transform or FaceDetectionInputTransform(), ) diff --git a/flash/image/instance_segmentation/model.py b/flash/image/instance_segmentation/model.py index eb0e257653..58dd81661a 100644 --- a/flash/image/instance_segmentation/model.py +++ b/flash/image/instance_segmentation/model.py @@ -17,7 +17,7 @@ from flash.core.adapter import AdapterTask from flash.core.data.data_pipeline import DataPipeline -from flash.core.data.output import Preds +from flash.core.data.output import PredsOutput from flash.core.registry import FlashRegistry from flash.core.utilities.types import LR_SCHEDULER_TYPE, OPTIMIZER_TYPE, OUTPUT_TYPE from flash.image.instance_segmentation.backbones import INSTANCE_SEGMENTATION_HEADS @@ -80,7 +80,7 @@ def __init__( learning_rate=learning_rate, optimizer=optimizer, lr_scheduler=lr_scheduler, - output=output or Preds(), + output=output or PredsOutput(), ) def _ci_benchmark_fn(self, history: List[Dict[str, Any]]) -> None: diff --git a/flash/image/keypoint_detection/model.py b/flash/image/keypoint_detection/model.py index 1993ee1ac9..74353a27f2 100644 --- a/flash/image/keypoint_detection/model.py +++ b/flash/image/keypoint_detection/model.py @@ -14,7 +14,7 @@ from typing import Any, Dict, List, Optional from flash.core.adapter import AdapterTask -from flash.core.data.output import Preds +from flash.core.data.output import PredsOutput from flash.core.registry import FlashRegistry from flash.core.utilities.types import LR_SCHEDULER_TYPE, OPTIMIZER_TYPE, OUTPUT_TYPE from flash.image.keypoint_detection.backbones import KEYPOINT_DETECTION_HEADS @@ -76,7 +76,7 @@ def __init__( learning_rate=learning_rate, optimizer=optimizer, lr_scheduler=lr_scheduler, - output=output or Preds(), + output=output or PredsOutput(), ) def _ci_benchmark_fn(self, history: List[Dict[str, Any]]) -> None: diff --git a/flash/image/segmentation/data.py b/flash/image/segmentation/data.py index 69eb67b783..89a283799c 100644 --- a/flash/image/segmentation/data.py +++ b/flash/image/segmentation/data.py @@ -46,7 +46,7 @@ ) from flash.core.utilities.stages import RunningStage from flash.image.data import ImageDeserializer, IMG_EXTENSIONS -from flash.image.segmentation.output import SegmentationLabels +from flash.image.segmentation.output import SegmentationLabelsOutput from flash.image.segmentation.transforms import default_transforms, predict_default_transforms, train_default_transforms SampleCollection = None @@ -244,7 +244,7 @@ def __init__( self.image_size = image_size 
self.num_classes = num_classes if num_classes: - labels_map = labels_map or SegmentationLabels.create_random_labels_map(num_classes) + labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes) super().__init__( train_transform=train_transform, @@ -329,9 +329,9 @@ def from_input( num_classes = input_transform_kwargs["num_classes"] - labels_map = getattr(input_transform_kwargs, "labels_map", None) or SegmentationLabels.create_random_labels_map( - num_classes - ) + labels_map = getattr( + input_transform_kwargs, "labels_map", None + ) or SegmentationLabelsOutput.create_random_labels_map(num_classes) data_fetcher = data_fetcher or cls.configure_data_fetcher(labels_map) @@ -494,7 +494,7 @@ def _show_images_and_labels(self, data: List[Any], num_samples: int, title: str) raise TypeError(f"Unknown data type. Got: {type(data)}.") # convert images and labels to numpy and stack horizontally image_vis: np.ndarray = self._to_numpy(image.byte()) - label_tmp: torch.Tensor = SegmentationLabels.labels_to_image(label.squeeze().byte(), self.labels_map) + label_tmp: torch.Tensor = SegmentationLabelsOutput.labels_to_image(label.squeeze().byte(), self.labels_map) label_vis: np.ndarray = self._to_numpy(label_tmp) img_vis = np.hstack((image_vis, label_vis)) # send to visualiser diff --git a/flash/image/segmentation/model.py b/flash/image/segmentation/model.py index 9296db60cb..b5a8093539 100644 --- a/flash/image/segmentation/model.py +++ b/flash/image/segmentation/model.py @@ -34,7 +34,7 @@ ) from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES from flash.image.segmentation.heads import SEMANTIC_SEGMENTATION_HEADS -from flash.image.segmentation.output import SegmentationLabels +from flash.image.segmentation.output import SegmentationLabelsOutput if _KORNIA_AVAILABLE: import kornia as K @@ -114,7 +114,7 @@ def __init__( lr_scheduler=lr_scheduler, metrics=metrics, learning_rate=learning_rate, - output=output or SegmentationLabels(), + output=output or SegmentationLabelsOutput(), output_transform=output_transform or self.output_transform_cls(), ) diff --git a/flash/image/segmentation/output.py b/flash/image/segmentation/output.py index 268cc89568..dcc66fd923 100644 --- a/flash/image/segmentation/output.py +++ b/flash/image/segmentation/output.py @@ -46,7 +46,7 @@ K = None -class SegmentationLabels(Output): +class SegmentationLabelsOutput(Output): """A :class:`.Output` which converts the model outputs to the label of the argmax classification per pixel in the image for semantic segmentation tasks. @@ -100,7 +100,7 @@ def transform(self, sample: Dict[str, torch.Tensor]) -> torch.Tensor: return labels.tolist() -class FiftyOneSegmentationLabels(SegmentationLabels): +class FiftyOneSegmentationLabelsOutput(SegmentationLabelsOutput): """A :class:`.Output` which converts the model outputs to FiftyOne segmentation format. 
Args: diff --git a/flash_examples/integrations/fiftyone/object_detection.py b/flash_examples/integrations/fiftyone/object_detection.py index 8a0450c51e..ef3e25c25c 100644 --- a/flash_examples/integrations/fiftyone/object_detection.py +++ b/flash_examples/integrations/fiftyone/object_detection.py @@ -17,7 +17,7 @@ from flash.core.integrations.fiftyone import visualize from flash.core.utilities.imports import example_requires from flash.image import ObjectDetectionData, ObjectDetector -from flash.image.detection.output import FiftyOneDetectionLabels +from flash.image.detection.output import FiftyOneDetectionLabelsOutput example_requires("image") @@ -42,7 +42,7 @@ trainer.finetune(model, datamodule=datamodule, strategy="freeze") # 4. Set the output and get some predictions -model.output = FiftyOneDetectionLabels(return_filepath=True) # output FiftyOne format +model.output = FiftyOneDetectionLabelsOutput(return_filepath=True) # output FiftyOne format predictions = trainer.predict(model, datamodule=datamodule) predictions = list(chain.from_iterable(predictions)) # flatten batches diff --git a/flash_examples/serve/semantic_segmentation/inference_server.py b/flash_examples/serve/semantic_segmentation/inference_server.py index ca42c43d68..0ee2da2909 100644 --- a/flash_examples/serve/semantic_segmentation/inference_server.py +++ b/flash_examples/serve/semantic_segmentation/inference_server.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. from flash.image import SemanticSegmentation -from flash.image.segmentation.output import SegmentationLabels +from flash.image.segmentation.output import SegmentationLabelsOutput model = SemanticSegmentation.load_from_checkpoint( "https://flash-weights.s3.amazonaws.com/0.6.0/semantic_segmentation_model.pt" ) -model.output = SegmentationLabels(visualize=False) +model.output = SegmentationLabelsOutput(visualize=False) model.serve() diff --git a/tests/image/detection/test_output.py b/tests/image/detection/test_output.py index 002fb68f41..4fe0395290 100644 --- a/tests/image/detection/test_output.py +++ b/tests/image/detection/test_output.py @@ -4,24 +4,24 @@ from flash.core.data.io.input import DataKeys from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _IMAGE_AVAILABLE -from flash.image.detection.output import FiftyOneDetectionLabels +from flash.image.detection.output import FiftyOneDetectionLabelsOutput @pytest.mark.skipif(not _IMAGE_AVAILABLE, reason="image libraries aren't installed.") @pytest.mark.skipif(not _FIFTYONE_AVAILABLE, reason="fiftyone is not installed for testing") -class TestFiftyOneDetectionLabels: +class TestFiftyOneDetectionLabelsOutput: @staticmethod def test_smoke(): - serial = FiftyOneDetectionLabels() + serial = FiftyOneDetectionLabelsOutput() assert serial is not None @staticmethod def test_serialize_fiftyone(): labels = ["class_1", "class_2", "class_3"] - serial = FiftyOneDetectionLabels() - filepath_serial = FiftyOneDetectionLabels(return_filepath=True) - threshold_serial = FiftyOneDetectionLabels(threshold=0.9) - labels_serial = FiftyOneDetectionLabels(labels=labels) + serial = FiftyOneDetectionLabelsOutput() + filepath_serial = FiftyOneDetectionLabelsOutput(return_filepath=True) + threshold_serial = FiftyOneDetectionLabelsOutput(threshold=0.9) + labels_serial = FiftyOneDetectionLabelsOutput(labels=labels) sample = { DataKeys.PREDS: { diff --git a/tests/image/segmentation/test_output.py b/tests/image/segmentation/test_output.py index 
767bd49ccf..414a3da992 100644 --- a/tests/image/segmentation/test_output.py +++ b/tests/image/segmentation/test_output.py @@ -16,15 +16,15 @@ from flash.core.data.io.input import DataKeys from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _IMAGE_AVAILABLE -from flash.image.segmentation.output import FiftyOneSegmentationLabels, SegmentationLabels +from flash.image.segmentation.output import FiftyOneSegmentationLabelsOutput, SegmentationLabelsOutput from tests.helpers.utils import _IMAGE_TESTING -class TestSemanticSegmentationLabels: +class TestSemanticSegmentationLabelsOutput: @pytest.mark.skipif(not _IMAGE_TESTING, "image libraries aren't installed.") @staticmethod def test_smoke(): - serial = SegmentationLabels() + serial = SegmentationLabelsOutput() assert serial is not None assert serial.labels_map is None assert serial.visualize is False @@ -32,7 +32,7 @@ def test_smoke(): @pytest.mark.skipif(not _IMAGE_TESTING, "image libraries aren't installed.") @staticmethod def test_exception(): - serial = SegmentationLabels() + serial = SegmentationLabelsOutput() with pytest.raises(Exception): sample = torch.zeros(1, 5, 2, 3) @@ -45,7 +45,7 @@ def test_exception(): @pytest.mark.skipif(not _IMAGE_TESTING, "image libraries aren't installed.") @staticmethod def test_serialize(): - serial = SegmentationLabels() + serial = SegmentationLabelsOutput() sample = torch.zeros(5, 2, 3) sample[1, 1, 2] = 1 # add peak in class 2 @@ -59,8 +59,8 @@ def test_serialize(): @pytest.mark.skipif(not _FIFTYONE_AVAILABLE, reason="fiftyone is not installed for testing") @staticmethod def test_serialize_fiftyone(): - serial = FiftyOneSegmentationLabels() - filepath_serial = FiftyOneSegmentationLabels(return_filepath=True) + serial = FiftyOneSegmentationLabelsOutput() + filepath_serial = FiftyOneSegmentationLabelsOutput(return_filepath=True) preds = torch.zeros(5, 2, 3) preds[1, 1, 2] = 1 # add peak in class 2 From 529bd83312a6225999c74444608f745417076572 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 17:01:45 +0100 Subject: [PATCH 02/14] update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 981654fc32..5b347450a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Changed the `SpeechRecognition` task to use `AutoModelForCTC` rather than just `Wav2Vec2ForCTC` ([#874](https://github.com/PyTorchLightning/lightning-flash/pull/874)) +- Added `Output` suffix to `Preds`, `FiftyOneDetectionLabels`, `SegmentationLabels`, `FiftyOneDetectionLabels`, `DetectionLabels` ([#1011](https://github.com/PyTorchLightning/lightning-flash/pull/1011)) + ### Deprecated - Deprecated `flash.core.data.process.Serializer` in favour of `flash.core.data.io.output.Output` ([#927](https://github.com/PyTorchLightning/lightning-flash/pull/927)) From 891ddca7bb8126a6bc0de022e42ee241eb3150d4 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 18:39:35 +0100 Subject: [PATCH 03/14] update --- docs/source/api/core.rst | 10 ++--- docs/source/integrations/fiftyone.rst | 2 +- docs/source/template/optional.rst | 4 +- flash/core/classification.py | 14 +++---- .../fiftyone/image_classification.py | 4 +- .../image_classification_fiftyone_datasets.py | 6 +-- tests/core/test_classification.py | 38 +++++++++++-------- 7 files changed, 43 insertions(+), 35 deletions(-) diff --git a/docs/source/api/core.rst b/docs/source/api/core.rst index 40f94c460a..c1a9426aa0 100644 --- a/docs/source/api/core.rst +++ b/docs/source/api/core.rst @@ -26,14 +26,14 @@ _________________________ :nosignatures: :template: classtemplate.rst - ~flash.core.classification.Classes + ~flash.core.classification.ClassesOutput ~flash.core.classification.ClassificationOutput ~flash.core.classification.ClassificationTask - ~flash.core.classification.FiftyOneLabels - ~flash.core.classification.Labels - ~flash.core.classification.Logits + ~flash.core.classification.FiftyOneLabelsOutput + ~flash.core.classification.LabelsOutput + ~flash.core.classification.LogitsOutput ~flash.core.classification.PredsClassificationOutput - ~flash.core.classification.Probabilities + ~flash.core.classification.ProbabilitiesOutput flash.core.finetuning _____________________ diff --git a/docs/source/integrations/fiftyone.rst b/docs/source/integrations/fiftyone.rst index 6cd23dbd3b..1c64173272 100644 --- a/docs/source/integrations/fiftyone.rst +++ b/docs/source/integrations/fiftyone.rst @@ -46,7 +46,7 @@ You can visualize predictions for classification, object detection, and semantic segmentation tasks. Doing so is as easy as updating your model to use one of the following outputs: -* :class:`FiftyOneLabels(return_filepath=True)` +* :class:`FiftyOneLabelsOutput(return_filepath=True)` * :class:`FiftyOneSegmentationLabelsOutput(return_filepath=True)` * :class:`FiftyOneDetectionLabelsOutput(return_filepath=True)` diff --git a/docs/source/template/optional.rst b/docs/source/template/optional.rst index 335402a7e5..5a37e1ac1b 100644 --- a/docs/source/template/optional.rst +++ b/docs/source/template/optional.rst @@ -34,11 +34,11 @@ Here's the :class:`~flash.core.classification.Classes` :class:`~flash.core.data. :language: python :pyobject: Classes -Alternatively, here's the :class:`~flash.core.classification.Logits` :class:`~flash.core.data.io.output.Output`: +Alternatively, here's the :class:`~flash.core.classification.LogitsOutput` :class:`~flash.core.data.io.output.Output`: .. literalinclude:: ../../../flash/core/classification.py :language: python - :pyobject: Logits + :pyobject: LogitsOutput Take a look at :ref:`predictions` to learn more. 
diff --git a/flash/core/classification.py b/flash/core/classification.py index 39f0bd0c80..d5038c2ca9 100644 --- a/flash/core/classification.py +++ b/flash/core/classification.py @@ -78,7 +78,7 @@ def __init__( *args, loss_fn=loss_fn, metrics=metrics, - output=output or Classes(multi_label=multi_label), + output=output or ClassesOutput(multi_label=multi_label), **kwargs, ) @@ -101,7 +101,7 @@ def __init__( *args, loss_fn=loss_fn, metrics=metrics, - output=output or Classes(multi_label=multi_label), + output=output or ClassesOutput(multi_label=multi_label), **kwargs, ) @@ -136,14 +136,14 @@ def transform(self, sample: Any) -> Any: return sample -class Logits(PredsClassificationOutput): +class LogitsOutput(PredsClassificationOutput): """A :class:`.Output` which simply converts the model outputs (assumed to be logits) to a list.""" def transform(self, sample: Any) -> Any: return super().transform(sample).tolist() -class Probabilities(PredsClassificationOutput): +class ProbabilitiesOutput(PredsClassificationOutput): """A :class:`.Output` which applies a softmax to the model outputs (assumed to be logits) and converts to a list.""" @@ -154,7 +154,7 @@ def transform(self, sample: Any) -> Any: return torch.softmax(sample, -1).tolist() -class Classes(PredsClassificationOutput): +class ClassesOutput(PredsClassificationOutput): """A :class:`.Output` which applies an argmax to the model outputs (either logits or probabilities) and converts to a list. @@ -180,7 +180,7 @@ def transform(self, sample: Any) -> Union[int, List[int]]: return torch.argmax(sample, -1).tolist() -class Labels(Classes): +class LabelsOutput(ClassesOutput): """A :class:`.Output` which converts the model outputs (either logits or probabilities) to the label of the argmax classification. @@ -218,7 +218,7 @@ def transform(self, sample: Any) -> Union[int, List[int], str, List[str]]: return classes -class FiftyOneLabels(ClassificationOutput): +class FiftyOneLabelsOutput(ClassificationOutput): """A :class:`.Output` which converts the model outputs to FiftyOne classification format. 
Args: diff --git a/flash_examples/integrations/fiftyone/image_classification.py b/flash_examples/integrations/fiftyone/image_classification.py index b86e450957..4f4bfaf2cb 100644 --- a/flash_examples/integrations/fiftyone/image_classification.py +++ b/flash_examples/integrations/fiftyone/image_classification.py @@ -16,7 +16,7 @@ import torch import flash -from flash.core.classification import FiftyOneLabels, Labels +from flash.core.classification import FiftyOneLabelsOutput, Labels from flash.core.data.utils import download_data from flash.core.integrations.fiftyone import visualize from flash.image import ImageClassificationData, ImageClassifier @@ -55,7 +55,7 @@ model = ImageClassifier.load_from_checkpoint( "https://flash-weights.s3.amazonaws.com/0.6.0/image_classification_model.pt" ) -model.output = FiftyOneLabels(return_filepath=True) # output FiftyOne format +model.output = FiftyOneLabelsOutput(return_filepath=True) # output FiftyOne format predictions = trainer.predict(model, datamodule=datamodule) predictions = list(chain.from_iterable(predictions)) # flatten batches diff --git a/flash_examples/integrations/fiftyone/image_classification_fiftyone_datasets.py b/flash_examples/integrations/fiftyone/image_classification_fiftyone_datasets.py index f5e6d0b4e3..b119b4995d 100644 --- a/flash_examples/integrations/fiftyone/image_classification_fiftyone_datasets.py +++ b/flash_examples/integrations/fiftyone/image_classification_fiftyone_datasets.py @@ -17,7 +17,7 @@ import torch import flash -from flash.core.classification import FiftyOneLabels, Labels +from flash.core.classification import FiftyOneLabelsOutput, LabelsOutput from flash.core.data.utils import download_data from flash.image import ImageClassificationData, ImageClassifier @@ -49,7 +49,7 @@ model = ImageClassifier( backbone="resnet18", num_classes=datamodule.num_classes, - output=Labels(), + output=LabelsOutput(), ) trainer = flash.Trainer( max_epochs=1, @@ -68,7 +68,7 @@ model = ImageClassifier.load_from_checkpoint( "https://flash-weights.s3.amazonaws.com/0.6.0/image_classification_model.pt" ) -model.output = FiftyOneLabels(return_filepath=False) # output FiftyOne format +model.output = FiftyOneLabelsOutput(return_filepath=False) # output FiftyOne format datamodule = ImageClassificationData.from_fiftyone(predict_dataset=test_dataset) predictions = trainer.predict(model, datamodule=datamodule) predictions = list(chain.from_iterable(predictions)) # flatten batches diff --git a/tests/core/test_classification.py b/tests/core/test_classification.py index d322c1bba3..64214b549a 100644 --- a/tests/core/test_classification.py +++ b/tests/core/test_classification.py @@ -14,7 +14,13 @@ import pytest import torch -from flash.core.classification import Classes, FiftyOneLabels, Labels, Logits, Probabilities +from flash.core.classification import ( + ClassesOutput, + FiftyOneLabelsOutput, + LabelsOutput, + LogitsOutput, + ProbabilitiesOutput, +) from flash.core.data.io.input import DataKeys from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _IMAGE_AVAILABLE @@ -23,23 +29,25 @@ def test_classification_outputs(): example_output = torch.tensor([-0.1, 0.2, 0.3]) # 3 classes labels = ["class_1", "class_2", "class_3"] - assert torch.allclose(torch.tensor(Logits().transform(example_output)), example_output) - assert torch.allclose(torch.tensor(Probabilities().transform(example_output)), torch.softmax(example_output, -1)) - assert Classes().transform(example_output) == 2 - assert Labels(labels).transform(example_output) == "class_3" + 
assert torch.allclose(torch.tensor(LogitsOutput().transform(example_output)), example_output) + assert torch.allclose( + torch.tensor(ProbabilitiesOutput().transform(example_output)), torch.softmax(example_output, -1) + ) + assert ClassesOutput().transform(example_output) == 2 + assert LabelsOutput(labels).transform(example_output) == "class_3" def test_classification_outputs_multi_label(): example_output = torch.tensor([-0.1, 0.2, 0.3]) # 3 classes labels = ["class_1", "class_2", "class_3"] - assert torch.allclose(torch.tensor(Logits(multi_label=True).transform(example_output)), example_output) + assert torch.allclose(torch.tensor(LogitsOutput(multi_label=True).transform(example_output)), example_output) assert torch.allclose( - torch.tensor(Probabilities(multi_label=True).transform(example_output)), + torch.tensor(ProbabilitiesOutput(multi_label=True).transform(example_output)), torch.sigmoid(example_output), ) - assert Classes(multi_label=True).transform(example_output) == [1, 2] - assert Labels(labels, multi_label=True).transform(example_output) == ["class_2", "class_3"] + assert ClassesOutput(multi_label=True).transform(example_output) == [1, 2] + assert LabelsOutput(labels, multi_label=True).transform(example_output) == ["class_2", "class_3"] @pytest.mark.skipif(not _IMAGE_AVAILABLE, reason="image libraries aren't installed.") @@ -50,22 +58,22 @@ def test_classification_outputs_fiftyone(): example_output = {DataKeys.PREDS: logits, DataKeys.METADATA: {"filepath": "something"}} # 3 classes labels = ["class_1", "class_2", "class_3"] - predictions = FiftyOneLabels(return_filepath=True).transform(example_output) + predictions = FiftyOneLabelsOutput(return_filepath=True).transform(example_output) assert predictions["predictions"].label == "2" assert predictions["filepath"] == "something" - predictions = FiftyOneLabels(labels, return_filepath=True).transform(example_output) + predictions = FiftyOneLabelsOutput(labels, return_filepath=True).transform(example_output) assert predictions["predictions"].label == "class_3" assert predictions["filepath"] == "something" - predictions = FiftyOneLabels(store_logits=True).transform(example_output) + predictions = FiftyOneLabelsOutput(store_logits=True).transform(example_output) assert torch.allclose(torch.tensor(predictions.logits), logits) assert torch.allclose(torch.tensor(predictions.confidence), torch.softmax(logits, -1)[-1]) assert predictions.label == "2" - predictions = FiftyOneLabels(labels, store_logits=True).transform(example_output) + predictions = FiftyOneLabelsOutput(labels, store_logits=True).transform(example_output) assert predictions.label == "class_3" - predictions = FiftyOneLabels(store_logits=True, multi_label=True).transform(example_output) + predictions = FiftyOneLabelsOutput(store_logits=True, multi_label=True).transform(example_output) assert torch.allclose(torch.tensor(predictions.logits), logits) assert [c.label for c in predictions.classifications] == ["1", "2"] - predictions = FiftyOneLabels(labels, multi_label=True).transform(example_output) + predictions = FiftyOneLabelsOutput(labels, multi_label=True).transform(example_output) assert [c.label for c in predictions.classifications] == ["class_2", "class_3"] From 8a00f90034be82e2672a79bbe30abd2618623f21 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 18:41:02 +0100 Subject: [PATCH 04/14] update --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b347450a4..cb80ac0c2b 100644 --- a/CHANGELOG.md 
+++ b/CHANGELOG.md @@ -30,7 +30,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Changed the `SpeechRecognition` task to use `AutoModelForCTC` rather than just `Wav2Vec2ForCTC` ([#874](https://github.com/PyTorchLightning/lightning-flash/pull/874)) -- Added `Output` suffix to `Preds`, `FiftyOneDetectionLabels`, `SegmentationLabels`, `FiftyOneDetectionLabels`, `DetectionLabels` ([#1011](https://github.com/PyTorchLightning/lightning-flash/pull/1011)) +- Added `Output` suffix to `Preds`, `FiftyOneDetectionLabels`, `SegmentationLabels`, `FiftyOneDetectionLabels`, `DetectionLabels`, `Classes`, `FiftyOneLabels`, `Labels`, `Logits`, `Probabilities` ([#1011](https://github.com/PyTorchLightning/lightning-flash/pull/1011)) ### Deprecated From 1d9b85ebacbe3628b2267ff8fb0c061b81b5f7cc Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 18:49:00 +0100 Subject: [PATCH 05/14] update --- docs/source/common/finetuning_example.rst | 4 ++-- docs/source/common/training_example.rst | 2 +- docs/source/general/predictions.rst | 4 ++-- flash/image/classification/model.py | 4 ++-- flash/tabular/classification/model.py | 4 ++-- flash/template/classification/model.py | 4 ++-- flash/text/classification/model.py | 4 ++-- flash/video/classification/model.py | 4 ++-- .../baal/image_classification_active_learning.py | 6 ++++-- .../integrations/fiftyone/image_classification.py | 4 ++-- .../integrations/labelstudio/image_classification.py | 4 ++-- .../serve/tabular_classification/inference_server.py | 4 ++-- tests/core/data/io/test_output.py | 4 ++-- tests/image/classification/test_active_learning.py | 6 +++--- tests/image/classification/test_model.py | 4 ++-- 15 files changed, 32 insertions(+), 30 deletions(-) diff --git a/docs/source/common/finetuning_example.rst b/docs/source/common/finetuning_example.rst index 63022a9d7d..60f0d8af7a 100644 --- a/docs/source/common/finetuning_example.rst +++ b/docs/source/common/finetuning_example.rst @@ -15,7 +15,7 @@ Here's an example of finetuning. from pytorch_lightning import seed_everything import flash - from flash.core.classification import Labels + from flash.core.classification import LabelsOutput from flash.core.data.utils import download_data from flash.image import ImageClassificationData, ImageClassifier @@ -56,7 +56,7 @@ Once you've finetuned, use the model to predict: .. testcode:: finetune # Output predictions as labels, automatically inferred from the training data in part 2. - model.output = Labels() + model.output = LabelsOutput() predictions = model.predict( [ diff --git a/docs/source/common/training_example.rst b/docs/source/common/training_example.rst index 9a015cda65..3780c44588 100644 --- a/docs/source/common/training_example.rst +++ b/docs/source/common/training_example.rst @@ -15,7 +15,7 @@ Here's an example: from pytorch_lightning import seed_everything import flash - from flash.core.classification import Labels + from flash.core.classification import LabelsOutput from flash.core.data.utils import download_data from flash.image import ImageClassificationData, ImageClassifier diff --git a/docs/source/general/predictions.rst b/docs/source/general/predictions.rst index da415045bd..328913215e 100644 --- a/docs/source/general/predictions.rst +++ b/docs/source/general/predictions.rst @@ -64,7 +64,7 @@ reference below). .. 
code-block:: python - from flash.core.classification import Probabilities + from flash.core.classification import ProbabilitiesOutput from flash.core.data.utils import download_data from flash.image import ImageClassifier @@ -78,7 +78,7 @@ reference below). ) # 3. Attach the Output - model.output = Probabilities() + model.output = ProbabilitiesOutput() # 4. Predict whether the image contains an ant or a bee predictions = model.predict("data/hymenoptera_data/val/bees/65038344_52a45d090d.jpg") diff --git a/flash/image/classification/model.py b/flash/image/classification/model.py index 8fd6ec742b..0ae8f52327 100644 --- a/flash/image/classification/model.py +++ b/flash/image/classification/model.py @@ -17,7 +17,7 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException from torch import nn -from flash.core.classification import ClassificationAdapterTask, Labels +from flash.core.classification import ClassificationAdapterTask, LabelsOutput from flash.core.registry import FlashRegistry from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE, OUTPUT_TYPE from flash.image.classification.adapters import TRAINING_STRATEGIES @@ -136,7 +136,7 @@ def __init__( optimizer=optimizer, lr_scheduler=lr_scheduler, multi_label=multi_label, - output=output or Labels(multi_label=multi_label), + output=output or LabelsOutput(multi_label=multi_label), ) @classmethod diff --git a/flash/tabular/classification/model.py b/flash/tabular/classification/model.py index de6d2178c5..a4f18201e9 100644 --- a/flash/tabular/classification/model.py +++ b/flash/tabular/classification/model.py @@ -16,7 +16,7 @@ import torch from torch.nn import functional as F -from flash.core.classification import ClassificationTask, Probabilities +from flash.core.classification import ClassificationTask, ProbabilitiesOutput from flash.core.data.io.input import DataKeys from flash.core.utilities.imports import _TABULAR_AVAILABLE from flash.core.utilities.types import LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE, OUTPUT_TYPE @@ -83,7 +83,7 @@ def __init__( metrics=metrics, learning_rate=learning_rate, multi_label=multi_label, - output=output or Probabilities(), + output=output or ProbabilitiesOutput(), ) self.save_hyperparameters() diff --git a/flash/template/classification/model.py b/flash/template/classification/model.py index 5af3e36165..19d2d08ca6 100644 --- a/flash/template/classification/model.py +++ b/flash/template/classification/model.py @@ -16,7 +16,7 @@ import torch from torch import nn -from flash.core.classification import ClassificationTask, Labels +from flash.core.classification import ClassificationTask, LabelsOutput from flash.core.data.io.input import DataKeys from flash.core.registry import FlashRegistry from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE, OUTPUT_TYPE @@ -67,7 +67,7 @@ def __init__( metrics=metrics, learning_rate=learning_rate, multi_label=multi_label, - output=output or Labels(), + output=output or LabelsOutput(), ) self.save_hyperparameters() diff --git a/flash/text/classification/model.py b/flash/text/classification/model.py index 4a468295c4..7c617a3ebd 100644 --- a/flash/text/classification/model.py +++ b/flash/text/classification/model.py @@ -18,7 +18,7 @@ import torch from pytorch_lightning import Callback -from flash.core.classification import ClassificationTask, Labels +from flash.core.classification import ClassificationTask, LabelsOutput from flash.core.data.io.input import DataKeys 
from flash.core.registry import FlashRegistry from flash.core.utilities.imports import _TRANSFORMERS_AVAILABLE @@ -84,7 +84,7 @@ def __init__( metrics=metrics, learning_rate=learning_rate, multi_label=multi_label, - output=output or Labels(multi_label=multi_label), + output=output or LabelsOutput(multi_label=multi_label), ) self.enable_ort = enable_ort self.model = self.backbones.get(backbone)(num_labels=num_classes) diff --git a/flash/video/classification/model.py b/flash/video/classification/model.py index 9f2d26b6c5..3c049c7030 100644 --- a/flash/video/classification/model.py +++ b/flash/video/classification/model.py @@ -22,7 +22,7 @@ from torchmetrics import Accuracy import flash -from flash.core.classification import ClassificationTask, Labels +from flash.core.classification import ClassificationTask, LabelsOutput from flash.core.data.io.input import DataKeys from flash.core.registry import FlashRegistry from flash.core.utilities.compatibility import accelerator_connector @@ -91,7 +91,7 @@ def __init__( lr_scheduler=lr_scheduler, metrics=metrics, learning_rate=learning_rate, - output=output or Labels(), + output=output or LabelsOutput(), ) self.save_hyperparameters() diff --git a/flash_examples/integrations/baal/image_classification_active_learning.py b/flash_examples/integrations/baal/image_classification_active_learning.py index d006ce9312..65864dcc3c 100644 --- a/flash_examples/integrations/baal/image_classification_active_learning.py +++ b/flash_examples/integrations/baal/image_classification_active_learning.py @@ -14,7 +14,7 @@ import torch import flash -from flash.core.classification import Probabilities +from flash.core.classification import ProbabilitiesOutput from flash.core.data.utils import download_data from flash.image import ImageClassificationData, ImageClassifier from flash.image.classification.integrations.baal import ActiveLearningDataModule, ActiveLearningLoop @@ -34,7 +34,9 @@ torch.nn.Dropout(p=0.1), torch.nn.Linear(512, datamodule.num_classes), ) -model = ImageClassifier(backbone="resnet18", head=head, num_classes=datamodule.num_classes, output=Probabilities()) +model = ImageClassifier( + backbone="resnet18", head=head, num_classes=datamodule.num_classes, output=ProbabilitiesOutput() +) # 3.1 Create the trainer diff --git a/flash_examples/integrations/fiftyone/image_classification.py b/flash_examples/integrations/fiftyone/image_classification.py index 4f4bfaf2cb..87eb834aec 100644 --- a/flash_examples/integrations/fiftyone/image_classification.py +++ b/flash_examples/integrations/fiftyone/image_classification.py @@ -16,7 +16,7 @@ import torch import flash -from flash.core.classification import FiftyOneLabelsOutput, Labels +from flash.core.classification import FiftyOneLabelsOutput, LabelsOutput from flash.core.data.utils import download_data from flash.core.integrations.fiftyone import visualize from flash.image import ImageClassificationData, ImageClassifier @@ -36,7 +36,7 @@ model = ImageClassifier( backbone="resnet18", num_classes=datamodule.num_classes, - output=Labels(), + output=LabelsOutput(), ) trainer = flash.Trainer( max_epochs=1, diff --git a/flash_examples/integrations/labelstudio/image_classification.py b/flash_examples/integrations/labelstudio/image_classification.py index 1100e2daed..653efb9751 100644 --- a/flash_examples/integrations/labelstudio/image_classification.py +++ b/flash_examples/integrations/labelstudio/image_classification.py @@ -1,5 +1,5 @@ import flash -from flash.core.classification import Labels +from flash.core.classification 
import LabelsOutput from flash.core.data.utils import download_data from flash.core.integrations.labelstudio.visualizer import launch_app from flash.image import ImageClassificationData, ImageClassifier @@ -30,7 +30,7 @@ # 4. Predict from checkpoint model = ImageClassifier.load_from_checkpoint("image_classification_model.pt") -model.output = Labels() +model.output = LabelsOutput() predictions = model.predict( [ diff --git a/flash_examples/serve/tabular_classification/inference_server.py b/flash_examples/serve/tabular_classification/inference_server.py index e92543f087..af83b6662f 100644 --- a/flash_examples/serve/tabular_classification/inference_server.py +++ b/flash_examples/serve/tabular_classification/inference_server.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from flash.core.classification import Labels +from flash.core.classification import LabelsOutput from flash.tabular import TabularClassifier model = TabularClassifier.load_from_checkpoint( "https://flash-weights.s3.amazonaws.com/0.6.0/tabular_classification_model.pt" ) -model.output = Labels(["Did not survive", "Survived"]) +model.output = LabelsOutput(["Did not survive", "Survived"]) model.serve() diff --git a/tests/core/data/io/test_output.py b/tests/core/data/io/test_output.py index c7626a1b6e..f9913f58b1 100644 --- a/tests/core/data/io/test_output.py +++ b/tests/core/data/io/test_output.py @@ -17,7 +17,7 @@ import torch from torch.utils.data import DataLoader -from flash.core.classification import Labels +from flash.core.classification import LabelsOutput from flash.core.data.data_pipeline import DataPipeline, DataPipelineState from flash.core.data.io.input import LabelsState from flash.core.data.io.input_transform import DefaultInputTransform @@ -44,7 +44,7 @@ class CustomModel(Task): def __init__(self): super().__init__(model=torch.nn.Linear(1, 1), loss_fn=torch.nn.MSELoss()) - output = Labels(["a", "b"]) + output = LabelsOutput(["a", "b"]) model = CustomModel() trainer = Trainer(fast_dev_run=True) data_pipeline = DataPipeline(input_transform=DefaultInputTransform(), output=output) diff --git a/tests/image/classification/test_active_learning.py b/tests/image/classification/test_active_learning.py index 1892e8bcbe..bd625c2e2b 100644 --- a/tests/image/classification/test_active_learning.py +++ b/tests/image/classification/test_active_learning.py @@ -22,7 +22,7 @@ from torch.utils.data import SequentialSampler import flash -from flash.core.classification import Probabilities +from flash.core.classification import ProbabilitiesOutput from flash.core.utilities.imports import _BAAL_AVAILABLE from flash.image import ImageClassificationData, ImageClassifier from flash.image.classification.integrations.baal import ActiveLearningDataModule, ActiveLearningLoop @@ -92,7 +92,7 @@ def test_active_learning_training(simple_datamodule, initial_num_labels, query_s ) model = ImageClassifier( - backbone="resnet18", head=head, num_classes=active_learning_dm.num_classes, output=Probabilities() + backbone="resnet18", head=head, num_classes=active_learning_dm.num_classes, output=ProbabilitiesOutput() ) trainer = flash.Trainer(max_epochs=3) active_learning_loop = ActiveLearningLoop(label_epoch_frequency=1, inference_iteration=3) @@ -144,7 +144,7 @@ def test_no_validation_loop(simple_datamodule): ) model = ImageClassifier( - backbone="resnet18", head=head, 
num_classes=active_learning_dm.num_classes, output=Probabilities() + backbone="resnet18", head=head, num_classes=active_learning_dm.num_classes, output=ProbabilitiesOutput() ) trainer = flash.Trainer(max_epochs=3) active_learning_loop = ActiveLearningLoop(label_epoch_frequency=1, inference_iteration=3) diff --git a/tests/image/classification/test_model.py b/tests/image/classification/test_model.py index 27e9130bd1..42ea267362 100644 --- a/tests/image/classification/test_model.py +++ b/tests/image/classification/test_model.py @@ -20,7 +20,7 @@ from flash import Trainer from flash.__main__ import main -from flash.core.classification import Probabilities +from flash.core.classification import ProbabilitiesOutput from flash.core.data.io.input import DataKeys from flash.core.utilities.imports import _IMAGE_AVAILABLE from flash.image import ImageClassifier @@ -104,7 +104,7 @@ def test_multilabel(tmpdir): num_classes = 4 ds = DummyMultiLabelDataset(num_classes) - model = ImageClassifier(num_classes, multi_label=True, output=Probabilities(multi_label=True)) + model = ImageClassifier(num_classes, multi_label=True, output=ProbabilitiesOutput(multi_label=True)) train_dl = torch.utils.data.DataLoader(ds, batch_size=2) trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, limit_train_batches=5) trainer.finetune(model, train_dl, strategy=("freeze_unfreeze", 1)) From 054e3eea3b7b7903080b92e2a6254754a37abe54 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 21:29:46 +0100 Subject: [PATCH 06/14] update --- flash/core/classification.py | 19 +++++++++++++++++- tests/deprecated_api/__init__.py | 0 tests/deprecated_api/test_remove_0-7.py | 26 +++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 tests/deprecated_api/__init__.py create mode 100644 tests/deprecated_api/test_remove_0-7.py diff --git a/flash/core/classification.py b/flash/core/classification.py index d5038c2ca9..2212bab195 100644 --- a/flash/core/classification.py +++ b/flash/core/classification.py @@ -16,7 +16,7 @@ import torch import torch.nn.functional as F import torchmetrics -from pytorch_lightning.utilities import rank_zero_warn +from pytorch_lightning.utilities import rank_zero_deprecation, rank_zero_warn from flash.core.adapter import AdapterTask from flash.core.data.io.input import DataKeys, LabelsState @@ -338,3 +338,20 @@ def transform( filepath = sample[DataKeys.METADATA]["filepath"] return {"filepath": filepath, "predictions": fo_predictions} return fo_predictions + + +class Labels(LabelsOutput): + def __init__(self): + rank_zero_deprecation( + "`Labels` was deprecated in v0.6.0 and will be removed in v0.7.0." "Please use `LabelsOutput` instead." + ) + super().__init__() + + +class Probabilities(ProbabilitiesOutput): + def __init__(self): + rank_zero_deprecation( + "`Probabilities` was deprecated in v0.6.0 and will be removed in v0.7.0." + "Please use `ProbabilitiesOutput` instead." + ) + super().__init__() diff --git a/tests/deprecated_api/__init__.py b/tests/deprecated_api/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/deprecated_api/test_remove_0-7.py b/tests/deprecated_api/test_remove_0-7.py new file mode 100644 index 0000000000..00f21b7f83 --- /dev/null +++ b/tests/deprecated_api/test_remove_0-7.py @@ -0,0 +1,26 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +from flash.core.classification import Labels, Probabilities + + +def test_v0_7_deprecated_labels(tmpdir): + with pytest.deprecated_call(match="`Labels` was deprecated in v0.6.0 and will be removed in v0.7.0."): + Labels() + + +def test_v0_7_deprecated_probabilities(tmpdir): + with pytest.deprecated_call(match="`Probabilities` was deprecated in v0.6.0 and will be removed in v0.7.0."): + Probabilities() From 7e8536c6506de9cca9279339a219ff9a9eb8d9a8 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Tue, 30 Nov 2021 20:34:10 +0000 Subject: [PATCH 07/14] Apply suggestions from code review --- flash/core/classification.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flash/core/classification.py b/flash/core/classification.py index 9f694f505b..d4796c27a9 100644 --- a/flash/core/classification.py +++ b/flash/core/classification.py @@ -342,17 +342,17 @@ def transform( class Labels(LabelsOutput): - def __init__(self): + def __init__(self, labels: Optional[List[str]] = None, multi_label: bool = False, threshold: float = 0.5): rank_zero_deprecation( "`Labels` was deprecated in v0.6.0 and will be removed in v0.7.0." "Please use `LabelsOutput` instead." ) - super().__init__() + super().__init__(labels=labels, multi_label=multi_label, threshold=threshold) class Probabilities(ProbabilitiesOutput): - def __init__(self): + def __init__(self, multi_label: bool = False): rank_zero_deprecation( "`Probabilities` was deprecated in v0.6.0 and will be removed in v0.7.0." "Please use `ProbabilitiesOutput` instead." ) - super().__init__() + super().__init__(multi_label=multi_label) From d54e40dd8fcb3e4e696f6e4d6df879b6a76d4c76 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Tue, 30 Nov 2021 20:51:11 +0000 Subject: [PATCH 08/14] Fixes --- _notebooks | 2 +- docs/source/template/optional.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/_notebooks b/_notebooks index 0c32582910..57e2a2a97b 160000 --- a/_notebooks +++ b/_notebooks @@ -1 +1 @@ -Subproject commit 0c325829101d5a6ebf32ed99bbf5b09badf04a59 +Subproject commit 57e2a2a97b3b2601bd6c92c826afeff071542aba diff --git a/docs/source/template/optional.rst b/docs/source/template/optional.rst index 5a37e1ac1b..f74262db2d 100644 --- a/docs/source/template/optional.rst +++ b/docs/source/template/optional.rst @@ -28,11 +28,11 @@ Specifically, it should include any formatting and transforms that should always If you want to support different use cases that require different prediction formats, you should add some :class:`~flash.core.data.io.output.Output` implementations in an ``output.py`` file. Some good examples are in `flash/core/classification.py `_. -Here's the :class:`~flash.core.classification.Classes` :class:`~flash.core.data.io.output.Output`: +Here's the :class:`~flash.core.classification.ClassesOutput` :class:`~flash.core.data.io.output.Output`: .. 
literalinclude:: ../../../flash/core/classification.py :language: python - :pyobject: Classes + :pyobject: ClassesOutput Alternatively, here's the :class:`~flash.core.classification.LogitsOutput` :class:`~flash.core.data.io.output.Output`: From 9312fef8f5a7068a553ccf93d3843730d9948c9e Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 21:54:54 +0100 Subject: [PATCH 09/14] update --- flash/core/classification.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flash/core/classification.py b/flash/core/classification.py index 9f694f505b..0cf5fa38db 100644 --- a/flash/core/classification.py +++ b/flash/core/classification.py @@ -342,17 +342,17 @@ def transform( class Labels(LabelsOutput): - def __init__(self): + def __init__(self, *args, **kwargs): rank_zero_deprecation( "`Labels` was deprecated in v0.6.0 and will be removed in v0.7.0." "Please use `LabelsOutput` instead." ) - super().__init__() + super().__init__(*args, **kwargs) class Probabilities(ProbabilitiesOutput): - def __init__(self): + def __init__(self, *args, **kwargs): rank_zero_deprecation( "`Probabilities` was deprecated in v0.6.0 and will be removed in v0.7.0." "Please use `ProbabilitiesOutput` instead." ) - super().__init__() + super().__init__(*args, **kwargs) From 0a83f1e03ec02b5cf40f7ce3a1aec2ef19e0a692 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 22:08:51 +0100 Subject: [PATCH 10/14] purge notebooks --- _notebooks | 1 - 1 file changed, 1 deletion(-) delete mode 160000 _notebooks diff --git a/_notebooks b/_notebooks deleted file mode 160000 index 57e2a2a97b..0000000000 --- a/_notebooks +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 57e2a2a97b3b2601bd6c92c826afeff071542aba From 6d67999250a93f5ccc3464329b95880f7ea5ddb5 Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 22:08:59 +0100 Subject: [PATCH 11/14] purged notebooks --- _notebooks | 1 + 1 file changed, 1 insertion(+) create mode 160000 _notebooks diff --git a/_notebooks b/_notebooks new file mode 160000 index 0000000000..0c32582910 --- /dev/null +++ b/_notebooks @@ -0,0 +1 @@ +Subproject commit 0c325829101d5a6ebf32ed99bbf5b09badf04a59 From d6fff052ac0d54c3683f5c84134fca1d8960e9ee Mon Sep 17 00:00:00 2001 From: tchaton Date: Tue, 30 Nov 2021 22:32:18 +0100 Subject: [PATCH 12/14] update --- tests/examples/test_integrations.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/examples/test_integrations.py b/tests/examples/test_integrations.py index c7f66f8207..afe186294c 100644 --- a/tests/examples/test_integrations.py +++ b/tests/examples/test_integrations.py @@ -17,7 +17,7 @@ import pytest -from flash.core.utilities.imports import _BAAL_AVAILABLE, _FIFTYONE_AVAILABLE, _IMAGE_AVAILABLE, _LEARN2LEARN_AVAILABLE +from flash.core.utilities.imports import _BAAL_AVAILABLE, _FIFTYONE_AVAILABLE, _IMAGE_AVAILABLE from tests.examples.utils import run_test root = Path(__file__).parent.parent.parent @@ -43,7 +43,9 @@ "learn2learn", "image_classification_imagenette_mini.py", marks=pytest.mark.skipif( - not (_IMAGE_AVAILABLE and _LEARN2LEARN_AVAILABLE), reason="learn2learn isn't installed" + # not (_IMAGE_AVAILABLE and _LEARN2LEARN_AVAILABLE), reason="learn2learn isn't installed" + True, + reason="Currently having an issue with their dataset.", ), ), ], From 082b9d130efaf6151c1593951a0c8587e8df5fe2 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Tue, 30 Nov 2021 21:35:18 +0000 Subject: [PATCH 13/14] Skip l2l example test --- tests/examples/test_integrations.py | 9 ++++++--- 1 file changed, 6 
insertions(+), 3 deletions(-) diff --git a/tests/examples/test_integrations.py b/tests/examples/test_integrations.py index c7f66f8207..7527c276de 100644 --- a/tests/examples/test_integrations.py +++ b/tests/examples/test_integrations.py @@ -42,9 +42,12 @@ pytest.param( "learn2learn", "image_classification_imagenette_mini.py", - marks=pytest.mark.skipif( - not (_IMAGE_AVAILABLE and _LEARN2LEARN_AVAILABLE), reason="learn2learn isn't installed" - ), + marks=[ + pytest.mark.skip("MiniImagenet broken: https://github.com/learnables/learn2learn/issues/291"), + pytest.mark.skipif( + not (_IMAGE_AVAILABLE and _LEARN2LEARN_AVAILABLE), reason="learn2learn isn't installed" + ), + ], ), ], ) From 779b646d24afa704f5379ac2b839c3f30b239516 Mon Sep 17 00:00:00 2001 From: Ethan Harris Date: Tue, 30 Nov 2021 21:39:55 +0000 Subject: [PATCH 14/14] Reset notebooks --- _notebooks | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_notebooks b/_notebooks index 57e2a2a97b..0c32582910 160000 --- a/_notebooks +++ b/_notebooks @@ -1 +1 @@ -Subproject commit 57e2a2a97b3b2601bd6c92c826afeff071542aba +Subproject commit 0c325829101d5a6ebf32ed99bbf5b09badf04a59
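
A minimal sketch of the renamed output API (not part of the patch series above), assuming a lightning-flash build that already contains these commits and a working torch install; the expected values mirror the updated tests in tests/core/test_classification.py from PATCH 03/14, and the deprecation behaviour mirrors the shims added in PATCH 06/14.

import torch

from flash.core.classification import Labels, LabelsOutput, ProbabilitiesOutput

logits = torch.tensor([-0.1, 0.2, 0.3])             # mock model output for 3 classes
class_names = ["class_1", "class_2", "class_3"]

# New names introduced by this PR:
print(ProbabilitiesOutput().transform(logits))      # softmax over the logits, as a list
print(LabelsOutput(class_names).transform(logits))  # -> "class_3"

# Old names keep importing until v0.7.0 but emit a deprecation warning:
deprecated = Labels(class_names)                    # warns: `Labels` was deprecated in v0.6.0 ...
print(deprecated.transform(logits))                 # -> "class_3"

# As in the docs and examples changed above, an output can also be attached to a task:
#   model.output = LabelsOutput()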