diff --git a/.circleci/config.yml b/.circleci/config.yml index 0043e4ee67..9897e168da 100755 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -94,7 +94,8 @@ - CHECK_SPEEP: 5 steps: - checkout - - go/install + - go/install: + version: "1.17" - *checkout_ml_testing - gcp-gke/install - gcp-gke/update-kubeconfig-with-credentials: diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 7794336c7a..6f97665f23 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -115,7 +115,6 @@ jobs: pip install torch>=1.7.1 pip install '.[${{ join(matrix.topic, ',') }}]' --upgrade $flag --find-links https://download.pytorch.org/whl/cpu/torch_stable.html pip install '.[test]' --upgrade - pip list shell: bash - name: Install vissl @@ -128,7 +127,7 @@ jobs: if: contains( matrix.topic , 'serve' ) run: | sudo apt-get install libsndfile1 - pip install '.[all,audio]' icevision effdet --upgrade + pip install '.[all,audio]' icevision sahi==0.8.19 effdet --upgrade - name: Install audio test dependencies if: contains( matrix.topic , 'audio' ) diff --git a/CHANGELOG.md b/CHANGELOG.md index 024a1413b2..6a396dd4b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
### Fixed +- Fixed compatibility with `lightning==1.8.0` ([#1479](https://github.com/Lightning-AI/lightning-flash/pull/1479)) - Fixed the error message to suggest installing `icevision`, if it's not found while loading data ([#1474](https://github.com/Lightning-AI/lightning-flash/pull/1474)) - Fixed compatibility with `torchmetrics==1.10.0` ([#1469](https://github.com/Lightning-AI/lightning-flash/pull/1469)) diff --git a/docs/source/general/training.rst b/docs/source/general/training.rst index f7d00cee97..f310a9026a 100644 --- a/docs/source/general/training.rst +++ b/docs/source/general/training.rst @@ -56,6 +56,6 @@ Flash tasks supports many advanced training functionalities out-of-the-box, such .. code-block:: python # Train on TPUs - flash.Trainer(tpu_cores=8) + flash.Trainer(accelerator="tpu", devices=8) You can add to the flash Trainer any argument from the Lightning trainer! Learn more about the Lightning Trainer `here `_. diff --git a/flash/core/trainer.py b/flash/core/trainer.py index d3dead99fe..b0ee93e808 100644 --- a/flash/core/trainer.py +++ b/flash/core/trainer.py @@ -187,9 +187,9 @@ def predict( Returns a list of dictionaries, one for each provided dataloader containing their respective predictions. """ # Note: Prediction on TPU device with multi cores is not supported yet - if isinstance(self.accelerator, TPUAccelerator) and self.tpu_cores > 1: + if isinstance(self.accelerator, TPUAccelerator) and self.num_devices > 1: raise NotImplementedError( - f"Prediction on TPU device with multi-cores (requested cores: {self.tpu_cores}) is not supported yet." + f"Prediction on TPU device with multi-cores (requested cores: {self.num_devices}) is not supported yet."
) model = model or self.lightning_module output_transform = getattr(model, "_output_transform", None) or OutputTransform() diff --git a/flash/core/utilities/imports.py b/flash/core/utilities/imports.py index 9f067fb900..810e878b91 100644 --- a/flash/core/utilities/imports.py +++ b/flash/core/utilities/imports.py @@ -87,6 +87,7 @@ class Image: _PL_GREATER_EQUAL_1_4_0 = compare_version("pytorch_lightning", operator.ge, "1.4.0") _PL_GREATER_EQUAL_1_5_0 = compare_version("pytorch_lightning", operator.ge, "1.5.0") _PL_GREATER_EQUAL_1_6_0 = compare_version("pytorch_lightning", operator.ge, "1.6.0rc0") + _PL_GREATER_EQUAL_1_8_0 = compare_version("pytorch_lightning", operator.ge, "1.8.0") _PANDAS_GREATER_EQUAL_1_3_0 = compare_version("pandas", operator.ge, "1.3.0") _ICEVISION_GREATER_EQUAL_0_11_0 = compare_version("icevision", operator.ge, "0.11.0") _TM_GREATER_EQUAL_0_7_0 = compare_version("torchmetrics", operator.ge, "0.7.0") diff --git a/requirements/datatype_image_extras.txt b/requirements/datatype_image_extras.txt index 71ecbd4375..5a5d6556f2 100644 --- a/requirements/datatype_image_extras.txt +++ b/requirements/datatype_image_extras.txt @@ -3,6 +3,7 @@ fiftyone classy_vision vissl>=0.1.5 icevision>=0.8 +sahi >=0.8.19,<0.11.0 icedata effdet kornia>=0.5.1 diff --git a/tests/core/test_model.py b/tests/core/test_model.py index 65ec60052b..b6a7a3437e 100644 --- a/tests/core/test_model.py +++ b/tests/core/test_model.py @@ -44,6 +44,7 @@ _GRAPH_TESTING, _IMAGE_AVAILABLE, _IMAGE_TESTING, + _PL_GREATER_EQUAL_1_8_0, _TABULAR_TESTING, _TEXT_TESTING, _TORCH_OPTIMIZER_AVAILABLE, @@ -471,7 +472,10 @@ def train_dataloader(self): assert task.get_num_training_steps() == batch_count assert isinstance(trainer.optimizers[0], torch.optim.Adadelta) - assert isinstance(trainer.lr_schedulers[0]["scheduler"], torch.optim.lr_scheduler.LambdaLR) + if _PL_GREATER_EQUAL_1_8_0: + assert isinstance(trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.LambdaLR) + else: + assert 
isinstance(trainer.lr_schedulers[0]["scheduler"], torch.optim.lr_scheduler.LambdaLR) @pytest.mark.skipif(not _CORE_TESTING, reason="Not testing core.") diff --git a/tests/examples/test_scripts.py b/tests/examples/test_scripts.py index 77254d3b49..8337fad2c1 100644 --- a/tests/examples/test_scripts.py +++ b/tests/examples/test_scripts.py @@ -120,6 +120,7 @@ "template.py", marks=[ pytest.mark.skipif(not _CORE_TESTING, reason="Not testing core."), + pytest.mark.skipif(os.name == "posix", reason="Flaky on Mac OS (CI)"), pytest.mark.skipif(sys.version_info >= (3, 9), reason="Undiagnosed segmentation fault in 3.9"), ], ), diff --git a/tests/helpers/task_tester.py b/tests/helpers/task_tester.py index 9e0026007e..5495fa646c 100644 --- a/tests/helpers/task_tester.py +++ b/tests/helpers/task_tester.py @@ -102,11 +102,9 @@ def _test_jit_trace(self, tmpdir): path = os.path.join(tmpdir, "test.pt") model = self.instantiated_task - trainer = self.instantiated_trainer model.eval() - model.trainer = trainer - model = torch.jit.trace(model, self.example_forward_input) + model = model.to_torchscript(method="trace", example_inputs=self.example_forward_input) torch.jit.save(model, path) model = torch.jit.load(path)