From d4bcafad7a64d7c39598fa7e4e33b81a1be31828 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Fri, 26 Aug 2022 18:56:56 +0200
Subject: [PATCH] Remove the deprecated loop output format (#14373)

---
 src/pytorch_lightning/CHANGELOG.md            |   6 +
 .../loops/epoch/training_epoch_loop.py        |  39 +-----
 src/pytorch_lightning/loops/utilities.py      |   9 +-
 .../deprecated_api/test_remove_1-8.py         |  50 --------
 .../loops/epoch/test_training_epoch_loop.py   | 115 +++---------------
 tests/tests_pytorch/loops/test_utilities.py   |  26 +---
 6 files changed, 27 insertions(+), 218 deletions(-)

diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md
index 6c2e54197fa7d..4b5e3a893429e 100644
--- a/src/pytorch_lightning/CHANGELOG.md
+++ b/src/pytorch_lightning/CHANGELOG.md
@@ -81,6 +81,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed the deprecated `DistributedType` and `DeviceType` enum classes ([#14045](https://github.com/Lightning-AI/lightning/pull/14045))
 
 
+- Removed the deprecated `on_train_batch_end(outputs)` format when multiple optimizers are used and TBPTT is enabled ([#14373](https://github.com/PyTorchLightning/pytorch-lightning/pull/14373))
+
+
+- Removed the deprecated `training_epoch_end(outputs)` format when multiple optimizers are used and TBPTT is enabled ([#14373](https://github.com/PyTorchLightning/pytorch-lightning/pull/14373))
+
+
 - Removed the experimental `pytorch_lightning.utiltiies.meta` functions in favor of built-in https://github.com/pytorch/torchdistx support ([#13868](https://github.com/Lightning-AI/lightning/pull/13868))
 
 
diff --git a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py
index 6b3bc38400803..0fe93ca7104b6 100644
--- a/src/pytorch_lightning/loops/epoch/training_epoch_loop.py
+++ b/src/pytorch_lightning/loops/epoch/training_epoch_loop.py
@@ -22,7 +22,7 @@
 from pytorch_lightning import loops  # import as loops to avoid circular imports
 from pytorch_lightning.loops.batch import TrainingBatchLoop
 from pytorch_lightning.loops.batch.training_batch_loop import _OUTPUTS_TYPE as _BATCH_OUTPUTS_TYPE
-from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached, _v1_8_output_format
+from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached
 from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
 from pytorch_lightning.trainer.progress import BatchProgress, SchedulerProgress
 from pytorch_lightning.trainer.supporters import CombinedLoader
@@ -31,7 +31,7 @@
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
 from pytorch_lightning.utilities.model_helpers import is_overridden
-from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
+from pytorch_lightning.utilities.rank_zero import rank_zero_warn
 from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
 from pytorch_lightning.utilities.warnings import WarningCache
 
@@ -342,24 +342,6 @@ def _prepare_outputs_training_batch_end(
         )
 
         array = np.array(batch_output, dtype=object)
-        # TODO: remove in v1.8
-        if (
-            num_optimizers > 1
-            and lightning_module.truncated_bptt_steps > 0
-            and is_overridden("on_train_batch_end", lightning_module)
-            and not _v1_8_output_format(lightning_module.on_train_batch_end)
-        ):
-            rank_zero_deprecation(
- "You are training with multiple optimizers AND truncated backpropagation through time enabled." - " The current format of the `on_train_batch_end(outputs, ...)` is a 2d list with sizes" - " (n_optimizers, tbptt_steps), however, this has been deprecated and will change in version v1.8 to" - " (tbptt_steps, n_optimizers). You can update your code by adding the following parameter to your" - " hook signature: `on_train_batch_end(outputs, ..., new_format=True)`." - ) - # (tbptt_steps, n_opt) -> (n_opt, tbptt_steps) - if array.ndim == 1: - array = np.expand_dims(array, 1) - array = array.transpose((1, 0)) # squeeze all single-element dimensions array = array.squeeze() array = array.tolist() @@ -384,23 +366,6 @@ def _prepare_outputs_training_epoch_end( ) array = _recursive_pad(batch_outputs) - # TODO: remove in v1.8 - if ( - num_optimizers > 1 - and lightning_module.truncated_bptt_steps > 0 - and not _v1_8_output_format(lightning_module.on_train_epoch_end) - ): - rank_zero_deprecation( - "You are training with multiple optimizers AND truncated backpropagation through time enabled." - " The current format of the `training_epoch_end(outputs)` is a 3d list with sizes" - " (n_optimizers, n_batches, tbptt_steps), however, this has been deprecated and will change in version" - " v1.8 to (n_batches, tbptt_steps, n_optimizers). You can update your code by adding the following" - " parameter to your hook signature: `training_epoch_end(outputs, new_format=True)`." - ) - # (n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps) - if array.ndim == 2: - array = np.expand_dims(array, 2) - array = array.transpose((2, 0, 1)) # squeeze all single-element dimensions array = array.squeeze() array = array.tolist() diff --git a/src/pytorch_lightning/loops/utilities.py b/src/pytorch_lightning/loops/utilities.py index 491af6605c135..9b8ec84ba3661 100644 --- a/src/pytorch_lightning/loops/utilities.py +++ b/src/pytorch_lightning/loops/utilities.py @@ -11,11 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import inspect from collections import OrderedDict from contextlib import contextmanager from functools import lru_cache -from typing import Any, Callable, Generator, List, Optional, Sequence, Tuple +from typing import Any, Generator, List, Optional, Sequence, Tuple import numpy as np import torch @@ -216,12 +215,6 @@ def _reset_progress(loop: Loop) -> None: _reset_progress(v) -# TODO: remove in v1.8 -def _v1_8_output_format(fx: Callable) -> bool: - parameters = inspect.signature(fx).parameters - return "new_format" in parameters and parameters["new_format"].default is True - - def _set_sampler_epoch(dataloader: DataLoader, epoch: int) -> None: """Calls the ``set_epoch`` method on either the sampler or the batch sampler of the given dataloader. 
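
Note (illustration only, not part of the patch): the `_v1_8_output_format` helper deleted above selected the output layout purely by signature inspection: a hook opted into the v1.8 layout by declaring a `new_format=True` default parameter. Since both the helper and its test are removed in this PR, here is a minimal runnable sketch of that mechanism; the helper body is copied from the deleted code, while the two example hooks are hypothetical.

import inspect
from typing import Callable

def _v1_8_output_format(fx: Callable) -> bool:
    # The new layout applied only when the hook explicitly defaulted `new_format` to True.
    parameters = inspect.signature(fx).parameters
    return "new_format" in parameters and parameters["new_format"].default is True

def training_epoch_end(outputs):  # legacy signature, old layout
    ...

def training_epoch_end_opted_in(outputs, new_format=True):  # opted in, new layout
    ...

assert not _v1_8_output_format(training_epoch_end)
assert _v1_8_output_format(training_epoch_end_opted_in)
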
diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index a69071fd67610..e1fb0b57654c0 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -38,7 +38,6 @@ from pytorch_lightning.utilities.apply_func import move_data_to_device from pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn -from tests_pytorch.deprecated_api import no_deprecated_call from tests_pytorch.helpers.runif import RunIf from tests_pytorch.helpers.torchtext_utils import get_dummy_torchtext_data_iterator @@ -584,55 +583,6 @@ def test_v1_8_0_weights_save_path(tmpdir): _ = trainer.weights_save_path -def test_deprecated_epoch_outputs_format(tmpdir): - class DeprecationModel(BoringModel): - def __init__(self): - super().__init__() - self.truncated_bptt_steps = 1 - - def training_step(self, batch, batch_idx, optimizer_idx, hiddens): - output = super().training_step(batch, batch_idx) - output["hiddens"] = hiddens - return output - - def tbptt_split_batch(self, batch, split_size): - return [batch, batch] - - def training_epoch_end(self, outputs): - ... - - def on_train_batch_end(self, outputs, batch, batch_idx) -> None: - ... - - def configure_optimizers(self): - return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())] - - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True) - model = DeprecationModel() - batch_match = r"on_train_batch_end.*will change in version v1.8 to \(tbptt_steps, n_optimizers\)" - with pytest.deprecated_call(match=batch_match): - trainer.fit(model) - - class DeprecationModel2(DeprecationModel): - def on_train_batch_end(self, *args, new_format=True): - ... - - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True) - model = DeprecationModel() - epoch_match = r"training_epoch_end.*will change in version v1.8 to \(n_batches, tbptt_steps, n_optimizers\)" - with pytest.deprecated_call(match=epoch_match): - trainer.fit(model) - - class NoDeprecationModel(DeprecationModel2): - def training_epoch_end(self, outputs, new_format=True): - ... - - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True) - model = NoDeprecationModel() - with no_deprecated_call(match="will change in version v1.8.*new_format=True"): - trainer.fit(model) - - @pytest.mark.flaky(reruns=3) @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])]) def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list): diff --git a/tests/tests_pytorch/loops/epoch/test_training_epoch_loop.py b/tests/tests_pytorch/loops/epoch/test_training_epoch_loop.py index d53871116e8b7..d601542075004 100644 --- a/tests/tests_pytorch/loops/epoch/test_training_epoch_loop.py +++ b/tests/tests_pytorch/loops/epoch/test_training_epoch_loop.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest import mock from unittest.mock import patch import pytest @@ -20,7 +19,6 @@ from pytorch_lightning.demos.boring_classes import BoringModel from pytorch_lightning.loops import TrainingEpochLoop from pytorch_lightning.trainer.trainer import Trainer -from tests_pytorch.deprecated_api import no_deprecated_call _out00 = {"loss": 0.0} _out01 = {"loss": 0.1} @@ -33,43 +31,33 @@ class TestPrepareOutputs: - def prepare_outputs(self, fn, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization): + def prepare_outputs(self, fn, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization): lightning_module = LightningModule() - lightning_module.on_train_batch_end = lambda *_: None # override to trigger the deprecation message lightning_module.automatic_optimization = automatic_optimization lightning_module.truncated_bptt_steps = tbptt_splits - match = "will change in version v1.8.*new_format=True" - will_warn = tbptt_splits and num_optimizers > 1 and not new_format - ctx_manager = pytest.deprecated_call if will_warn else no_deprecated_call - with ctx_manager(match=match): - with mock.patch( - "pytorch_lightning.loops.epoch.training_epoch_loop._v1_8_output_format", return_value=new_format - ): - return fn( - batch_outputs, - lightning_module=lightning_module, - num_optimizers=num_optimizers, # does not matter for manual optimization - ) + return fn( + batch_outputs, + lightning_module=lightning_module, + num_optimizers=num_optimizers, # does not matter for manual optimization + ) def prepare_outputs_training_epoch_end( - self, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization=True + self, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization=True ): return self.prepare_outputs( TrainingEpochLoop._prepare_outputs_training_epoch_end, tbptt_splits, - new_format, batch_outputs, num_optimizers, automatic_optimization=automatic_optimization, ) def prepare_outputs_training_batch_end( - self, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization=True + self, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization=True ): return self.prepare_outputs( TrainingEpochLoop._prepare_outputs_training_batch_end, tbptt_splits, - new_format, batch_outputs, num_optimizers, automatic_optimization=automatic_optimization, @@ -97,53 +85,19 @@ def prepare_outputs_training_batch_end( ), # 1 batch, tbptt with 2 splits (uneven) (1, 2, [[{0: _out00}, {0: _out01}], [{0: _out03}]], [[_out00, _out01], [_out03]]), - ], - ) - @pytest.mark.parametrize("new_format", (False, True)) - def test_prepare_outputs_training_epoch_end_automatic( - self, num_optimizers, tbptt_splits, batch_outputs, expected, new_format - ): - """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook - currently expects in the case of automatic optimization.""" - assert ( - self.prepare_outputs_training_epoch_end(tbptt_splits, new_format, batch_outputs, num_optimizers) == expected - ) - - @pytest.mark.parametrize( - "num_optimizers,tbptt_splits,batch_outputs,expected", - [ - # 3 batches, tbptt with 2 splits, 2 optimizers alternating - ( - 2, - 2, - [[{0: _out00}, {0: _out01}], [{1: _out10}, {1: _out11}], [{0: _out02}, {0: _out03}]], - [[[_out00, _out01], [], [_out02, _out03]], [[], [_out10, _out11], []]], - ) - ], - ) - def test_prepare_outputs_training_epoch_end_automatic_old_format( - self, num_optimizers, tbptt_splits, batch_outputs, expected - ): - assert 
self.prepare_outputs_training_epoch_end(tbptt_splits, False, batch_outputs, num_optimizers) == expected - - @pytest.mark.parametrize( - "num_optimizers,tbptt_splits,batch_outputs,expected", - [ # 3 batches, tbptt with 2 splits, 2 optimizers alternating ( 2, 2, [[{0: _out00}, {0: _out01}], [{1: _out10}, {1: _out11}], [{0: _out02}, {0: _out03}]], [[[_out00], [_out01]], [[_out10], [_out11]], [[_out02], [_out03]]], - ) + ), ], ) - def test_prepare_outputs_training_epoch_end_automatic_new_format( - self, num_optimizers, tbptt_splits, batch_outputs, expected - ): + def test_prepare_outputs_training_epoch_end_automatic(self, num_optimizers, tbptt_splits, batch_outputs, expected): """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook currently expects in the case of automatic optimization.""" - assert self.prepare_outputs_training_epoch_end(tbptt_splits, True, batch_outputs, num_optimizers) == expected + assert self.prepare_outputs_training_epoch_end(tbptt_splits, batch_outputs, num_optimizers) == expected @pytest.mark.parametrize( "batch_outputs,expected", @@ -160,14 +114,10 @@ def test_prepare_outputs_training_epoch_end_automatic_new_format( ([[_out00, _out01], [_out02, _out03], [], [_out10]], [[_out00, _out01], [_out02, _out03], [_out10]]), ], ) - @pytest.mark.parametrize("new_format", (False, True)) - def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected, new_format): + def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected): """Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook currently expects in the case of manual optimization.""" - assert ( - self.prepare_outputs_training_epoch_end(0, new_format, batch_outputs, -1, automatic_optimization=False) - == expected - ) + assert self.prepare_outputs_training_epoch_end(0, batch_outputs, -1, automatic_optimization=False) == expected @pytest.mark.parametrize( "num_optimizers,tbptt_splits,batch_end_outputs,expected", @@ -180,47 +130,17 @@ def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected (2, 0, [{0: _out00, 1: _out01}], [_out00, _out01]), # tbptt with 2 splits (1, 2, [{0: _out00}, {0: _out01}], [_out00, _out01]), + # 2 optimizers, tbptt with 2 splits + (2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out01], [_out10, _out11]]), ], ) - @pytest.mark.parametrize("new_format", (False, True)) def test_prepare_outputs_training_batch_end_automatic( - self, num_optimizers, tbptt_splits, batch_end_outputs, expected, new_format - ): - """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook - currently expects in the case of automatic optimization.""" - - assert ( - self.prepare_outputs_training_batch_end(tbptt_splits, new_format, batch_end_outputs, num_optimizers) - == expected - ) - - @pytest.mark.parametrize( - "num_optimizers,tbptt_splits,batch_end_outputs,expected", - # 2 optimizers, tbptt with 2 splits - [(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out10], [_out01, _out11]])], - ) - def test_prepare_outputs_training_batch_end_automatic_old_format( self, num_optimizers, tbptt_splits, batch_end_outputs, expected ): """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook currently expects in the case of automatic optimization.""" - assert ( - self.prepare_outputs_training_batch_end(tbptt_splits, False, batch_end_outputs, 
num_optimizers) == expected - ) - @pytest.mark.parametrize( - "num_optimizers,tbptt_splits,batch_end_outputs,expected", - # 2 optimizers, tbptt with 2 splits - [(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out01], [_out10, _out11]])], - ) - def test_prepare_outputs_training_batch_end_automatic_new_format( - self, num_optimizers, tbptt_splits, batch_end_outputs, expected - ): - """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook - currently expects in the case of automatic optimization.""" - assert ( - self.prepare_outputs_training_batch_end(tbptt_splits, True, batch_end_outputs, num_optimizers) == expected - ) + assert self.prepare_outputs_training_batch_end(tbptt_splits, batch_end_outputs, num_optimizers) == expected @pytest.mark.parametrize( "batch_end_outputs,expected", @@ -237,8 +157,7 @@ def test_prepare_outputs_training_batch_end_manual(self, batch_end_outputs, expe """Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook currently expects in the case of manual optimization.""" assert ( - self.prepare_outputs_training_batch_end(0, False, batch_end_outputs, -1, automatic_optimization=False) - == expected + self.prepare_outputs_training_batch_end(0, batch_end_outputs, -1, automatic_optimization=False) == expected ) diff --git a/tests/tests_pytorch/loops/test_utilities.py b/tests/tests_pytorch/loops/test_utilities.py index 914c1de8e115b..2bd86d325806d 100644 --- a/tests/tests_pytorch/loops/test_utilities.py +++ b/tests/tests_pytorch/loops/test_utilities.py @@ -16,7 +16,7 @@ import pytest import torch -from pytorch_lightning.loops.utilities import _extract_hiddens, _set_sampler_epoch, _v1_8_output_format +from pytorch_lightning.loops.utilities import _extract_hiddens, _set_sampler_epoch from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -41,30 +41,6 @@ def test_extract_hiddens(): _extract_hiddens(None, 1) -def test_v1_8_output_format(): - # old format - def training_epoch_end(outputs): - ... - - assert not _v1_8_output_format(training_epoch_end) - - def training_epoch_end(outputs, new_format=1): - ... - - assert not _v1_8_output_format(training_epoch_end) - - def training_epoch_end(outputs, new_format=False): - ... - - assert not _v1_8_output_format(training_epoch_end) - - # new format - def training_epoch_end(outputs, new_format=True): - ... - - assert _v1_8_output_format(training_epoch_end) - - def test_set_sampler_epoch(): # No samplers dataloader = Mock()
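
Note (illustration only, not part of the patch): with the compatibility branches gone, loop outputs keep their natural nesting when multiple optimizers and TBPTT are combined: `on_train_batch_end` receives (tbptt_steps, n_optimizers) and `training_epoch_end` receives (n_batches, tbptt_steps, n_optimizers). Below is a minimal sketch of the batch-end case, using placeholder strings instead of real `training_step` output dicts; the legacy layout is recreated with the same `transpose((1, 0))` call the removed branch used.

import numpy as np

# One batch, 2 TBPTT splits, 2 optimizers: each split yields one output per
# optimizer, keyed by optimizer index (placeholders instead of loss dicts).
splits = [
    {0: "out00", 1: "out01"},  # split 0
    {0: "out10", 1: "out11"},  # split 1
]

# Layout that remains after this PR: (tbptt_steps, n_optimizers).
new_format = [[split[opt_idx] for opt_idx in sorted(split)] for split in splits]
assert new_format == [["out00", "out01"], ["out10", "out11"]]

# Legacy layout produced by the removed compatibility branch:
# (n_optimizers, tbptt_steps), i.e. the transpose of the above.
legacy_format = np.array(new_format, dtype=object).transpose((1, 0)).tolist()
assert legacy_format == [["out00", "out10"], ["out01", "out11"]]

This matches the expectations asserted by the consolidated `test_prepare_outputs_training_batch_end_automatic` parametrization above.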