Remove the deprecated loop output format (#14373)
carmocca authored Aug 26, 2022
1 parent ed84d04 commit d4bcafa
Showing 6 changed files with 27 additions and 218 deletions.
6 changes: 6 additions & 0 deletions src/pytorch_lightning/CHANGELOG.md
@@ -81,6 +81,12 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed the deprecated `DistributedType` and `DeviceType` enum classes ([#14045](https://github.com/Lightning-AI/lightning/pull/14045))


- Removed the deprecated `on_train_batch_end(outputs)` format when multiple optimizers are used and TBPTT is enabled ([#14373](https://github.com/PyTorchLightning/pytorch-lightning/pull/14373))


- Removed the deprecated `training_epoch_end(outputs)` format when multiple optimizers are used and TBPTT is enabled ([#14373](https://github.com/PyTorchLightning/pytorch-lightning/pull/14373))


- Removed the experimental `pytorch_lightning.utiltiies.meta` functions in favor of built-in https://github.com/pytorch/torchdistx support ([#13868](https://github.com/Lightning-AI/lightning/pull/13868))


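For users combining multiple optimizers with truncated backpropagation through time, outputs now only arrive in the layout that the removed deprecation messages described: `on_train_batch_end(outputs, ...)` receives a `(tbptt_steps, n_optimizers)` nesting and `training_epoch_end(outputs)` a `(n_batches, tbptt_steps, n_optimizers)` nesting, with single-element dimensions squeezed. The sketch below is illustrative only, assumes a pytorch_lightning release around 1.8 is installed, and mirrors the `DeprecationModel` deleted from the tests further down; the class name is made up.

```python
# Illustrative sketch (not part of this commit): a module with two optimizers
# and truncated BPTT reading the outputs in the only remaining layout.
import torch
from pytorch_lightning.demos.boring_classes import BoringModel


class MultiOptTBPTTModel(BoringModel):
    def __init__(self):
        super().__init__()
        self.truncated_bptt_steps = 2

    def training_step(self, batch, batch_idx, optimizer_idx, hiddens):
        output = super().training_step(batch, batch_idx)
        output["hiddens"] = hiddens
        return output

    def tbptt_split_batch(self, batch, split_size):
        return [batch, batch]

    def configure_optimizers(self):
        return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]

    def on_train_batch_end(self, outputs, batch, batch_idx):
        # `outputs` is nested as (tbptt_steps, n_optimizers):
        # outputs[split_idx][optimizer_idx] is the dict returned by training_step.
        ...

    def training_epoch_end(self, outputs):
        # `outputs` is nested as (n_batches, tbptt_steps, n_optimizers),
        # with single-element dimensions squeezed out by the loop.
        ...
```

Fitting such a model with `Trainer(fast_dev_run=True)` should no longer emit the deprecation warning removed in the diff below.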
39 changes: 2 additions & 37 deletions src/pytorch_lightning/loops/epoch/training_epoch_loop.py
@@ -22,7 +22,7 @@
from pytorch_lightning import loops # import as loops to avoid circular imports
from pytorch_lightning.loops.batch import TrainingBatchLoop
from pytorch_lightning.loops.batch.training_batch_loop import _OUTPUTS_TYPE as _BATCH_OUTPUTS_TYPE
from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached, _v1_8_output_format
from pytorch_lightning.loops.utilities import _get_active_optimizers, _is_max_limit_reached
from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
from pytorch_lightning.trainer.progress import BatchProgress, SchedulerProgress
from pytorch_lightning.trainer.supporters import CombinedLoader
@@ -31,7 +31,7 @@
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.warnings import WarningCache

@@ -342,24 +342,6 @@ def _prepare_outputs_training_batch_end(
)

array = np.array(batch_output, dtype=object)
# TODO: remove in v1.8
if (
num_optimizers > 1
and lightning_module.truncated_bptt_steps > 0
and is_overridden("on_train_batch_end", lightning_module)
and not _v1_8_output_format(lightning_module.on_train_batch_end)
):
rank_zero_deprecation(
"You are training with multiple optimizers AND truncated backpropagation through time enabled."
" The current format of the `on_train_batch_end(outputs, ...)` is a 2d list with sizes"
" (n_optimizers, tbptt_steps), however, this has been deprecated and will change in version v1.8 to"
" (tbptt_steps, n_optimizers). You can update your code by adding the following parameter to your"
" hook signature: `on_train_batch_end(outputs, ..., new_format=True)`."
)
# (tbptt_steps, n_opt) -> (n_opt, tbptt_steps)
if array.ndim == 1:
array = np.expand_dims(array, 1)
array = array.transpose((1, 0))
# squeeze all single-element dimensions
array = array.squeeze()
array = array.tolist()
@@ -384,23 +366,6 @@ def _prepare_outputs_training_epoch_end(
)

array = _recursive_pad(batch_outputs)
# TODO: remove in v1.8
if (
num_optimizers > 1
and lightning_module.truncated_bptt_steps > 0
and not _v1_8_output_format(lightning_module.on_train_epoch_end)
):
rank_zero_deprecation(
"You are training with multiple optimizers AND truncated backpropagation through time enabled."
" The current format of the `training_epoch_end(outputs)` is a 3d list with sizes"
" (n_optimizers, n_batches, tbptt_steps), however, this has been deprecated and will change in version"
" v1.8 to (n_batches, tbptt_steps, n_optimizers). You can update your code by adding the following"
" parameter to your hook signature: `training_epoch_end(outputs, new_format=True)`."
)
# (n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps)
if array.ndim == 2:
array = np.expand_dims(array, 2)
array = array.transpose((2, 0, 1))
# squeeze all single-element dimensions
array = array.squeeze()
array = array.tolist()
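The two deleted branches only existed to transpose the loop's internal layout back into the pre-1.8 ordering before handing outputs to the hooks; what remains is the squeeze-and-tolist path. Below is a standalone numpy sketch of the batch-end conversion the first branch performed, with dummy dicts standing in for `training_step` results (variable names are illustrative).

```python
# Standalone sketch of the removed on_train_batch_end compatibility transpose:
# the loop collects outputs as (tbptt_steps, n_optimizers); the old hook format
# expected (n_optimizers, tbptt_steps).
import numpy as np

# 2 TBPTT splits x 2 optimizers
batch_output = [
    [{"loss": 0.0}, {"loss": 0.1}],  # split 0: optimizer 0, optimizer 1
    [{"loss": 0.2}, {"loss": 0.3}],  # split 1: optimizer 0, optimizer 1
]

array = np.array(batch_output, dtype=object)

old_format = array.transpose((1, 0)).tolist()  # removed: (n_optimizers, tbptt_steps)
new_format = array.squeeze().tolist()          # kept: (tbptt_steps, n_optimizers)

print(old_format)  # [[{'loss': 0.0}, {'loss': 0.2}], [{'loss': 0.1}, {'loss': 0.3}]]
print(new_format)  # [[{'loss': 0.0}, {'loss': 0.1}], [{'loss': 0.2}, {'loss': 0.3}]]
```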
9 changes: 1 addition & 8 deletions src/pytorch_lightning/loops/utilities.py
@@ -11,11 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from collections import OrderedDict
from contextlib import contextmanager
from functools import lru_cache
from typing import Any, Callable, Generator, List, Optional, Sequence, Tuple
from typing import Any, Generator, List, Optional, Sequence, Tuple

import numpy as np
import torch
@@ -216,12 +215,6 @@ def _reset_progress(loop: Loop) -> None:
_reset_progress(v)


# TODO: remove in v1.8
def _v1_8_output_format(fx: Callable) -> bool:
parameters = inspect.signature(fx).parameters
return "new_format" in parameters and parameters["new_format"].default is True


def _set_sampler_epoch(dataloader: DataLoader, epoch: int) -> None:
"""Calls the ``set_epoch`` method on either the sampler or the batch sampler of the given dataloader.
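The deleted `_v1_8_output_format` helper only inspected the hook signature to see whether the user had opted into the new layout by declaring a `new_format=True` default. The sketch below reproduces that check for reference; the two example hooks are hypothetical.

```python
# Reference-only reproduction of the removed signature check.
import inspect
from typing import Callable


def _v1_8_output_format(fx: Callable) -> bool:
    parameters = inspect.signature(fx).parameters
    return "new_format" in parameters and parameters["new_format"].default is True


def legacy_hook(outputs, batch, batch_idx):  # hypothetical user hook
    ...


def opted_in_hook(outputs, batch, batch_idx, new_format=True):  # hypothetical
    ...


print(_v1_8_output_format(legacy_hook))    # False -> old layout plus deprecation warning
print(_v1_8_output_format(opted_in_hook))  # True  -> new layout, no warning
```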
50 changes: 0 additions & 50 deletions tests/tests_pytorch/deprecated_api/test_remove_1-8.py
@@ -38,7 +38,6 @@
from pytorch_lightning.utilities.apply_func import move_data_to_device
from pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY
from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn
from tests_pytorch.deprecated_api import no_deprecated_call
from tests_pytorch.helpers.runif import RunIf
from tests_pytorch.helpers.torchtext_utils import get_dummy_torchtext_data_iterator

@@ -584,55 +583,6 @@ def test_v1_8_0_weights_save_path(tmpdir):
_ = trainer.weights_save_path


def test_deprecated_epoch_outputs_format(tmpdir):
class DeprecationModel(BoringModel):
def __init__(self):
super().__init__()
self.truncated_bptt_steps = 1

def training_step(self, batch, batch_idx, optimizer_idx, hiddens):
output = super().training_step(batch, batch_idx)
output["hiddens"] = hiddens
return output

def tbptt_split_batch(self, batch, split_size):
return [batch, batch]

def training_epoch_end(self, outputs):
...

def on_train_batch_end(self, outputs, batch, batch_idx) -> None:
...

def configure_optimizers(self):
return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]

trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = DeprecationModel()
batch_match = r"on_train_batch_end.*will change in version v1.8 to \(tbptt_steps, n_optimizers\)"
with pytest.deprecated_call(match=batch_match):
trainer.fit(model)

class DeprecationModel2(DeprecationModel):
def on_train_batch_end(self, *args, new_format=True):
...

trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = DeprecationModel()
epoch_match = r"training_epoch_end.*will change in version v1.8 to \(n_batches, tbptt_steps, n_optimizers\)"
with pytest.deprecated_call(match=epoch_match):
trainer.fit(model)

class NoDeprecationModel(DeprecationModel2):
def training_epoch_end(self, outputs, new_format=True):
...

trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = NoDeprecationModel()
with no_deprecated_call(match="will change in version v1.8.*new_format=True"):
trainer.fit(model)


@pytest.mark.flaky(reruns=3)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list):
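With the deprecation test gone, the `new_format=True` opt-in that it exercised also disappears from user code: hooks return to their plain signatures and always receive the new nesting. A hypothetical before/after sketch (the classes and hook bodies are illustrative, not Lightning API):

```python
# Hypothetical migration sketch; the classes only illustrate hook signatures.


class DuringDeprecationWindow:
    # Pre-1.8: users opted into the new nesting by adding `new_format=True`.
    def on_train_batch_end(self, outputs, batch, batch_idx, new_format=True):
        ...

    def training_epoch_end(self, outputs, new_format=True):
        ...


class AfterThisCommit:
    # The opt-in parameter is removed; the plain signatures always get the new nesting.
    def on_train_batch_end(self, outputs, batch, batch_idx):
        ...

    def training_epoch_end(self, outputs):
        ...
```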
115 changes: 17 additions & 98 deletions tests/tests_pytorch/loops/epoch/test_training_epoch_loop.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from unittest.mock import patch

import pytest
@@ -20,7 +19,6 @@
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loops import TrainingEpochLoop
from pytorch_lightning.trainer.trainer import Trainer
from tests_pytorch.deprecated_api import no_deprecated_call

_out00 = {"loss": 0.0}
_out01 = {"loss": 0.1}
@@ -33,43 +31,33 @@


class TestPrepareOutputs:
def prepare_outputs(self, fn, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization):
def prepare_outputs(self, fn, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization):
lightning_module = LightningModule()
lightning_module.on_train_batch_end = lambda *_: None # override to trigger the deprecation message
lightning_module.automatic_optimization = automatic_optimization
lightning_module.truncated_bptt_steps = tbptt_splits
match = "will change in version v1.8.*new_format=True"
will_warn = tbptt_splits and num_optimizers > 1 and not new_format
ctx_manager = pytest.deprecated_call if will_warn else no_deprecated_call
with ctx_manager(match=match):
with mock.patch(
"pytorch_lightning.loops.epoch.training_epoch_loop._v1_8_output_format", return_value=new_format
):
return fn(
batch_outputs,
lightning_module=lightning_module,
num_optimizers=num_optimizers, # does not matter for manual optimization
)
return fn(
batch_outputs,
lightning_module=lightning_module,
num_optimizers=num_optimizers, # does not matter for manual optimization
)

def prepare_outputs_training_epoch_end(
self, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization=True
self, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization=True
):
return self.prepare_outputs(
TrainingEpochLoop._prepare_outputs_training_epoch_end,
tbptt_splits,
new_format,
batch_outputs,
num_optimizers,
automatic_optimization=automatic_optimization,
)

def prepare_outputs_training_batch_end(
self, tbptt_splits, new_format, batch_outputs, num_optimizers, automatic_optimization=True
self, tbptt_splits, batch_outputs, num_optimizers, automatic_optimization=True
):
return self.prepare_outputs(
TrainingEpochLoop._prepare_outputs_training_batch_end,
tbptt_splits,
new_format,
batch_outputs,
num_optimizers,
automatic_optimization=automatic_optimization,
@@ -97,53 +85,19 @@ def prepare_outputs_training_batch_end(
),
# 1 batch, tbptt with 2 splits (uneven)
(1, 2, [[{0: _out00}, {0: _out01}], [{0: _out03}]], [[_out00, _out01], [_out03]]),
],
)
@pytest.mark.parametrize("new_format", (False, True))
def test_prepare_outputs_training_epoch_end_automatic(
self, num_optimizers, tbptt_splits, batch_outputs, expected, new_format
):
"""Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
currently expects in the case of automatic optimization."""
assert (
self.prepare_outputs_training_epoch_end(tbptt_splits, new_format, batch_outputs, num_optimizers) == expected
)

@pytest.mark.parametrize(
"num_optimizers,tbptt_splits,batch_outputs,expected",
[
# 3 batches, tbptt with 2 splits, 2 optimizers alternating
(
2,
2,
[[{0: _out00}, {0: _out01}], [{1: _out10}, {1: _out11}], [{0: _out02}, {0: _out03}]],
[[[_out00, _out01], [], [_out02, _out03]], [[], [_out10, _out11], []]],
)
],
)
def test_prepare_outputs_training_epoch_end_automatic_old_format(
self, num_optimizers, tbptt_splits, batch_outputs, expected
):
assert self.prepare_outputs_training_epoch_end(tbptt_splits, False, batch_outputs, num_optimizers) == expected

@pytest.mark.parametrize(
"num_optimizers,tbptt_splits,batch_outputs,expected",
[
# 3 batches, tbptt with 2 splits, 2 optimizers alternating
(
2,
2,
[[{0: _out00}, {0: _out01}], [{1: _out10}, {1: _out11}], [{0: _out02}, {0: _out03}]],
[[[_out00], [_out01]], [[_out10], [_out11]], [[_out02], [_out03]]],
)
),
],
)
def test_prepare_outputs_training_epoch_end_automatic_new_format(
self, num_optimizers, tbptt_splits, batch_outputs, expected
):
def test_prepare_outputs_training_epoch_end_automatic(self, num_optimizers, tbptt_splits, batch_outputs, expected):
"""Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
currently expects in the case of automatic optimization."""
assert self.prepare_outputs_training_epoch_end(tbptt_splits, True, batch_outputs, num_optimizers) == expected
assert self.prepare_outputs_training_epoch_end(tbptt_splits, batch_outputs, num_optimizers) == expected

@pytest.mark.parametrize(
"batch_outputs,expected",
@@ -160,14 +114,10 @@ def test_prepare_outputs_training_epoch_end_automatic_new_format(
([[_out00, _out01], [_out02, _out03], [], [_out10]], [[_out00, _out01], [_out02, _out03], [_out10]]),
],
)
@pytest.mark.parametrize("new_format", (False, True))
def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected, new_format):
def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected):
"""Test that the loop converts the nested lists of outputs to the format that the `training_epoch_end` hook
currently expects in the case of manual optimization."""
assert (
self.prepare_outputs_training_epoch_end(0, new_format, batch_outputs, -1, automatic_optimization=False)
== expected
)
assert self.prepare_outputs_training_epoch_end(0, batch_outputs, -1, automatic_optimization=False) == expected

@pytest.mark.parametrize(
"num_optimizers,tbptt_splits,batch_end_outputs,expected",
@@ -180,47 +130,17 @@ def test_prepare_outputs_training_epoch_end_manual(self, batch_outputs, expected
(2, 0, [{0: _out00, 1: _out01}], [_out00, _out01]),
# tbptt with 2 splits
(1, 2, [{0: _out00}, {0: _out01}], [_out00, _out01]),
# 2 optimizers, tbptt with 2 splits
(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out01], [_out10, _out11]]),
],
)
@pytest.mark.parametrize("new_format", (False, True))
def test_prepare_outputs_training_batch_end_automatic(
self, num_optimizers, tbptt_splits, batch_end_outputs, expected, new_format
):
"""Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
currently expects in the case of automatic optimization."""

assert (
self.prepare_outputs_training_batch_end(tbptt_splits, new_format, batch_end_outputs, num_optimizers)
== expected
)

@pytest.mark.parametrize(
"num_optimizers,tbptt_splits,batch_end_outputs,expected",
# 2 optimizers, tbptt with 2 splits
[(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out10], [_out01, _out11]])],
)
def test_prepare_outputs_training_batch_end_automatic_old_format(
self, num_optimizers, tbptt_splits, batch_end_outputs, expected
):
"""Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
currently expects in the case of automatic optimization."""
assert (
self.prepare_outputs_training_batch_end(tbptt_splits, False, batch_end_outputs, num_optimizers) == expected
)

@pytest.mark.parametrize(
"num_optimizers,tbptt_splits,batch_end_outputs,expected",
# 2 optimizers, tbptt with 2 splits
[(2, 2, [{0: _out00, 1: _out01}, {0: _out10, 1: _out11}], [[_out00, _out01], [_out10, _out11]])],
)
def test_prepare_outputs_training_batch_end_automatic_new_format(
self, num_optimizers, tbptt_splits, batch_end_outputs, expected
):
"""Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
currently expects in the case of automatic optimization."""
assert (
self.prepare_outputs_training_batch_end(tbptt_splits, True, batch_end_outputs, num_optimizers) == expected
)
assert self.prepare_outputs_training_batch_end(tbptt_splits, batch_end_outputs, num_optimizers) == expected

@pytest.mark.parametrize(
"batch_end_outputs,expected",
Expand All @@ -237,8 +157,7 @@ def test_prepare_outputs_training_batch_end_manual(self, batch_end_outputs, expe
"""Test that the loop converts the nested lists of outputs to the format that the `on_train_batch_end` hook
currently expects in the case of manual optimization."""
assert (
self.prepare_outputs_training_batch_end(0, False, batch_end_outputs, -1, automatic_optimization=False)
== expected
self.prepare_outputs_training_batch_end(0, batch_end_outputs, -1, automatic_optimization=False) == expected
)


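The simplified tests above keep only the new-format expectations; the deleted `*_old_format` variants differed from them by the 3-D transpose that used to live in `_prepare_outputs_training_epoch_end`. For reference, a standalone numpy sketch of that epoch-end conversion (dummy dicts, illustrative names):

```python
# Standalone sketch of the removed training_epoch_end compatibility transpose:
# internal layout (n_batches, tbptt_steps, n_optimizers) vs. the old hook
# layout (n_optimizers, n_batches, tbptt_steps).
import numpy as np

# 2 batches x 2 TBPTT splits x 2 optimizers
new_layout = [
    [[{"loss": 0.00}, {"loss": 0.01}], [{"loss": 0.02}, {"loss": 0.03}]],
    [[{"loss": 0.10}, {"loss": 0.11}], [{"loss": 0.12}, {"loss": 0.13}]],
]

array = np.array(new_layout, dtype=object)        # shape (2, 2, 2)
old_layout = array.transpose((2, 0, 1)).tolist()  # (n_optimizers, n_batches, tbptt_steps)

print(old_layout[0][1][0])  # {'loss': 0.1} -> optimizer 0, batch 1, split 0
```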