From 7c5f868557402c647cf2da5741875a5ad03ce8de Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Thu, 12 Jan 2023 14:30:31 +0100
Subject: [PATCH] Do not warn about the scheduler's interval key during manual optim (#16308)

---
 src/pytorch_lightning/CHANGELOG.md            |  3 +++
 src/pytorch_lightning/core/optimizer.py       |  4 +++-
 .../optimization/test_manual_optimization.py  |  4 +++-
 .../trainer/optimization/test_optimizers.py   | 19 -------------------
 4 files changed, 9 insertions(+), 21 deletions(-)

diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md
index 2ad3f1faa1c88..e16268f88a88a 100644
--- a/src/pytorch_lightning/CHANGELOG.md
+++ b/src/pytorch_lightning/CHANGELOG.md
@@ -169,6 +169,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Fixed a type error when dividing the chunk size in the ColossalAI strategy ([#16212](https://github.com/Lightning-AI/lightning/pull/16212))
 
+- Fixed bug where the ``interval`` key of the scheduler would be ignored during manual optimization, making the LearningRateMonitor callback fail to log the learning rate ([#16308](https://github.com/Lightning-AI/lightning/pull/16308))
+
+
 ## [1.8.6] - 2022-12-21
 
 - minor cleaning
diff --git a/src/pytorch_lightning/core/optimizer.py b/src/pytorch_lightning/core/optimizer.py
index 732ddd8b7cc7f..430c92ee62de8 100644
--- a/src/pytorch_lightning/core/optimizer.py
+++ b/src/pytorch_lightning/core/optimizer.py
@@ -322,7 +322,9 @@ def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig
     lr_scheduler_configs = []
     for scheduler in schedulers:
         if isinstance(scheduler, dict):
-            invalid_keys = {"interval", "frequency", "reduce_on_plateau", "monitor", "strict"}
+            # interval is not in this list even though the user needs to manually call the scheduler because
+            # the `LearningRateMonitor` callback needs to check its value to know when to log the learning rate
+            invalid_keys = {"frequency", "reduce_on_plateau", "monitor", "strict"}
             keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys]
 
             if keys_to_warn:
diff --git a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
index 9c3f85c5ab647..c8e20bb4427a7 100644
--- a/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
+++ b/tests/tests_pytorch/trainer/optimization/test_manual_optimization.py
@@ -933,6 +933,7 @@ def configure_optimizers(self):
                 scheduler = {
                     "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer),
                     "monitor": "train_loss",
+                    "interval": "step",  # not warned
                 }
             else:
                 scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
@@ -946,8 +947,9 @@ def configure_optimizers(self):
     )
 
     if scheduler_as_dict:
-        with pytest.warns(RuntimeWarning, match="but the keys will be ignored"):
+        with pytest.warns(RuntimeWarning, match=r"\['monitor'\], but the keys will be ignored"):
             trainer.fit(model)
+        assert trainer.lr_scheduler_configs[0].interval == "step"
     else:
         trainer.fit(model)
 
diff --git a/tests/tests_pytorch/trainer/optimization/test_optimizers.py b/tests/tests_pytorch/trainer/optimization/test_optimizers.py
index ed821b0d6ff4c..95ab17e513f7a 100644
--- a/tests/tests_pytorch/trainer/optimization/test_optimizers.py
+++ b/tests/tests_pytorch/trainer/optimization/test_optimizers.py
@@ -557,25 +557,6 @@ def configure_optimizers(self):
     trainer.fit(model)
 
 
-def test_warn_invalid_scheduler_key_in_manual_optimization(tmpdir):
-    """Test warning when invalid scheduler keys are provided in manual optimization."""
-
-    class TestModel(BoringModel):
-        def __init__(self):
-            super().__init__()
-            self.automatic_optimization = False
-
-        def configure_optimizers(self):
-            opt = optim.SGD(self.layer.parameters(), lr=0.1)
-            sch = optim.lr_scheduler.StepLR(opt, step_size=1)
-            return [opt], [{"scheduler": sch, "interval": "epoch"}]
-
-    model = TestModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
-    with pytest.warns(RuntimeWarning, match="the keys will be ignored"):
-        trainer.fit(model)
-
-
 @RunIf(min_cuda_gpus=2, standalone=True)
 def test_optimizer_state_on_device(tmpdir):
     """Test that optimizers that create state initially at instantiation still end up with the state on the GPU."""
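
For reference, here is a minimal usage sketch (not part of the patch; the model, trainer settings, and hyperparameters are illustrative) of the behaviour this change enables: under manual optimization, the ``interval`` key may now stay in the scheduler dict returned from ``configure_optimizers``, where the ``LearningRateMonitor`` callback reads it to decide when to log the learning rate, while the user still steps the scheduler manually.

# Illustrative sketch, not part of this patch: manual optimization with a
# scheduler dict that keeps the "interval" key so LearningRateMonitor can
# decide when to log the learning rate. Model and trainer settings are hypothetical.
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.demos.boring_classes import BoringModel


class ManualOptimModel(BoringModel):
    def __init__(self):
        super().__init__()
        self.automatic_optimization = False  # manual optimization

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        loss = self(batch).sum()
        opt.zero_grad()
        self.manual_backward(loss)
        opt.step()
        # the scheduler must still be stepped manually by the user
        self.lr_schedulers().step()
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        # "interval" is no longer warned about and dropped; LearningRateMonitor
        # reads it to know whether to log the learning rate per step or per epoch
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]


if __name__ == "__main__":
    trainer = Trainer(
        max_epochs=1,
        limit_train_batches=4,
        log_every_n_steps=1,
        callbacks=[LearningRateMonitor(logging_interval="step")],
    )
    trainer.fit(ManualOptimModel())

The remaining keys (``frequency``, ``reduce_on_plateau``, ``monitor``, ``strict``) are still warned about and ignored, since they only apply when the Trainer steps the scheduler automatically.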