Do not warn about the scheduler's interval key during manual optim (#…
carmocca authored Jan 12, 2023
1 parent 12a4e71 commit 7c5f868
Showing 4 changed files with 9 additions and 21 deletions.
3 changes: 3 additions & 0 deletions src/pytorch_lightning/CHANGELOG.md
@@ -169,6 +169,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Fixed a type error when dividing the chunk size in the ColossalAI strategy ([#16212](https://github.com/Lightning-AI/lightning/pull/16212))


- Fixed bug where the ``interval`` key of the scheduler would be ignored during manual optimization, making the LearningRateMonitor callback fail to log the learning rate ([#16308](https://github.com/Lightning-AI/lightning/pull/16308))


## [1.8.6] - 2022-12-21

- minor cleaning
4 changes: 3 additions & 1 deletion src/pytorch_lightning/core/optimizer.py
@@ -322,7 +322,9 @@ def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig
     lr_scheduler_configs = []
     for scheduler in schedulers:
         if isinstance(scheduler, dict):
-            invalid_keys = {"interval", "frequency", "reduce_on_plateau", "monitor", "strict"}
+            # interval is not in this list even though the user needs to manually call the scheduler because
+            # the `LearningRateMonitor` callback needs to check its value to know when to log the learning rate
+            invalid_keys = {"frequency", "reduce_on_plateau", "monitor", "strict"}
             keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys]

             if keys_to_warn:
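To illustrate what this change enables, here is a minimal sketch (not part of the commit) of a manually optimized LightningModule that keeps the `interval` key in its scheduler config so that the `LearningRateMonitor` callback knows when to log the learning rate. The class name, layer sizes, dataloader, and scheduler choice are illustrative assumptions.

```python
import torch
from torch import nn, optim
import pytorch_lightning as pl


class ManualOptModel(pl.LightningModule):
    """Illustrative module using manual optimization; names are hypothetical."""

    def __init__(self):
        super().__init__()
        self.automatic_optimization = False  # manual optimization
        self.layer = nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        loss = self.layer(batch).sum()
        opt.zero_grad()
        self.manual_backward(loss)
        opt.step()
        # With manual optimization, the user steps the scheduler themselves ...
        self.lr_schedulers().step()
        return loss

    def configure_optimizers(self):
        opt = optim.SGD(self.layer.parameters(), lr=0.1)
        sch = optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.9)
        # ... but the "interval" key is still meaningful: after this change it is kept
        # (no RuntimeWarning) so LearningRateMonitor can tell when to log the LR.
        return [opt], [{"scheduler": sch, "interval": "step"}]

    def train_dataloader(self):
        # Random data just to make the sketch self-contained.
        return torch.utils.data.DataLoader(torch.randn(64, 32), batch_size=8)
```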
@@ -933,6 +933,7 @@ def configure_optimizers(self):
                 scheduler = {
                     "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer),
                     "monitor": "train_loss",
+                    "interval": "step",  # not warned
                 }
             else:
                 scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
@@ -946,8 +947,9 @@
     )

     if scheduler_as_dict:
-        with pytest.warns(RuntimeWarning, match="but the keys will be ignored"):
+        with pytest.warns(RuntimeWarning, match=r"\['monitor'\], but the keys will be ignored"):
             trainer.fit(model)
+        assert trainer.lr_scheduler_configs[0].interval == "step"
     else:
         trainer.fit(model)

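A hedged usage sketch of the behavior the updated test checks: attach a `LearningRateMonitor` and verify that the configured interval survives into `trainer.lr_scheduler_configs`. The Trainer arguments, the `CSVLogger` choice, and the reuse of the `ManualOptModel` sketched above are assumptions, not part of the commit.

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import CSVLogger

# Reuses the illustrative ManualOptModel from the sketch above.
model = ManualOptModel()
trainer = Trainer(
    max_epochs=1,
    limit_train_batches=4,
    callbacks=[LearningRateMonitor(logging_interval="step")],
    logger=CSVLogger("logs"),  # LearningRateMonitor needs a logger to write to
)
trainer.fit(model)

# The "interval" key is no longer dropped during manual optimization,
# so the scheduler config keeps the value the user asked for.
assert trainer.lr_scheduler_configs[0].interval == "step"
```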
19 changes: 0 additions & 19 deletions tests/tests_pytorch/trainer/optimization/test_optimizers.py
@@ -557,25 +557,6 @@ def configure_optimizers(self):
     trainer.fit(model)


-def test_warn_invalid_scheduler_key_in_manual_optimization(tmpdir):
-    """Test warning when invalid scheduler keys are provided in manual optimization."""
-
-    class TestModel(BoringModel):
-        def __init__(self):
-            super().__init__()
-            self.automatic_optimization = False
-
-        def configure_optimizers(self):
-            opt = optim.SGD(self.layer.parameters(), lr=0.1)
-            sch = optim.lr_scheduler.StepLR(opt, step_size=1)
-            return [opt], [{"scheduler": sch, "interval": "epoch"}]
-
-    model = TestModel()
-    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
-    with pytest.warns(RuntimeWarning, match="the keys will be ignored"):
-        trainer.fit(model)
-
-
 @RunIf(min_cuda_gpus=2, standalone=True)
 def test_optimizer_state_on_device(tmpdir):
     """Test that optimizers that create state initially at instantiation still end up with the state on the GPU."""
