diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a485566372ae..b0ebb9a37aa4c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
+- Fixed a bug to disable logging hyperparameters in logger if there are no hparams ([#11105](https://github.com/PyTorchLightning/pytorch-lightning/issues/11105))
 - Fixed an issue when torch-scripting a `LightningModule` after training with `Trainer(sync_batchnorm=True)` ([#11078](https://github.com/PyTorchLightning/pytorch-lightning/pull/11078))
 - Fixed an `AttributeError` occuring when using a `CombinedLoader` (multiple dataloaders) for prediction ([#11111](https://github.com/PyTorchLightning/pytorch-lightning/pull/11111))
diff --git a/pytorch_lightning/core/mixins/hparams_mixin.py b/pytorch_lightning/core/mixins/hparams_mixin.py
index 0e722f2bdb683..26a272dd3dd1d 100644
--- a/pytorch_lightning/core/mixins/hparams_mixin.py
+++ b/pytorch_lightning/core/mixins/hparams_mixin.py
@@ -28,7 +28,7 @@ class HyperparametersMixin:
 
     def __init__(self) -> None:
         super().__init__()
-        self._log_hyperparams = True
+        self._log_hyperparams = False
 
     def save_hyperparameters(
         self,
diff --git a/tests/callbacks/test_gpu_stats_monitor.py b/tests/callbacks/test_gpu_stats_monitor.py
index 5ed3f533b5588..ca9197c6a078c 100644
--- a/tests/callbacks/test_gpu_stats_monitor.py
+++ b/tests/callbacks/test_gpu_stats_monitor.py
@@ -83,7 +83,7 @@ def test_gpu_stats_monitor_no_queries(tmpdir):
     with mock.patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics") as log_metrics_mock:
         trainer.fit(model)
 
-    assert log_metrics_mock.mock_calls[2:] == [
+    assert log_metrics_mock.mock_calls[1:] == [
         mock.call({"batch_time/intra_step (ms)": mock.ANY}, step=0),
         mock.call({"batch_time/inter_step (ms)": mock.ANY}, step=1),
         mock.call({"batch_time/intra_step (ms)": mock.ANY}, step=1),
diff --git a/tests/loggers/test_all.py b/tests/loggers/test_all.py
index d66e77b4cea34..3bedd26621238 100644
--- a/tests/loggers/test_all.py
+++ b/tests/loggers/test_all.py
@@ -144,10 +144,8 @@ def log_metrics(self, metrics, step):
     log_metric_names = [(s, sorted(m.keys())) for s, m in logger.history]
     if logger_class == TensorBoardLogger:
         expected = [
-            (0, ["hp_metric"]),
             (0, ["epoch", "train_some_val"]),
             (0, ["early_stop_on", "epoch", "val_loss"]),
-            (0, ["hp_metric"]),
             (1, ["epoch", "test_loss"]),
         ]
         assert log_metric_names == expected
diff --git a/tests/loggers/test_base.py b/tests/loggers/test_base.py
index f4a91f63b50c4..224271709f5f7 100644
--- a/tests/loggers/test_base.py
+++ b/tests/loggers/test_base.py
@@ -111,7 +111,6 @@ def training_step(self, batch, batch_idx):
     trainer = Trainer(max_steps=2, log_every_n_steps=1, logger=logger, default_root_dir=tmpdir)
     trainer.fit(model)
     assert trainer.state.finished, f"Training failed with {trainer.state}"
-    assert logger.hparams_logged == model.hparams
     assert logger.metrics_logged != {}
     assert logger.after_save_checkpoint_called
     assert logger.finalized_status == "success"
@@ -133,11 +132,11 @@ def training_step(self, batch, batch_idx):
     trainer.fit(model)
     assert trainer.state.finished, f"Training failed with {trainer.state}"
 
-    assert logger1.hparams_logged == model.hparams
+    assert logger1.hparams_logged is None
     assert logger1.metrics_logged != {}
     assert logger1.finalized_status == "success"
 
-    assert logger2.hparams_logged == model.hparams
+    assert logger2.hparams_logged is None
     assert logger2.metrics_logged != {}
     assert logger2.finalized_status == "success"
diff --git a/tests/models/test_hparams.py b/tests/models/test_hparams.py
index dbd51d33bf0ed..d2ea07a12ea49 100644
--- a/tests/models/test_hparams.py
+++ b/tests/models/test_hparams.py
@@ -776,7 +776,10 @@ def test_adding_datamodule_hparams(tmpdir, model, data):
     # Merged hparams were logged
     merged_hparams = copy.deepcopy(org_model_hparams)
     merged_hparams.update(org_data_hparams)
-    mock_logger.log_hyperparams.assert_called_with(merged_hparams)
+    if merged_hparams:
+        mock_logger.log_hyperparams.assert_called_with(merged_hparams)
+    else:
+        mock_logger.log_hyperparams.assert_not_called()
 
 
 def test_no_datamodule_for_hparams(tmpdir):
diff --git a/tests/trainer/logging_/test_distributed_logging.py b/tests/trainer/logging_/test_distributed_logging.py
index d4ba4f242294a..36c266343b849 100644
--- a/tests/trainer/logging_/test_distributed_logging.py
+++ b/tests/trainer/logging_/test_distributed_logging.py
@@ -112,7 +112,6 @@ def on_fit_start(self, trainer, pl_module):
 
         def on_train_start(self, trainer, pl_module):
             assert trainer.logger.method_call
-            trainer.logger.log_hyperparams.assert_called_once()
             trainer.logger.log_graph.assert_called_once()
 
     logger = Mock()
diff --git a/tests/trainer/logging_/test_eval_loop_logging.py b/tests/trainer/logging_/test_eval_loop_logging.py
index c205ed8c6af48..d47cb1ef7d3bf 100644
--- a/tests/trainer/logging_/test_eval_loop_logging.py
+++ b/tests/trainer/logging_/test_eval_loop_logging.py
@@ -510,6 +510,10 @@ class ExtendedModel(BoringModel):
 
         val_losses = []
 
+        def __init__(self, some_val=7):
+            super().__init__()
+            self.save_hyperparameters()
+
         def training_step(self, batch, batch_idx):
             output = self.layer(batch)
             loss = self.loss(batch, output)
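
Taken together, the test changes above describe the new behaviour: hyperparameters only reach `logger.log_hyperparams` when the model or datamodule actually saved some, which is also why the stray `hp_metric` entries drop out of the expected TensorBoard calls and why `ExtendedModel` now needs `save_hyperparameters()` to keep its hparams logged. Below is a minimal, self-contained sketch of that guard; `log_hyperparams_if_present` and its signature are illustrative only, not the actual Trainer internals.

```python
from unittest.mock import Mock


def log_hyperparams_if_present(logger, model_hparams: dict, datamodule_hparams: dict) -> None:
    """Illustrative guard: merge model and datamodule hparams and log only if non-empty."""
    merged = {**model_hparams, **datamodule_hparams}
    if merged:
        # Something to log: forward the merged dict to the logger.
        logger.log_hyperparams(merged)
    # Otherwise the logger is never touched, mirroring
    # `mock_logger.log_hyperparams.assert_not_called()` in test_hparams.py above.


if __name__ == "__main__":
    mock_logger = Mock()

    # No hparams anywhere -> no log_hyperparams call.
    log_hyperparams_if_present(mock_logger, {}, {})
    mock_logger.log_hyperparams.assert_not_called()

    # hparams present -> logged once with the merged dict.
    log_hyperparams_if_present(mock_logger, {"lr": 0.1}, {"batch_size": 32})
    mock_logger.log_hyperparams.assert_called_once_with({"lr": 0.1, "batch_size": 32})
```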