From ed5fcc0a5f777c30e2bb3475d624a614215c495d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Fri, 25 Nov 2022 13:54:30 +0100 Subject: [PATCH 1/3] Set the logger explicitly in tests --- requirements/pytorch/test.txt | 6 ++-- .../progress/test_rich_progress_bar.py | 4 ++- .../progress/test_tqdm_progress_bar.py | 2 ++ .../callbacks/test_device_stats_monitor.py | 4 +-- .../callbacks/test_lr_monitor.py | 35 ++++++++++++++++--- .../callbacks/test_stochastic_weight_avg.py | 3 +- .../checkpointing/test_model_checkpoint.py | 22 ++++++++---- tests/tests_pytorch/loggers/test_logger.py | 10 +++++- .../tests_pytorch/loggers/test_tensorboard.py | 2 +- tests/tests_pytorch/models/test_grad_norm.py | 9 ++++- tests/tests_pytorch/models/test_hparams.py | 10 ++++-- .../tests_pytorch/profilers/test_profiler.py | 21 ++++++----- .../strategies/test_deepspeed_strategy.py | 3 ++ .../trainer/flags/test_fast_dev_run.py | 3 +- .../logging_/test_eval_loop_logging.py | 3 +- .../logging_/test_train_loop_logging.py | 3 ++ .../tests_pytorch/trainer/test_dataloaders.py | 7 ++-- tests/tests_pytorch/trainer/test_trainer.py | 1 + 18 files changed, 112 insertions(+), 36 deletions(-) diff --git a/requirements/pytorch/test.txt b/requirements/pytorch/test.txt index 537c897620229..27d865f57d63c 100644 --- a/requirements/pytorch/test.txt +++ b/requirements/pytorch/test.txt @@ -12,8 +12,8 @@ scikit-learn>0.22.1, <1.1.3 onnxruntime<1.14.0 psutil<5.9.4 # for `DeviceStatsMonitor` pandas>1.0, <1.5.2 # needed in benchmarks -fastapi<0.87.0 -uvicorn<0.19.1 +fastapi<0.87.0 # for `ServableModuleValidator` +uvicorn<0.19.1 # for `ServableModuleValidator` -tensorboard>=2.9.1, <2.12.0 +tensorboard>=2.9.1, <2.12.0 # for `TensorBoardLogger` protobuf<=3.20.1 # strict # an extra is updating protobuf, this pin prevents TensorBoard failure diff --git a/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py b/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py index 5c9bd410d186e..b1a7082ef6448 100644 --- a/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py +++ b/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py @@ -23,6 +23,7 @@ from pytorch_lightning.callbacks import ProgressBarBase, RichProgressBar from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBarTheme from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset, RandomIterableDataset +from pytorch_lightning.loggers import CSVLogger from tests_pytorch.helpers.runif import RunIf @@ -330,7 +331,7 @@ def training_step(self, *args, **kwargs): progress_bar = RichProgressBar() model = CustomModel() - trainer = Trainer(default_root_dir=tmpdir, callbacks=progress_bar, fast_dev_run=True) + trainer = Trainer(default_root_dir=tmpdir, callbacks=progress_bar, fast_dev_run=True, logger=CSVLogger(tmpdir)) trainer.fit(model) main_progress_bar_id = progress_bar.main_progress_bar_id @@ -384,6 +385,7 @@ def test_step(self, batch, batch_idx): enable_checkpointing=False, log_every_n_steps=1, callbacks=pbar, + logger=CSVLogger(tmpdir), ) trainer.fit(model) diff --git a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py index 105f380be58f5..bdd1c2002f1cb 100644 --- a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py +++ b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py @@ -29,6 +29,7 @@ from pytorch_lightning.callbacks.progress.tqdm_progress import Tqdm from pytorch_lightning.core.module 
import LightningModule from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset +from pytorch_lightning.loggers import CSVLogger from pytorch_lightning.utilities.exceptions import MisconfigurationException from tests_pytorch.helpers.runif import RunIf @@ -706,6 +707,7 @@ def test_step(self, batch, batch_idx): enable_checkpointing=False, log_every_n_steps=1, callbacks=pbar, + logger=CSVLogger(tmpdir), ) trainer.fit(model) diff --git a/tests/tests_pytorch/callbacks/test_device_stats_monitor.py b/tests/tests_pytorch/callbacks/test_device_stats_monitor.py index 36b30dc346d65..826fa0f088f28 100644 --- a/tests/tests_pytorch/callbacks/test_device_stats_monitor.py +++ b/tests/tests_pytorch/callbacks/test_device_stats_monitor.py @@ -155,13 +155,13 @@ def test_prefix_metric_keys(): assert converted_metrics == {"foo.1": 1.0, "foo.2": 2.0, "foo.3": 3.0} -def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch): +def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch, tmp_path): """Test that warning is raised when psutil is not available.""" import pytorch_lightning.callbacks.device_stats_monitor as imports monkeypatch.setattr(imports, "_PSUTIL_AVAILABLE", False) monitor = DeviceStatsMonitor() - trainer = Trainer() + trainer = Trainer(logger=CSVLogger(tmp_path)) assert trainer.strategy.root_device == torch.device("cpu") # TODO: raise an exception from v1.9 with pytest.warns(UserWarning, match="psutil` is not installed"): diff --git a/tests/tests_pytorch/callbacks/test_lr_monitor.py b/tests/tests_pytorch/callbacks/test_lr_monitor.py index 90e2c0fa26909..3f54995342c38 100644 --- a/tests/tests_pytorch/callbacks/test_lr_monitor.py +++ b/tests/tests_pytorch/callbacks/test_lr_monitor.py @@ -20,6 +20,7 @@ from pytorch_lightning.callbacks.callback import Callback from pytorch_lightning.callbacks.finetuning import BackboneFinetuning from pytorch_lightning.demos.boring_classes import BoringModel +from pytorch_lightning.loggers import CSVLogger from pytorch_lightning.utilities.exceptions import MisconfigurationException from tests_pytorch.helpers.datamodules import ClassifDataModule from tests_pytorch.helpers.runif import RunIf @@ -32,7 +33,12 @@ def test_lr_monitor_single_lr(tmpdir): lr_monitor = LearningRateMonitor() trainer = Trainer( - default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor] + default_root_dir=tmpdir, + max_epochs=2, + limit_val_batches=0.1, + limit_train_batches=0.5, + callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) trainer.fit(model) @@ -70,6 +76,7 @@ def configure_optimizers(self): limit_train_batches=5, log_every_n_steps=1, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) trainer.fit(model) @@ -96,6 +103,7 @@ def configure_optimizers(self): limit_train_batches=5, log_every_n_steps=1, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."): trainer.fit(model) @@ -117,7 +125,12 @@ def configure_optimizers(self): lr_monitor = LearningRateMonitor() trainer = Trainer( - default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor] + default_root_dir=tmpdir, + max_epochs=2, + limit_val_batches=0.1, + limit_train_batches=0.5, + callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) trainer.fit(model) @@ -154,6 +167,7 @@ def configure_optimizers(self): limit_train_batches=5, log_every_n_steps=1, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) 
trainer.fit(model) @@ -179,6 +193,7 @@ def configure_optimizers(self): limit_train_batches=5, log_every_n_steps=1, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."): trainer.fit(model) @@ -192,7 +207,7 @@ def test_lr_monitor_no_logger(tmpdir): model = BoringModel() lr_monitor = LearningRateMonitor() - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor], logger=False) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor]) with pytest.raises(MisconfigurationException, match="`Trainer` that has no logger"): trainer.fit(model) @@ -226,6 +241,7 @@ def configure_optimizers(self): limit_train_batches=7, limit_val_batches=0.1, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) trainer.fit(model) @@ -269,6 +285,7 @@ def configure_optimizers(self): limit_train_batches=7, limit_val_batches=0.1, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) trainer.fit(model) @@ -305,7 +322,12 @@ def configure_optimizers(self): lr_monitor = LearningRateMonitor() trainer = Trainer( - default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor] + default_root_dir=tmpdir, + max_epochs=2, + limit_val_batches=0.1, + limit_train_batches=0.5, + callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), ) trainer.fit(model, datamodule=dm) @@ -330,6 +352,7 @@ def configure_optimizers(self): callbacks=[lr_monitor], enable_progress_bar=False, enable_model_summary=False, + logger=CSVLogger(tmpdir), ) trainer.fit(TestModel()) assert list(lr_monitor.lrs) == ["my_logging_name"] @@ -349,6 +372,7 @@ def configure_optimizers(self): limit_val_batches=2, limit_train_batches=2, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), enable_progress_bar=False, enable_model_summary=False, ) @@ -384,6 +408,7 @@ def configure_optimizers(self): limit_val_batches=2, limit_train_batches=2, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), enable_progress_bar=False, enable_model_summary=False, ) @@ -475,6 +500,7 @@ def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int): limit_val_batches=0, limit_train_batches=2, callbacks=[TestFinetuning(), lr_monitor, Check()], + logger=CSVLogger(tmpdir), enable_progress_bar=False, enable_model_summary=False, enable_checkpointing=False, @@ -533,6 +559,7 @@ def configure_optimizers(self): limit_val_batches=2, limit_train_batches=2, callbacks=[lr_monitor], + logger=CSVLogger(tmpdir), enable_progress_bar=False, enable_model_summary=False, ) diff --git a/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py b/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py index e3f8a979f4353..ac2b5cb51274e 100644 --- a/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py +++ b/tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py @@ -303,13 +303,14 @@ def _swa_resume_training_from_checkpoint(tmpdir, model, resume_model, ddp=False) "limit_val_batches": 0, "accumulate_grad_batches": 2, "enable_progress_bar": False, + "logger": False, } trainer = Trainer(callbacks=SwaTestCallback(swa_epoch_start=swa_start, swa_lrs=0.1), **trainer_kwargs) with _backward_patch(trainer), pytest.raises(Exception, match="SWA crash test"): trainer.fit(model) - checkpoint_dir = Path(tmpdir) / "lightning_logs" / "version_0" / "checkpoints" + checkpoint_dir = Path(tmpdir) / "checkpoints" checkpoint_files = os.listdir(checkpoint_dir) assert len(checkpoint_files) == 1 ckpt_path = str(checkpoint_dir / checkpoint_files[0]) diff 
--git a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py index 5da689c4e62b8..291d6fbde8e94 100644 --- a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py @@ -35,7 +35,7 @@ from pytorch_lightning import seed_everything, Trainer from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel -from pytorch_lightning.loggers import TensorBoardLogger +from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE from tests_pytorch.helpers.runif import RunIf @@ -301,9 +301,11 @@ def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int): checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}", save_top_k=save_top_k) max_epochs = 2 - trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs) + trainer = Trainer( + default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs, logger=False + ) trainer.fit(model) - assert checkpoint.dirpath == tmpdir / trainer.logger.name / "version_0" / "checkpoints" + assert checkpoint.dirpath == tmpdir / "checkpoints" if save_top_k == -1: ckpt_files = os.listdir(checkpoint.dirpath) @@ -753,7 +755,12 @@ def test_default_checkpoint_behavior(tmpdir): model = LogInTwoMethods() trainer = Trainer( - default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5 + default_root_dir=tmpdir, + max_epochs=3, + enable_progress_bar=False, + limit_train_batches=5, + limit_val_batches=5, + logger=False, ) with patch.object(trainer, "save_checkpoint", wraps=trainer.save_checkpoint) as save_mock: @@ -761,7 +768,7 @@ def test_default_checkpoint_behavior(tmpdir): results = trainer.test() assert len(results) == 1 - save_dir = tmpdir / "lightning_logs" / "version_0" / "checkpoints" + save_dir = tmpdir / "checkpoints" save_weights_only = trainer.checkpoint_callback.save_weights_only save_mock.assert_has_calls( [ @@ -867,6 +874,7 @@ def validation_step(self, batch, batch_idx): "enable_model_summary": False, "log_every_n_steps": 1, "default_root_dir": tmpdir, + "logger": CSVLogger(tmpdir), } trainer = Trainer(**trainer_kwargs, callbacks=[checkpoint_callback]) trainer.fit(model) @@ -931,6 +939,7 @@ def assert_checkpoint_log_dir(idx): limit_val_batches=3, limit_test_batches=4, callbacks=[checkpoint_cb], + logger=TensorBoardLogger(tmpdir), ) trainer = Trainer(**trainer_config) assert_trainer_init(trainer) @@ -948,7 +957,7 @@ def assert_checkpoint_log_dir(idx): trainer.test(model) assert trainer.current_epoch == epochs - for idx in range(1, 5): + for idx in range(5): chk = get_last_checkpoint(ckpt_dir) assert_checkpoint_content(ckpt_dir) @@ -975,7 +984,6 @@ def assert_checkpoint_log_dir(idx): assert trainer.global_step == epochs * limit_train_batches assert trainer.current_epoch == epochs assert trainer.fit_loop.epoch_progress.current.processed == epochs - assert_checkpoint_log_dir(idx) def test_configure_model_checkpoint(tmpdir): diff --git a/tests/tests_pytorch/loggers/test_logger.py b/tests/tests_pytorch/loggers/test_logger.py index 5903fce2a6348..a36375ac981c4 100644 --- a/tests/tests_pytorch/loggers/test_logger.py +++ b/tests/tests_pytorch/loggers/test_logger.py @@ 
-239,7 +239,12 @@ def __init__(self, param_one, param_two): model = TestModel("pytorch", "lightning") trainer = Trainer( - default_root_dir=tmpdir, max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, num_sanity_val_steps=0 + default_root_dir=tmpdir, + max_epochs=1, + limit_train_batches=0.1, + limit_val_batches=0.1, + num_sanity_val_steps=0, + logger=TensorBoardLogger(tmpdir), ) trainer.fit(model) @@ -270,6 +275,7 @@ class _Test: trainer = Trainer( default_root_dir=tmpdir, + logger=TensorBoardLogger(tmpdir), max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, @@ -294,6 +300,7 @@ class _Test: dm = TestDataModule(diff_params) trainer = Trainer( default_root_dir=tmpdir, + logger=TensorBoardLogger(tmpdir), max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, @@ -311,6 +318,7 @@ class _Test: dm = TestDataModule(tensor_params) trainer = Trainer( default_root_dir=tmpdir, + logger=TensorBoardLogger(tmpdir), max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, diff --git a/tests/tests_pytorch/loggers/test_tensorboard.py b/tests/tests_pytorch/loggers/test_tensorboard.py index 7189f4c735778..87264216f2b48 100644 --- a/tests/tests_pytorch/loggers/test_tensorboard.py +++ b/tests/tests_pytorch/loggers/test_tensorboard.py @@ -39,7 +39,7 @@ def __init__(self, b1=0.5, b2=0.999): super().__init__() self.save_hyperparameters() - trainer = Trainer(max_steps=1, default_root_dir=tmpdir) + trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=TensorBoardLogger(tmpdir)) model = CustomModel() assert trainer.log_dir == trainer.logger.log_dir trainer.fit(model) diff --git a/tests/tests_pytorch/models/test_grad_norm.py b/tests/tests_pytorch/models/test_grad_norm.py index 4d31187caf1a6..2b7d979937309 100644 --- a/tests/tests_pytorch/models/test_grad_norm.py +++ b/tests/tests_pytorch/models/test_grad_norm.py @@ -18,6 +18,7 @@ from pytorch_lightning import Trainer from pytorch_lightning.demos.boring_classes import BoringModel +from pytorch_lightning.loggers import CSVLogger class ModelWithManualGradTracker(BoringModel): @@ -86,7 +87,13 @@ def on_train_batch_end(self, *_) -> None: @pytest.mark.parametrize("log_every_n_steps", [1, 2, 3]) def test_grad_tracking_interval(tmpdir, log_every_n_steps): """Test that gradient norms get tracked in the right interval and that everytime the same keys get logged.""" - trainer = Trainer(default_root_dir=tmpdir, track_grad_norm=2, log_every_n_steps=log_every_n_steps, max_steps=10) + trainer = Trainer( + default_root_dir=tmpdir, + track_grad_norm=2, + log_every_n_steps=log_every_n_steps, + max_steps=10, + logger=CSVLogger(tmpdir), + ) with patch.object(trainer.logger, "log_metrics") as mocked: model = BoringModel() diff --git a/tests/tests_pytorch/models/test_hparams.py b/tests/tests_pytorch/models/test_hparams.py index 80ef49e87fcf2..fe85e9abade56 100644 --- a/tests/tests_pytorch/models/test_hparams.py +++ b/tests/tests_pytorch/models/test_hparams.py @@ -33,6 +33,7 @@ from pytorch_lightning.core.mixins import HyperparametersMixin from pytorch_lightning.core.saving import load_hparams_from_yaml, save_hparams_to_yaml from pytorch_lightning.demos.boring_classes import BoringDataModule, BoringModel, RandomDataset +from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger from pytorch_lightning.utilities import _OMEGACONF_AVAILABLE, AttributeDict, is_picklable from pytorch_lightning.utilities.exceptions import MisconfigurationException from tests_pytorch.helpers.runif import RunIf @@ -642,7 +643,12 @@ def 
test_init_arg_with_runtime_change(tmpdir, cls): assert model.hparams.running_arg == -1 trainer = Trainer( - default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, max_epochs=1 + default_root_dir=tmpdir, + limit_train_batches=2, + limit_val_batches=2, + limit_test_batches=2, + max_epochs=1, + logger=TensorBoardLogger(tmpdir), ) trainer.fit(model) @@ -875,7 +881,7 @@ def test_colliding_hparams(tmpdir): model = SaveHparamsModel({"data_dir": "abc", "arg2": "abc"}) data = DataModuleWithHparams({"data_dir": "foo"}) - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=CSVLogger(tmpdir)) with pytest.raises(MisconfigurationException, match=r"Error while merging hparams:"): trainer.fit(model, datamodule=data) diff --git a/tests/tests_pytorch/profilers/test_profiler.py b/tests/tests_pytorch/profilers/test_profiler.py index 1ed1212840234..b2387f12a63f2 100644 --- a/tests/tests_pytorch/profilers/test_profiler.py +++ b/tests/tests_pytorch/profilers/test_profiler.py @@ -103,13 +103,12 @@ def test_simple_profiler_dirpath(tmpdir): assert profiler.dirpath is None model = BoringModel() - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, profiler=profiler) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, profiler=profiler, logger=False) trainer.fit(model) - expected = tmpdir / "lightning_logs" / "version_0" - assert trainer.log_dir == expected + assert trainer.log_dir == tmpdir assert profiler.dirpath == trainer.log_dir - assert expected.join("fit-profiler.txt").exists() + assert tmpdir.join("fit-profiler.txt").exists() def test_simple_profiler_with_nonexisting_log_dir(tmpdir): @@ -121,15 +120,19 @@ def test_simple_profiler_with_nonexisting_log_dir(tmpdir): model = BoringModel() trainer = Trainer( - default_root_dir=nonexisting_tmpdir, max_epochs=1, limit_train_batches=1, limit_val_batches=1, profiler=profiler + default_root_dir=nonexisting_tmpdir, + max_epochs=1, + limit_train_batches=1, + limit_val_batches=1, + profiler=profiler, + logger=False, ) trainer.fit(model) - expected = nonexisting_tmpdir / "lightning_logs" / "version_0" - assert expected.exists() - assert trainer.log_dir == expected + assert nonexisting_tmpdir.exists() + assert trainer.log_dir == nonexisting_tmpdir assert profiler.dirpath == trainer.log_dir - assert expected.join("fit-profiler.txt").exists() + assert nonexisting_tmpdir.join("fit-profiler.txt").exists() def test_simple_profiler_with_nonexisting_dirpath(tmpdir): diff --git a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py index 786cfd1ab1504..d2d8479e5bfa0 100644 --- a/tests/tests_pytorch/strategies/test_deepspeed_strategy.py +++ b/tests/tests_pytorch/strategies/test_deepspeed_strategy.py @@ -29,6 +29,7 @@ from pytorch_lightning import LightningDataModule, LightningModule, Trainer from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset, RandomIterableDataset +from pytorch_lightning.loggers import CSVLogger from pytorch_lightning.plugins import DeepSpeedPrecisionPlugin from pytorch_lightning.strategies import DeepSpeedStrategy from pytorch_lightning.strategies.deepspeed import _DEEPSPEED_AVAILABLE, LightningDeepSpeedModule @@ -298,6 +299,7 @@ def configure_optimizers(self): fast_dev_run=True, precision=16, callbacks=[TestCB(), lr_monitor], + logger=CSVLogger(tmpdir), 
enable_progress_bar=False, enable_model_summary=False, ) @@ -337,6 +339,7 @@ def on_train_start(self, trainer, pl_module) -> None: max_epochs=2, precision=16, callbacks=[TestCB(), lr_monitor], + logger=CSVLogger(tmpdir), enable_progress_bar=False, enable_model_summary=False, ) diff --git a/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py b/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py index 27e194eab1b32..0e54f2993e708 100644 --- a/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py +++ b/tests/tests_pytorch/trainer/flags/test_fast_dev_run.py @@ -7,6 +7,7 @@ from pytorch_lightning import Trainer from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel +from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.loggers.logger import DummyLogger @@ -72,7 +73,7 @@ def test_step(self, batch, batch_idx): default_root_dir=tmpdir, fast_dev_run=fast_dev_run, val_check_interval=2, - logger=True, + logger=TensorBoardLogger(tmpdir), log_every_n_steps=1, callbacks=[checkpoint_callback, early_stopping_callback], ) diff --git a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py index 20c5323907027..1f234166be5aa 100644 --- a/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_eval_loop_logging.py @@ -765,7 +765,7 @@ def test_dataloader(self): return [super().test_dataloader(), super().test_dataloader()] model = CustomBoringModel() - trainer = Trainer(default_root_dir=tmpdir, limit_test_batches=1) + trainer = Trainer(default_root_dir=tmpdir, limit_test_batches=1, logger=TensorBoardLogger(tmpdir)) results = trainer.test(model) # what's logged in `test_epoch_end` gets included in the results of each dataloader @@ -997,6 +997,7 @@ def test_dataloader(self): limit_train_batches=1, limit_val_batches=limit_batches, limit_test_batches=limit_batches, + logger=TensorBoardLogger(tmpdir), ) model = CustomBoringModel() diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py index 8a44b7e131644..c2fd64271ede5 100644 --- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py +++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py @@ -29,6 +29,7 @@ from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, TQDMProgressBar from pytorch_lightning.core.module import LightningModule from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset, RandomDictDataset +from pytorch_lightning.loggers.tensorboard import TensorBoardLogger from pytorch_lightning.trainer.states import RunningStage from pytorch_lightning.utilities.exceptions import MisconfigurationException from tests_pytorch.helpers.runif import RunIf @@ -798,6 +799,7 @@ def training_step(self, batch, batch_idx): enable_model_summary=False, enable_checkpointing=False, enable_progress_bar=False, + logger=TensorBoardLogger(tmpdir), ) trainer.fit(model) @@ -831,6 +833,7 @@ def on_train_start(self): enable_model_summary=False, enable_checkpointing=False, enable_progress_bar=False, + logger=TensorBoardLogger(tmpdir), ) trainer.fit(model) diff --git a/tests/tests_pytorch/trainer/test_dataloaders.py b/tests/tests_pytorch/trainer/test_dataloaders.py index 08e81e5915351..673ba94b67b9c 100644 --- a/tests/tests_pytorch/trainer/test_dataloaders.py +++ 
b/tests/tests_pytorch/trainer/test_dataloaders.py @@ -32,6 +32,7 @@ RandomIterableDataset, RandomIterableDatasetWithLen, ) +from pytorch_lightning.loggers import CSVLogger from pytorch_lightning.trainer.states import RunningStage from pytorch_lightning.utilities.data import has_len_all_ranks from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -689,11 +690,13 @@ def test_warning_with_small_dataloader_and_logging_interval(tmpdir): model.train_dataloader = lambda: dataloader with pytest.warns(UserWarning, match=r"The number of training batches \(10\) is smaller than the logging interval"): - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, log_every_n_steps=11) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, log_every_n_steps=11, logger=CSVLogger(tmpdir)) trainer.fit(model) with pytest.warns(UserWarning, match=r"The number of training batches \(1\) is smaller than the logging interval"): - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, log_every_n_steps=2, limit_train_batches=1) + trainer = Trainer( + default_root_dir=tmpdir, max_epochs=1, log_every_n_steps=2, limit_train_batches=1, logger=CSVLogger(".") + ) trainer.fit(model) diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py index aa3af86abfe27..1fb441ce1f0aa 100644 --- a/tests/tests_pytorch/trainer/test_trainer.py +++ b/tests/tests_pytorch/trainer/test_trainer.py @@ -1342,6 +1342,7 @@ def training_step(self, *args, **kwargs): limit_train_batches=train_batches, limit_val_batches=0, max_steps=max_steps, + logger=TensorBoardLogger(tmpdir), ) trainer.fit(model) expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)] From 2c54afbd0d030900757abb5f99cbc3afbf850be9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Fri, 25 Nov 2022 18:25:48 +0100 Subject: [PATCH 2/3] Fix --- tests/tests_pytorch/callbacks/test_lr_monitor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests_pytorch/callbacks/test_lr_monitor.py b/tests/tests_pytorch/callbacks/test_lr_monitor.py index 3f54995342c38..6fa62fa91697d 100644 --- a/tests/tests_pytorch/callbacks/test_lr_monitor.py +++ b/tests/tests_pytorch/callbacks/test_lr_monitor.py @@ -207,7 +207,7 @@ def test_lr_monitor_no_logger(tmpdir): model = BoringModel() lr_monitor = LearningRateMonitor() - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor]) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor], logger=False) with pytest.raises(MisconfigurationException, match="`Trainer` that has no logger"): trainer.fit(model) From 60fbdb9ade630c5b111ec9bf3f25ec7888b88ff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Mon, 28 Nov 2022 07:08:09 +0100 Subject: [PATCH 3/3] Revert --- tests/tests_pytorch/checkpointing/test_model_checkpoint.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py index 291d6fbde8e94..e763bf22e018f 100644 --- a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py @@ -957,11 +957,12 @@ def assert_checkpoint_log_dir(idx): trainer.test(model) assert trainer.current_epoch == epochs - for idx in range(5): + for idx in range(1, 5): chk = get_last_checkpoint(ckpt_dir) assert_checkpoint_content(ckpt_dir) # load from checkpoint + 
trainer_config["logger"] = TensorBoardLogger(tmpdir) trainer = pl.Trainer(**trainer_config) assert_trainer_init(trainer) @@ -984,6 +985,7 @@ def assert_checkpoint_log_dir(idx): assert trainer.global_step == epochs * limit_train_batches assert trainer.current_epoch == epochs assert trainer.fit_loop.epoch_progress.current.processed == epochs + assert_checkpoint_log_dir(idx) def test_configure_model_checkpoint(tmpdir):