Set the logger explicitly in tests #15815

Merged: 9 commits, Dec 10, 2022
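Every change in this PR follows the same pattern: each test passes a logger to the Trainer explicitly (a CSVLogger, a TensorBoardLogger, or logger=False) instead of relying on the Trainer's default logger. A minimal sketch of the pattern, using BoringModel and pytest's tmpdir fixture as the tests below do; the test name is illustrative and not part of the diff:

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import CSVLogger


def test_fit_with_explicit_logger(tmpdir):
    # The logger is constructed explicitly, so the test no longer depends on whatever
    # logger the Trainer would create by default.
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, logger=CSVLogger(tmpdir))
    trainer.fit(BoringModel())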
6 changes: 3 additions & 3 deletions requirements/pytorch/test.txt
@@ -12,8 +12,8 @@ scikit-learn>0.22.1, <1.1.3
onnxruntime<1.14.0
psutil<5.9.4 # for `DeviceStatsMonitor`
pandas>1.0, <1.5.2 # needed in benchmarks
-fastapi<0.87.0
-uvicorn<0.19.1
+fastapi<0.87.0 # for `ServableModuleValidator`
+uvicorn<0.19.1 # for `ServableModuleValidator`

-tensorboard>=2.9.1, <2.12.0
+tensorboard>=2.9.1, <2.12.0 # for `TensorBoardLogger`
protobuf<=3.20.1 # strict # an extra is updating protobuf, this pin prevents TensorBoard failure
@@ -23,6 +23,7 @@
from pytorch_lightning.callbacks import ProgressBarBase, RichProgressBar
from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBarTheme
from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset, RandomIterableDataset
+from pytorch_lightning.loggers import CSVLogger
from tests_pytorch.helpers.runif import RunIf


@@ -330,7 +331,7 @@ def training_step(self, *args, **kwargs):

progress_bar = RichProgressBar()
model = CustomModel()
-trainer = Trainer(default_root_dir=tmpdir, callbacks=progress_bar, fast_dev_run=True)
+trainer = Trainer(default_root_dir=tmpdir, callbacks=progress_bar, fast_dev_run=True, logger=CSVLogger(tmpdir))

trainer.fit(model)
main_progress_bar_id = progress_bar.main_progress_bar_id
@@ -384,6 +385,7 @@ def test_step(self, batch, batch_idx):
enable_checkpointing=False,
log_every_n_steps=1,
callbacks=pbar,
+logger=CSVLogger(tmpdir),
)

trainer.fit(model)
@@ -29,6 +29,7 @@
from pytorch_lightning.callbacks.progress.tqdm_progress import Tqdm
from pytorch_lightning.core.module import LightningModule
from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset
+from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests_pytorch.helpers.runif import RunIf

@@ -706,6 +707,7 @@ def test_step(self, batch, batch_idx):
enable_checkpointing=False,
log_every_n_steps=1,
callbacks=pbar,
+logger=CSVLogger(tmpdir),
)

trainer.fit(model)
4 changes: 2 additions & 2 deletions tests/tests_pytorch/callbacks/test_device_stats_monitor.py
@@ -155,13 +155,13 @@ def test_prefix_metric_keys():
assert converted_metrics == {"foo.1": 1.0, "foo.2": 2.0, "foo.3": 3.0}


-def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch):
+def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch, tmp_path):
"""Test that warning is raised when psutil is not available."""
import pytorch_lightning.callbacks.device_stats_monitor as imports

monkeypatch.setattr(imports, "_PSUTIL_AVAILABLE", False)
monitor = DeviceStatsMonitor()
-trainer = Trainer()
+trainer = Trainer(logger=CSVLogger(tmp_path))
assert trainer.strategy.root_device == torch.device("cpu")
# TODO: raise an exception from v1.9
with pytest.warns(UserWarning, match="psutil` is not installed"):
33 changes: 30 additions & 3 deletions tests/tests_pytorch/callbacks/test_lr_monitor.py
@@ -20,6 +20,7 @@
from pytorch_lightning.callbacks.callback import Callback
from pytorch_lightning.callbacks.finetuning import BackboneFinetuning
from pytorch_lightning.demos.boring_classes import BoringModel
+from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests_pytorch.helpers.datamodules import ClassifDataModule
from tests_pytorch.helpers.runif import RunIf
@@ -32,7 +33,12 @@ def test_lr_monitor_single_lr(tmpdir):

lr_monitor = LearningRateMonitor()
trainer = Trainer(
-default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+default_root_dir=tmpdir,
+max_epochs=2,
+limit_val_batches=0.1,
+limit_train_batches=0.5,
+callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
trainer.fit(model)

@@ -70,6 +76,7 @@ def configure_optimizers(self):
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
trainer.fit(model)

@@ -96,6 +103,7 @@ def configure_optimizers(self):
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
trainer.fit(model)
@@ -117,7 +125,12 @@ def configure_optimizers(self):

lr_monitor = LearningRateMonitor()
trainer = Trainer(
-default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+default_root_dir=tmpdir,
+max_epochs=2,
+limit_val_batches=0.1,
+limit_train_batches=0.5,
+callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)

trainer.fit(model)
@@ -154,6 +167,7 @@ def configure_optimizers(self):
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
trainer.fit(model)

@@ -179,6 +193,7 @@ def configure_optimizers(self):
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
trainer.fit(model)
@@ -226,6 +241,7 @@ def configure_optimizers(self):
limit_train_batches=7,
limit_val_batches=0.1,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
trainer.fit(model)

@@ -269,6 +285,7 @@ def configure_optimizers(self):
limit_train_batches=7,
limit_val_batches=0.1,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
trainer.fit(model)

@@ -305,7 +322,12 @@ def configure_optimizers(self):

lr_monitor = LearningRateMonitor()
trainer = Trainer(
-default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+default_root_dir=tmpdir,
+max_epochs=2,
+limit_val_batches=0.1,
+limit_train_batches=0.5,
+callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
)
trainer.fit(model, datamodule=dm)

@@ -330,6 +352,7 @@ def configure_optimizers(self):
callbacks=[lr_monitor],
enable_progress_bar=False,
enable_model_summary=False,
+logger=CSVLogger(tmpdir),
)
trainer.fit(TestModel())
assert list(lr_monitor.lrs) == ["my_logging_name"]
@@ -349,6 +372,7 @@ def configure_optimizers(self):
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
enable_progress_bar=False,
enable_model_summary=False,
)
@@ -384,6 +408,7 @@ def configure_optimizers(self):
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
enable_progress_bar=False,
enable_model_summary=False,
)
@@ -475,6 +500,7 @@ def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int):
limit_val_batches=0,
limit_train_batches=2,
callbacks=[TestFinetuning(), lr_monitor, Check()],
+logger=CSVLogger(tmpdir),
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
@@ -533,6 +559,7 @@ def configure_optimizers(self):
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
+logger=CSVLogger(tmpdir),
enable_progress_bar=False,
enable_model_summary=False,
)
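LearningRateMonitor records learning-rate values through the Trainer's logger, which is why every Trainer constructed in this file now receives an explicit CSVLogger. A hedged sketch of the resulting setup, reduced to the essentials; the test name and final assertion are illustrative and not taken from the diff:

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import CSVLogger


def test_lr_monitor_with_explicit_logger(tmpdir):
    lr_monitor = LearningRateMonitor()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=0,
        callbacks=[lr_monitor],
        logger=CSVLogger(tmpdir),
    )
    trainer.fit(BoringModel())
    # lr_monitor.lrs maps one key per optimizer to the list of recorded learning rates.
    assert lr_monitor.lrs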
@@ -303,13 +303,14 @@ def _swa_resume_training_from_checkpoint(tmpdir, model, resume_model, ddp=False)
"limit_val_batches": 0,
"accumulate_grad_batches": 2,
"enable_progress_bar": False,
+"logger": False,
}
trainer = Trainer(callbacks=SwaTestCallback(swa_epoch_start=swa_start, swa_lrs=0.1), **trainer_kwargs)

with _backward_patch(trainer), pytest.raises(Exception, match="SWA crash test"):
trainer.fit(model)

-checkpoint_dir = Path(tmpdir) / "lightning_logs" / "version_0" / "checkpoints"
+checkpoint_dir = Path(tmpdir) / "checkpoints"
checkpoint_files = os.listdir(checkpoint_dir)
assert len(checkpoint_files) == 1
ckpt_path = str(checkpoint_dir / checkpoint_files[0])
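The path change above, and the matching ones in tests/tests_pytorch/checkpointing/test_model_checkpoint.py below, follow from turning the logger off: without a logger there is no lightning_logs/version_0 directory, so the default ModelCheckpoint saves straight under default_root_dir/checkpoints. A small sketch of that behavior, assuming default checkpointing settings; the test name is illustrative:

import os

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel


def test_checkpoint_dir_without_logger(tmpdir):
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=0,
        logger=False,
    )
    trainer.fit(BoringModel())
    # With logger=False, checkpoints land directly under default_root_dir.
    assert os.listdir(tmpdir / "checkpoints")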
20 changes: 15 additions & 5 deletions tests/tests_pytorch/checkpointing/test_model_checkpoint.py
@@ -35,7 +35,7 @@
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.demos.boring_classes import BoringModel
-from pytorch_lightning.loggers import TensorBoardLogger
+from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE
from tests_pytorch.helpers.runif import RunIf
@@ -301,9 +301,11 @@ def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int):

checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}", save_top_k=save_top_k)
max_epochs = 2
-trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs)
+trainer = Trainer(
+default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs, logger=False
+)
trainer.fit(model)
-assert checkpoint.dirpath == tmpdir / trainer.logger.name / "version_0" / "checkpoints"
+assert checkpoint.dirpath == tmpdir / "checkpoints"

if save_top_k == -1:
ckpt_files = os.listdir(checkpoint.dirpath)
@@ -753,15 +755,20 @@ def test_default_checkpoint_behavior(tmpdir):

model = LogInTwoMethods()
trainer = Trainer(
-default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5
+default_root_dir=tmpdir,
+max_epochs=3,
+enable_progress_bar=False,
+limit_train_batches=5,
+limit_val_batches=5,
+logger=False,
)

with patch.object(trainer, "save_checkpoint", wraps=trainer.save_checkpoint) as save_mock:
trainer.fit(model)
results = trainer.test()

assert len(results) == 1
-save_dir = tmpdir / "lightning_logs" / "version_0" / "checkpoints"
+save_dir = tmpdir / "checkpoints"
save_weights_only = trainer.checkpoint_callback.save_weights_only
save_mock.assert_has_calls(
[
@@ -867,6 +874,7 @@ def validation_step(self, batch, batch_idx):
"enable_model_summary": False,
"log_every_n_steps": 1,
"default_root_dir": tmpdir,
+"logger": CSVLogger(tmpdir),
}
trainer = Trainer(**trainer_kwargs, callbacks=[checkpoint_callback])
trainer.fit(model)
@@ -931,6 +939,7 @@ def assert_checkpoint_log_dir(idx):
limit_val_batches=3,
limit_test_batches=4,
callbacks=[checkpoint_cb],
+logger=TensorBoardLogger(tmpdir),
)
trainer = Trainer(**trainer_config)
assert_trainer_init(trainer)
@@ -953,6 +962,7 @@ def assert_checkpoint_log_dir(idx):
assert_checkpoint_content(ckpt_dir)

# load from checkpoint
+trainer_config["logger"] = TensorBoardLogger(tmpdir)
trainer = pl.Trainer(**trainer_config)
assert_trainer_init(trainer)

10 changes: 9 additions & 1 deletion tests/tests_pytorch/loggers/test_logger.py
@@ -239,7 +239,12 @@ def __init__(self, param_one, param_two):

model = TestModel("pytorch", "lightning")
trainer = Trainer(
-default_root_dir=tmpdir, max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, num_sanity_val_steps=0
+default_root_dir=tmpdir,
+max_epochs=1,
+limit_train_batches=0.1,
+limit_val_batches=0.1,
+num_sanity_val_steps=0,
+logger=TensorBoardLogger(tmpdir),
)
trainer.fit(model)

@@ -270,6 +275,7 @@ class _Test:

trainer = Trainer(
default_root_dir=tmpdir,
+logger=TensorBoardLogger(tmpdir),
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0.1,
@@ -294,6 +300,7 @@ class _Test:
dm = TestDataModule(diff_params)
trainer = Trainer(
default_root_dir=tmpdir,
+logger=TensorBoardLogger(tmpdir),
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0.1,
@@ -311,6 +318,7 @@ class _Test:
dm = TestDataModule(tensor_params)
trainer = Trainer(
default_root_dir=tmpdir,
+logger=TensorBoardLogger(tmpdir),
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0.1,
2 changes: 1 addition & 1 deletion tests/tests_pytorch/loggers/test_tensorboard.py
@@ -39,7 +39,7 @@ def __init__(self, b1=0.5, b2=0.999):
super().__init__()
self.save_hyperparameters()

-trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
+trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=TensorBoardLogger(tmpdir))
model = CustomModel()
assert trainer.log_dir == trainer.logger.log_dir
trainer.fit(model)
9 changes: 8 additions & 1 deletion tests/tests_pytorch/models/test_grad_norm.py
@@ -18,6 +18,7 @@

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel
+from pytorch_lightning.loggers import CSVLogger


class ModelWithManualGradTracker(BoringModel):
@@ -86,7 +87,13 @@ def on_train_batch_end(self, *_) -> None:
@pytest.mark.parametrize("log_every_n_steps", [1, 2, 3])
def test_grad_tracking_interval(tmpdir, log_every_n_steps):
"""Test that gradient norms get tracked in the right interval and that everytime the same keys get logged."""
-trainer = Trainer(default_root_dir=tmpdir, track_grad_norm=2, log_every_n_steps=log_every_n_steps, max_steps=10)
+trainer = Trainer(
+default_root_dir=tmpdir,
+track_grad_norm=2,
+log_every_n_steps=log_every_n_steps,
+max_steps=10,
+logger=CSVLogger(tmpdir),
+)

with patch.object(trainer.logger, "log_metrics") as mocked:
model = BoringModel()
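The patch.object call visible above targets trainer.logger.log_metrics, which only works when trainer.logger is a concrete logger instance, hence the explicit CSVLogger. A hedged sketch of that mocking pattern; LoggingModel and the test name are illustrative additions, and the subclass assumes BoringModel.training_step returns a dict with a "loss" entry:

from unittest.mock import patch

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import CSVLogger


class LoggingModel(BoringModel):
    def training_step(self, batch, batch_idx):
        out = super().training_step(batch, batch_idx)
        # Log something so the trainer has metrics to forward to the logger.
        self.log("train_loss", out["loss"])
        return out


def test_log_metrics_is_called(tmpdir):
    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, log_every_n_steps=1, logger=CSVLogger(tmpdir))
    # Patching the logger is only possible because an explicit instance was passed above.
    with patch.object(trainer.logger, "log_metrics") as mocked:
        trainer.fit(LoggingModel())
    assert mocked.call_count >= 1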
10 changes: 8 additions & 2 deletions tests/tests_pytorch/models/test_hparams.py
@@ -33,6 +33,7 @@
from pytorch_lightning.core.mixins import HyperparametersMixin
from pytorch_lightning.core.saving import load_hparams_from_yaml, save_hparams_to_yaml
from pytorch_lightning.demos.boring_classes import BoringDataModule, BoringModel, RandomDataset
+from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
from pytorch_lightning.utilities import _OMEGACONF_AVAILABLE, AttributeDict, is_picklable
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests_pytorch.helpers.runif import RunIf
@@ -642,7 +643,12 @@ def test_init_arg_with_runtime_change(tmpdir, cls):
assert model.hparams.running_arg == -1

trainer = Trainer(
-default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, max_epochs=1
+default_root_dir=tmpdir,
+limit_train_batches=2,
+limit_val_batches=2,
+limit_test_batches=2,
+max_epochs=1,
+logger=TensorBoardLogger(tmpdir),
)
trainer.fit(model)

@@ -875,7 +881,7 @@ def test_colliding_hparams(tmpdir):
model = SaveHparamsModel({"data_dir": "abc", "arg2": "abc"})
data = DataModuleWithHparams({"data_dir": "foo"})

-trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
+trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=CSVLogger(tmpdir))
with pytest.raises(MisconfigurationException, match=r"Error while merging hparams:"):
trainer.fit(model, datamodule=data)
