diff --git a/docs/source-pytorch/common/trainer.rst b/docs/source-pytorch/common/trainer.rst index 1eb3f270fa1a2..6b81e949d6eb6 100644 --- a/docs/source-pytorch/common/trainer.rst +++ b/docs/source-pytorch/common/trainer.rst @@ -856,21 +856,13 @@ See Also: logger ^^^^^^ -.. raw:: html - - - -| - -:doc:`Logger <../visualize/loggers>` (or iterable collection of loggers) for experiment tracking. A ``True`` value uses the default ``TensorBoardLogger`` shown below. ``False`` will disable logging. +Pass a :doc:`Logger <../visualize/loggers>` (or iterable collection of loggers) for experiment tracking. .. testcode:: + :skipif: not _TENSORBOARD_AVAILABLE from pytorch_lightning.loggers import TensorBoardLogger - # default logger used by trainer logger = TensorBoardLogger(save_dir=os.getcwd(), version=1, name="lightning_logs") Trainer(logger=logger) diff --git a/docs/source-pytorch/conf.py b/docs/source-pytorch/conf.py index 5bb3eb4c1115f..bbfefcb43af01 100644 --- a/docs/source-pytorch/conf.py +++ b/docs/source-pytorch/conf.py @@ -401,6 +401,7 @@ def package_list_from_file(file): _APEX_AVAILABLE, _TORCHVISION_AVAILABLE, ) +from pytorch_lightning.loggers.tensorboard import _TENSORBOARD_AVAILABLE from pytorch_lightning.loggers.neptune import _NEPTUNE_AVAILABLE from pytorch_lightning.loggers.comet import _COMET_AVAILABLE from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE diff --git a/docs/source-pytorch/extensions/logging.rst b/docs/source-pytorch/extensions/logging.rst index fdb467b7eda80..3c362fafc4382 100644 --- a/docs/source-pytorch/extensions/logging.rst +++ b/docs/source-pytorch/extensions/logging.rst @@ -62,6 +62,7 @@ To visualize tensorboard in a jupyter notebook environment, run the following co You can also pass a custom Logger to the :class:`~pytorch_lightning.trainer.trainer.Trainer`. .. testcode:: + :skipif: not _TENSORBOARD_AVAILABLE from pytorch_lightning import loggers as pl_loggers @@ -79,6 +80,7 @@ Choose from any of the others such as MLflow, Comet, Neptune, WandB, etc. To use multiple loggers, simply pass in a ``list`` or ``tuple`` of loggers. .. testcode:: + :skipif: not _TENSORBOARD_AVAILABLE :skipif: not _COMET_AVAILABLE tb_logger = pl_loggers.TensorBoardLogger(save_dir="logs/") @@ -378,7 +380,7 @@ When Lightning creates a checkpoint, it stores a key ``"hyper_parameters"`` with Some loggers also allow logging the hyperparams used in the experiment. For instance, when using the ``TensorBoardLogger``, all hyperparams will show -in the `hparams tab `_. +in the hparams tab at :meth:`torch.utils.tensorboard.writer.SummaryWriter.add_hparams`. .. note:: If you want to track a metric in the tensorboard hparams tab, log scalars to the key ``hp_metric``. If tracking multiple metrics, initialize ``TensorBoardLogger`` with ``default_hp_metric=False`` and call ``log_hyperparams`` only once with your metric keys and initial values. Subsequent updates can simply be logged to the metric keys. Refer to the examples below for setting up proper hyperparams metrics tracking within the :doc:`LightningModule <../common/lightning_module>`. 
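A minimal sketch of the hparams-tab pattern described in the note above (editorial illustration, not part of the patch; ``LitModel`` and the ``hp/train_loss`` key are made-up names). It assumes the existing ``TensorBoardLogger(default_hp_metric=...)`` and ``log_hyperparams(params, metrics)`` APIs:

.. code-block:: python

    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset

    import pytorch_lightning as pl
    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import TensorBoardLogger


    class LitModel(pl.LightningModule):
        def __init__(self, hidden_dim: int = 16, lr: float = 1e-3):
            super().__init__()
            self.save_hyperparameters()  # stored under the "hyper_parameters" checkpoint key
            self.layer = nn.Linear(32, hidden_dim)

        def on_train_start(self):
            # call log_hyperparams exactly once, passing the metric keys with initial values
            self.logger.log_hyperparams(self.hparams, {"hp/train_loss": 1.0})

        def training_step(self, batch, batch_idx):
            loss = self.layer(batch[0]).abs().sum()
            # subsequent updates are ordinary logs to the same metric key
            self.log("hp/train_loss", loss)
            return loss

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=self.hparams.lr)


    # default_hp_metric=False suppresses the automatic "hp_metric" placeholder
    logger = TensorBoardLogger(save_dir="lightning_logs", default_hp_metric=False)
    trainer = Trainer(logger=logger, max_epochs=1, limit_train_batches=2)
    trainer.fit(LitModel(), DataLoader(TensorDataset(torch.randn(8, 32)), batch_size=4))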
diff --git a/docs/source-pytorch/starter/introduction.rst b/docs/source-pytorch/starter/introduction.rst index 3ce0eea21c8ad..0cd8ca10527f4 100644 --- a/docs/source-pytorch/starter/introduction.rst +++ b/docs/source-pytorch/starter/introduction.rst @@ -139,7 +139,6 @@ A LightningModule enables your PyTorch nn.Module to play together in complex way z = self.encoder(x) x_hat = self.decoder(z) loss = nn.functional.mse_loss(x_hat, x) - # Logging to TensorBoard by default self.log("train_loss", loss) return loss @@ -185,7 +184,7 @@ The Lightning :doc:`Trainer <../common/trainer>` automates `40+ tricks <../commo * ``optimizer.step()``, ``loss.backward()``, ``optimizer.zero_grad()`` calls * Calling of ``model.eval()``, enabling/disabling grads during evaluation * :doc:`Checkpoint Saving and Loading <../common/checkpointing>` -* Tensorboard (see :doc:`loggers <../visualize/loggers>` options) +* Support for multiple :doc:`loggers <../visualize/loggers>` * :doc:`Multi-GPU <../accelerators/gpu>` support * :doc:`TPU <../accelerators/tpu>` * :ref:`16-bit precision AMP ` support @@ -220,7 +219,16 @@ Once you've trained the model you can export to onnx, torchscript and put it int ********************* Lightning comes with a *lot* of batteries included. A helpful one is Tensorboard for visualizing experiments. -Run this on your commandline and open your browser to **http://localhost:6006/** + +.. testcode:: + :skipif: not _TENSORBOARD_AVAILABLE + + from pytorch_lightning.loggers import TensorBoardLogger + + Trainer(logger=TensorBoardLogger(save_dir=".")) + + +If you use the ``TensorBoardLogger``, you can run this on your commandline and open your browser to **http://localhost:6006/** .. code:: bash diff --git a/docs/source-pytorch/visualize/logging_basic.rst b/docs/source-pytorch/visualize/logging_basic.rst index 198639fd9cd4e..bd1b011b92f15 100644 --- a/docs/source-pytorch/visualize/logging_basic.rst +++ b/docs/source-pytorch/visualize/logging_basic.rst @@ -58,14 +58,15 @@ TODO: need progress bar here View in the browser =================== -To view metrics in the browser you need to use an *experiment manager* with these capabilities. By Default, Lightning uses Tensorboard which is free and opensource. - -Tensorboard is already enabled by default +To view metrics in the browser you need to use an *experiment manager* with these capabilities. +Pass the logger of your choice to the Trainer. For example, for TensorBoard: .. code-block:: python - # every trainer already has tensorboard enabled by default - trainer = Trainer() + from pytorch_lightning.loggers import TensorBoardLogger + + logger = TensorBoardLogger(save_dir=os.getcwd()) + Trainer(logger=logger) To launch the tensorboard dashboard run the following command on the commandline. @@ -114,8 +115,10 @@ For other reductions, we recommend logging a :class:`torchmetrics.Metric` instan ****************************** Configure the saving directory ****************************** -By default, anything that is logged is saved to the current working directory. To use a different directory, set the *default_root_dir* argument in the Trainer. +By default, anything that is logged is saved to the current working directory. To use a different directory, set the *default_root_dir* argument in the Trainer +and the ``save_dir`` argument in your logger of choice. ..
code-block:: python - Trainer(default_root_dir="/your/custom/path") + path = "your/custom/path/" + Trainer(default_root_dir=path, logger=ALogger(save_dir=path)) diff --git a/docs/source-pytorch/visualize/supported_exp_managers.rst b/docs/source-pytorch/visualize/supported_exp_managers.rst index 948974b47dac3..13d7d878001f8 100644 --- a/docs/source-pytorch/visualize/supported_exp_managers.rst +++ b/docs/source-pytorch/visualize/supported_exp_managers.rst @@ -104,7 +104,7 @@ Here's the full documentation for the :class:`~pytorch_lightning.loggers.Neptune Tensorboard =========== -`TensorBoard `_ already comes installed with Lightning. If you removed the install install the following package. +`TensorBoard `_ can be installed with. .. code-block:: bash @@ -179,6 +179,7 @@ Use multiple exp managers To use multiple experiment managers at the same time, pass a list to the *logger* :class:`~pytorch_lightning.trainer.trainer.Trainer` argument. .. testcode:: + :skipif: not _TENSORBOARD_AVAILABLE :skipif: not _WANDB_AVAILABLE from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger diff --git a/requirements/pytorch/base.txt b/requirements/pytorch/base.txt index 31163ecb602b7..ecbd1e69fcb89 100644 --- a/requirements/pytorch/base.txt +++ b/requirements/pytorch/base.txt @@ -6,7 +6,6 @@ torch>=1.10.0, <=1.13.0 tqdm>=4.57.0, <4.65.0 PyYAML>=5.4, <=6.0 fsspec[http]>2021.06.0, <2022.8.0 -tensorboardX>=2.2, <=2.5.1 # min version is set by torch.onnx missing attribute torchmetrics>=0.7.0, <0.10.1 # needed for using fixed compare_version packaging>=17.0, <=21.3 typing-extensions>=4.0.0, <=4.4.0 diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index 6f6e1f156d169..7b1adc1e2398e 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -58,6 +58,16 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Deprecated + +- Deprecated `Trainer(logger=False)` in favor of `Trainer(logger=None)` ([#15662](https://github.com/Lightning-AI/lightning/pull/15662)) + + +- Deprecated `Trainer(logger=True)` in favor of `Trainer(logger=TensorBoardLogger())` ([#15662](https://github.com/Lightning-AI/lightning/pull/15662)) + + +- Deprecated installing `tensorboard` automatically. It's still installed programmatically when `TensorBoardLogger` is used ([#15662](https://github.com/Lightning-AI/lightning/pull/15662)) + + - Deprecated `description`, `env_prefix` and `env_parse` parameters in `LightningCLI.__init__` in favour of giving them through `parser_kwargs` ([#15651](https://github.com/Lightning-AI/lightning/pull/15651)) @@ -85,6 +95,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- Enhanced `reduce_boolean_decision` to accommodate `any`-analogous semantics expected by the `EarlyStopping` callback ([#15253](https://github.com/Lightning-AI/lightning/pull/15253)) +- Fixed the automatic fallback from `Trainer(strategy="ddp_spawn", ...)` to `Trainer(strategy="ddp", ...)` when on an LSF cluster ([#15103](https://github.com/PyTorchLightning/pytorch-lightning/issues/15103)) - Fixed the `XLAProfiler` not recording anything due to mismatching of action names ([#15885](https://github.com/Lightning-AI/lightning/pull/15885)) diff --git a/src/pytorch_lightning/callbacks/device_stats_monitor.py b/src/pytorch_lightning/callbacks/device_stats_monitor.py index 0bc014290f271..895ba3c245c7c 100644 --- a/src/pytorch_lightning/callbacks/device_stats_monitor.py +++ b/src/pytorch_lightning/callbacks/device_stats_monitor.py @@ -64,7 +64,7 @@ def setup( return if not trainer.loggers: - raise MisconfigurationException("Cannot use `DeviceStatsMonitor` callback with `Trainer(logger=False)`.") + raise MisconfigurationException("Cannot use `DeviceStatsMonitor` callback with `Trainer(logger=None)`.") # warn in setup to warn once device = trainer.strategy.root_device diff --git a/src/pytorch_lightning/cli.py b/src/pytorch_lightning/cli.py index 95822a522e85d..ef5a550cfd7ff 100644 --- a/src/pytorch_lightning/cli.py +++ b/src/pytorch_lightning/cli.py @@ -25,6 +25,7 @@ import pytorch_lightning as pl from lightning_lite.utilities.cloud_io import get_filesystem from pytorch_lightning import Callback, LightningDataModule, LightningModule, seed_everything, Trainer +from pytorch_lightning.trainer.connectors.logger_connector.logger_connector import _NoneSentinel from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.model_helpers import is_overridden from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn @@ -44,6 +45,11 @@ register_unresolvable_import_paths(torch) # Required until fix https://github.com/pytorch/pytorch/issues/74483 set_config_read_mode(fsspec_enabled=True) + + # TODO: remove this override in v2.0.0 + from jsonargparse.typing import register_type + + register_type(_NoneSentinel, serializer=lambda v: None) else: locals()["ArgumentParser"] = object locals()["Namespace"] = object @@ -559,9 +565,10 @@ def _instantiate_trainer(self, config: Dict[str, Any], callbacks: List[Callback] value = self.trainer_defaults[key] config[key] += value if isinstance(value, list) else [value] if self.save_config_callback and not config.get("fast_dev_run", False): + to_save = self.config.get(str(self.subcommand), self.config) config_callback = self.save_config_callback( self._parser(self.subcommand), - self.config.get(str(self.subcommand), self.config), + to_save, **self.save_config_kwargs, ) config[key].append(config_callback) diff --git a/src/pytorch_lightning/loggers/tensorboard.py b/src/pytorch_lightning/loggers/tensorboard.py index 25b0d8fa0c127..8ce4a4fff1cff 100644 --- a/src/pytorch_lightning/loggers/tensorboard.py +++ b/src/pytorch_lightning/loggers/tensorboard.py @@ -18,6 +18,8 @@ import logging import os +import subprocess +import sys from argparse import Namespace from typing import Any, Dict, Mapping, Optional, TYPE_CHECKING, Union @@ -33,18 +35,13 @@ from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE from pytorch_lightning.utilities.logger import _add_prefix, _convert_params, _flatten_dict from pytorch_lightning.utilities.logger import _sanitize_params as _utils_sanitize_params -from 
pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn +from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn log = logging.getLogger(__name__) _TENSORBOARD_AVAILABLE = RequirementCache("tensorboard") -_TENSORBOARDX_AVAILABLE = RequirementCache("tensorboardX") -if TYPE_CHECKING: - # assumes at least one will be installed when type checking - if _TENSORBOARD_AVAILABLE: - from torch.utils.tensorboard import SummaryWriter - else: - from tensorboardX import SummaryWriter # type: ignore[no-redef] +if TYPE_CHECKING and _TENSORBOARD_AVAILABLE: + from torch.utils.tensorboard import SummaryWriter if _OMEGACONF_AVAILABLE: from omegaconf import Container, OmegaConf @@ -54,7 +51,7 @@ class TensorBoardLogger(Logger): r""" Log to local file system in `TensorBoard `_ format. - Implemented using :class:`~tensorboardX.SummaryWriter`. Logs are saved to + Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to ``os.path.join(save_dir, name, version)``. This is the default logger in Lightning, it comes preinstalled. @@ -85,7 +82,7 @@ class TensorBoardLogger(Logger): sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed then logs are saved in ``/save_dir/name/version/sub_dir/``. Defaults to ``None`` in which logs are saved in ``/save_dir/name/version/``. - \**kwargs: Additional arguments used by :class:`tensorboardX.SummaryWriter` can be passed as keyword + \**kwargs: Additional arguments used by :class:`torch.utils.tensorboard.SummaryWriter` can be passed as keyword arguments in this logger. To automatically flush to disk, `max_queue` sets the size of the queue for pending logs before flushing. `flush_secs` determines how many seconds elapses before flushing. @@ -114,10 +111,17 @@ def __init__( sub_dir: Optional[_PATH] = None, **kwargs: Any, ): - if not _TENSORBOARD_AVAILABLE and not _TENSORBOARDX_AVAILABLE: - raise ModuleNotFoundError( - "Neither `tensorboard` nor `tensorboardX` is available. Try `pip install`ing either." + if not _TENSORBOARD_AVAILABLE: + # TODO: replace this with `raise ModuleNotFoundError` in v2.0.0 + rank_zero_deprecation( + "`tensorboard` has been removed as a dependency of `pytorch_lightning` in v1.9.0. Until v2.0.0 we will" + f" try to install it for you automatically. 
This deprecation is caused by {_TENSORBOARD_AVAILABLE!s}" ) + subprocess.check_call([sys.executable, "-m", "pip", "install", _TENSORBOARD_AVAILABLE.requirement]) + delattr(_TENSORBOARD_AVAILABLE, "available") # force re-evaluation + if not _TENSORBOARD_AVAILABLE: + raise RuntimeError("Failed to install `tensorboard` automatically.") + super().__init__() save_dir = os.fspath(save_dir) self._save_dir = save_dir @@ -197,11 +201,7 @@ def experiment(self) -> "SummaryWriter": assert rank_zero_only.rank == 0, "tried to init log dirs in non global_rank=0" if self.root_dir: self._fs.makedirs(self.root_dir, exist_ok=True) - - if _TENSORBOARD_AVAILABLE: - from torch.utils.tensorboard import SummaryWriter - else: - from tensorboardX import SummaryWriter # type: ignore[no-redef] + from torch.utils.tensorboard import SummaryWriter self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs) return self._experiment @@ -239,11 +239,7 @@ def log_hyperparams( if metrics: self.log_metrics(metrics, 0) - - if _TENSORBOARD_AVAILABLE: - from torch.utils.tensorboard.summary import hparams - else: - from tensorboardX.summary import hparams # type: ignore[no-redef] + from torch.utils.tensorboard.summary import hparams exp, ssi, sei = hparams(params, metrics) writer = self.experiment._get_file_writer() diff --git a/src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py b/src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py index 779185190b2f3..d0472aa6c511b 100644 --- a/src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py +++ b/src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py @@ -22,6 +22,15 @@ from pytorch_lightning.loggers import Logger, TensorBoardLogger from pytorch_lightning.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT from pytorch_lightning.utilities.metrics import metrics_to_scalars +from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation + + +# TODO: remove in v2.0.0 +class _NoneSentinel: + """Used as a sentinel value for ``None`` in the deprecation of ``Trainer(logger=bool)``. + + Remove this class with the deprecation. + """ class LoggerConnector: @@ -37,7 +46,7 @@ def __init__(self, trainer: "pl.Trainer") -> None: def on_trainer_init( self, - logger: Union[bool, Logger, Iterable[Logger]], + logger: Optional[Union[Logger, Iterable[Logger], _NoneSentinel]], log_every_n_steps: int, move_metrics_to_cpu: bool, ) -> None: @@ -51,12 +60,22 @@ def should_update_logs(self) -> bool: should_log = (self.trainer.fit_loop.epoch_loop._batches_that_stepped + 1) % self.trainer.log_every_n_steps == 0 return should_log or self.trainer.should_stop - def configure_logger(self, logger: Union[bool, Logger, Iterable[Logger]]) -> None: - if not logger: - # logger is None or logger is False + def configure_logger(self, logger: Optional[Union[Logger, Iterable[Logger], _NoneSentinel]]) -> None: + if isinstance(logger, _NoneSentinel) or not logger: + if logger is False: + # TODO: remove in v2.0.0 + rank_zero_deprecation( + "`Trainer(logger=False)` has been deprecated in favor of `Trainer(logger=None)` in v1.9.0 and will" + " be removed in v2.0.0." + ) self.trainer.loggers = [] elif logger is True: - # default logger + # TODO: remove in v2.0.0 + rank_zero_deprecation( + "`Trainer(logger=True)` has been deprecated in favor of `Trainer(logger=TensorBoardLogger())` in v1.9.0" + " and will be removed in v2.0.0.
Additionally, the `tensorboard` dependency will not be installed" + " when installing `pytorch_lightning`." + ) self.trainer.loggers = [ TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id()) ] diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 3317814367dde..79d9360e0789d 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -78,7 +78,7 @@ from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector from pytorch_lightning.trainer.connectors.data_connector import DataConnector -from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector +from pytorch_lightning.trainer.connectors.logger_connector.logger_connector import _NoneSentinel, LoggerConnector from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, _PBAR_DICT, _ResultCollection from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus @@ -118,7 +118,8 @@ class Trainer: @_defaults_from_env_vars def __init__( self, - logger: Union[Logger, Iterable[Logger], bool] = True, + # TODO: Replace default for `None` in 2.0 + logger: Optional[Union[Logger, Iterable[Logger], _NoneSentinel]] = _NoneSentinel(), enable_checkpointing: bool = True, callbacks: Optional[Union[List[Callback], Callback]] = None, default_root_dir: Optional[_PATH] = None, @@ -276,11 +277,10 @@ def __init__( limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches). Default: ``1.0``. - logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses - the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are - provided, local files (checkpoints, profiler traces, etc.) are saved in the ``log_dir`` of - the first logger. - Default: ``True``. + logger: Logger (or iterable collection of loggers) for experiment tracking. ``None`` will disable logging. + If multiple loggers are provided, local files (checkpoints, profiler traces, etc.) are saved in the + ``log_dir`` of the first logger. + Default: ``None``. log_every_n_steps: How often to log within steps. Default: ``50``. 
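For context, a small sketch of the migration implied by the ``Trainer`` changes above (editorial illustration, not part of the patch; it only uses the public ``Trainer`` and logger APIs referenced elsewhere in this diff):

.. code-block:: python

    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger

    # Before this change:
    #   Trainer(logger=True)   # implicit default TensorBoardLogger (now deprecated)
    #   Trainer(logger=False)  # disable logging (now deprecated)

    # After: pass the logger(s) explicitly, or None to disable logging.
    trainer = Trainer(logger=TensorBoardLogger(save_dir="lightning_logs"))
    trainer = Trainer(logger=[TensorBoardLogger(save_dir="logs/"), CSVLogger(save_dir="logs/")])
    trainer = Trainer(logger=None)  # no experiment tracking; trainer.loggers == []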
diff --git a/src/pytorch_lightning/utilities/argparse.py b/src/pytorch_lightning/utilities/argparse.py index 8b1872ee7b643..78ab54602d26e 100644 --- a/src/pytorch_lightning/utilities/argparse.py +++ b/src/pytorch_lightning/utilities/argparse.py @@ -51,7 +51,7 @@ def from_argparse_args( >>> parser = Trainer.add_argparse_args(parser) >>> parser.add_argument('--my_custom_arg', default='something') # doctest: +SKIP >>> args = Trainer.parse_argparser(parser.parse_args("")) - >>> trainer = Trainer.from_argparse_args(args, logger=False) + >>> trainer = Trainer.from_argparse_args(args) """ if isinstance(args, ArgumentParser): args = cls.parse_argparser(args) @@ -139,6 +139,12 @@ def get_init_arguments_and_types(cls: _ARGPARSE_CLS) -> List[Tuple[str, Tuple, A for arg in cls_default_params: arg_type = cls_default_params[arg].annotation arg_default = cls_default_params[arg].default + # TODO: remove override in v2.0.0 + if arg == "logger": + from pytorch_lightning.trainer.connectors.logger_connector.logger_connector import _NoneSentinel + + arg_type.__args__ = tuple(type_ for type_ in arg_type.__args__ if type_ is not _NoneSentinel) + arg_default = None try: arg_types = tuple(arg_type.__args__) except (AttributeError, TypeError): @@ -213,7 +219,7 @@ def add_argparse_args( ignore_arg_names = ["self", "args", "kwargs"] - allowed_types = (str, int, float, bool) + allowed_types = (str, int, float, bool, type(None)) # Get symbols from cls or init function. for symbol in (cls, cls.__init__): diff --git a/tests/tests_pytorch/benchmarks/test_basic_parity.py b/tests/tests_pytorch/benchmarks/test_basic_parity.py index 1e817af34d892..c87e953fc1c77 100644 --- a/tests/tests_pytorch/benchmarks/test_basic_parity.py +++ b/tests/tests_pytorch/benchmarks/test_basic_parity.py @@ -160,7 +160,6 @@ def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10): enable_checkpointing=False, accelerator="gpu" if device_type == "cuda" else "cpu", devices=1, - logger=False, replace_sampler_ddp=False, benchmark=False, ) diff --git a/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py b/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py index b1a7082ef6448..94ef24103448c 100644 --- a/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py +++ b/tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py @@ -190,7 +190,6 @@ def test_rich_progress_bar_leave(tmpdir, leave, reset_call_count): limit_val_batches=0, max_epochs=4, callbacks=progress_bar, - logger=False, enable_checkpointing=False, enable_model_summary=False, ) @@ -431,7 +430,6 @@ def test_rich_progress_bar_can_be_pickled(): limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, - logger=False, enable_model_summary=False, ) model = BoringModel() diff --git a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py index bdd1c2002f1cb..8c3cba4439966 100644 --- a/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py +++ b/tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py @@ -316,7 +316,6 @@ def on_validation_epoch_end(self, *args): limit_train_batches=1, limit_val_batches=limit_val_batches, callbacks=[pbar], - logger=False, enable_checkpointing=False, ) trainer.fit(model) @@ -372,7 +371,6 @@ def test_main_progress_bar_update_amount( limit_train_batches=train_batches, limit_val_batches=val_batches, callbacks=[progress_bar], - logger=False, enable_checkpointing=False, ) with 
mock.patch("pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm", MockTqdm): @@ -393,7 +391,6 @@ def test_test_progress_bar_update_amount(tmpdir, test_batches: int, refresh_rate max_epochs=1, limit_test_batches=test_batches, callbacks=[progress_bar], - logger=False, enable_checkpointing=False, ) with mock.patch("pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm", MockTqdm): @@ -411,9 +408,7 @@ def training_step(self, batch, batch_idx): self.log("c", {"c1": 2}, prog_bar=True, on_epoch=False) return super().training_step(batch, batch_idx) - trainer = Trainer( - default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, logger=False, enable_checkpointing=False - ) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, enable_checkpointing=False) trainer.fit(TestModel()) torch.testing.assert_close(trainer.progress_bar_metrics["a"], 0.123) @@ -547,7 +542,6 @@ def test_tqdm_progress_bar_can_be_pickled(): limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, - logger=False, enable_model_summary=False, ) model = BoringModel() diff --git a/tests/tests_pytorch/callbacks/test_device_stats_monitor.py b/tests/tests_pytorch/callbacks/test_device_stats_monitor.py index 826fa0f088f28..89d0f2cfdf540 100644 --- a/tests/tests_pytorch/callbacks/test_device_stats_monitor.py +++ b/tests/tests_pytorch/callbacks/test_device_stats_monitor.py @@ -137,7 +137,6 @@ def test_device_stats_monitor_no_logger(tmpdir): default_root_dir=tmpdir, callbacks=[device_stats], max_epochs=1, - logger=False, enable_checkpointing=False, enable_progress_bar=False, ) diff --git a/tests/tests_pytorch/callbacks/test_early_stopping.py b/tests/tests_pytorch/callbacks/test_early_stopping.py index 7663a53212427..edff71a7cddc0 100644 --- a/tests/tests_pytorch/callbacks/test_early_stopping.py +++ b/tests/tests_pytorch/callbacks/test_early_stopping.py @@ -308,7 +308,6 @@ def training_step(self, batch, batch_idx): limit_train_batches=limit_train_batches, min_epochs=min_epochs, min_steps=min_steps, - logger=False, enable_checkpointing=False, enable_progress_bar=False, enable_model_summary=False, diff --git a/tests/tests_pytorch/callbacks/test_lr_monitor.py b/tests/tests_pytorch/callbacks/test_lr_monitor.py index 6fa62fa91697d..3f54995342c38 100644 --- a/tests/tests_pytorch/callbacks/test_lr_monitor.py +++ b/tests/tests_pytorch/callbacks/test_lr_monitor.py @@ -207,7 +207,7 @@ def test_lr_monitor_no_logger(tmpdir): model = BoringModel() lr_monitor = LearningRateMonitor() - trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor], logger=False) + trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor]) with pytest.raises(MisconfigurationException, match="`Trainer` that has no logger"): trainer.fit(model) diff --git a/tests/tests_pytorch/callbacks/test_pruning.py b/tests/tests_pytorch/callbacks/test_pruning.py index ccadaca5f8c75..ead619e487cea 100644 --- a/tests/tests_pytorch/callbacks/test_pruning.py +++ b/tests/tests_pytorch/callbacks/test_pruning.py @@ -108,7 +108,6 @@ def train_with_pruning_callback( enable_progress_bar=False, enable_model_summary=False, enable_checkpointing=False, - logger=False, limit_train_batches=10, limit_val_batches=2, max_epochs=10, @@ -214,7 +213,6 @@ def apply_lottery_ticket_hypothesis(self): enable_progress_bar=False, enable_model_summary=False, enable_checkpointing=False, - logger=False, limit_train_batches=10, limit_val_batches=2, max_epochs=5, @@ -241,7 +239,6 @@ def test_multiple_pruning_callbacks(tmpdir, caplog, 
make_pruning_permanent: bool enable_progress_bar=False, enable_model_summary=False, enable_checkpointing=False, - logger=False, limit_train_batches=10, limit_val_batches=2, max_epochs=3, diff --git a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py index e763bf22e018f..bc5f977741b91 100644 --- a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py @@ -467,7 +467,7 @@ def test_model_checkpoint_file_extension(tmpdir): model_checkpoint = ModelCheckpointExtensionTest( monitor="early_stop_on", dirpath=tmpdir, save_top_k=1, save_last=True ) - trainer = Trainer(default_root_dir=tmpdir, callbacks=[model_checkpoint], max_steps=1, logger=False) + trainer = Trainer(default_root_dir=tmpdir, callbacks=[model_checkpoint], max_steps=1) trainer.fit(model) expected = ["epoch=0-step=1.tpkc", "last.tpkc"] @@ -487,7 +487,6 @@ def test_model_checkpoint_save_last(tmpdir): max_epochs=epochs, limit_train_batches=10, limit_val_batches=10, - logger=False, ) trainer.fit(model) last_filename = model_checkpoint._format_checkpoint_name( @@ -575,7 +574,6 @@ def test_model_checkpoint_save_last_none_monitor(tmpdir, caplog): limit_train_batches=10, limit_val_batches=10, max_epochs=epochs, - logger=False, ) with caplog.at_level(INFO): @@ -609,7 +607,6 @@ def test_model_checkpoint_every_n_epochs(tmpdir, every_n_epochs): max_epochs=epochs, limit_train_batches=1, limit_val_batches=1, - logger=False, ) trainer.fit(model) @@ -638,7 +635,6 @@ def test_ckpt_every_n_train_steps(tmpdir): max_epochs=2, enable_progress_bar=False, callbacks=[checkpoint_callback], - logger=False, ) trainer.fit(model) @@ -673,7 +669,6 @@ def test_model_checkpoint_train_time_interval(mock_datetime, tmpdir) -> None: save_last=False, ) ], - logger=False, ) trainer.fit(model) @@ -687,7 +682,7 @@ def test_model_checkpoint_topk_zero(tmpdir): """Test that no checkpoints are saved when save_top_k=0.""" model = LogInTwoMethods() checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, save_top_k=0, save_last=True) - trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint_callback], max_epochs=2, logger=False) + trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint_callback], max_epochs=2) trainer.fit(model) # these should not be set if monitor is None assert checkpoint_callback.monitor is None @@ -713,7 +708,6 @@ def test_model_checkpoint_topk_all(tmpdir): default_root_dir=tmpdir, callbacks=[checkpoint_callback], max_epochs=epochs, - logger=False, val_check_interval=1.0, ) trainer.fit(model) @@ -957,7 +951,7 @@ def assert_checkpoint_log_dir(idx): trainer.test(model) assert trainer.current_epoch == epochs - for idx in range(1, 5): + for idx in range(5): chk = get_last_checkpoint(ckpt_dir) assert_checkpoint_content(ckpt_dir) @@ -985,7 +979,6 @@ def assert_checkpoint_log_dir(idx): assert trainer.global_step == epochs * limit_train_batches assert trainer.current_epoch == epochs assert trainer.fit_loop.epoch_progress.current.processed == epochs - assert_checkpoint_log_dir(idx) def test_configure_model_checkpoint(tmpdir): @@ -1028,7 +1021,6 @@ def test_val_check_interval_checkpoint_files(tmpdir): max_epochs=1, limit_train_batches=10, callbacks=[model_checkpoint], - logger=False, enable_progress_bar=False, enable_model_summary=False, ) @@ -1052,7 +1044,6 @@ def training_step(self, *args): limit_train_batches=1, limit_val_batches=1, callbacks=[model_checkpoint], - logger=False, enable_progress_bar=False, 
enable_model_summary=False, ) @@ -1085,7 +1076,6 @@ def training_step(self, *args): limit_train_batches=1, limit_val_batches=1, callbacks=[model_checkpoint], - logger=False, enable_progress_bar=False, enable_model_summary=False, ) @@ -1109,7 +1099,6 @@ def __init__(self, hparams): limit_train_batches=1, limit_val_batches=1, callbacks=[model_checkpoint], - logger=False, enable_progress_bar=False, enable_model_summary=False, ) @@ -1137,7 +1126,6 @@ def test_ckpt_version_after_rerun_new_trainer(tmpdir): limit_val_batches=1, default_root_dir=tmpdir, callbacks=[mc], - logger=False, enable_progress_bar=False, enable_model_summary=False, ) @@ -1163,7 +1151,6 @@ def test_ckpt_version_after_rerun_same_trainer(tmpdir): limit_val_batches=1, default_root_dir=tmpdir, callbacks=[mc], - logger=False, enable_progress_bar=False, enable_model_summary=False, ) @@ -1196,7 +1183,6 @@ def test_check_val_every_n_epochs_top_k_integration(tmpdir): check_val_every_n_epoch=2, callbacks=mc, enable_model_summary=False, - logger=False, ) trainer.fit(model) assert set(os.listdir(tmpdir)) == {"epoch=1.ckpt", "epoch=3.ckpt"} @@ -1273,7 +1259,6 @@ def test_resume_training_preserves_old_ckpt_last(tmpdir): "limit_train_batches": 3, "limit_val_batches": 0, "enable_model_summary": False, - "logger": False, } mc_kwargs = { "filename": "{step}", @@ -1322,7 +1307,6 @@ def test_save_last_versioning(tmpdir): limit_val_batches=0, enable_progress_bar=False, enable_model_summary=False, - logger=False, ) trainer.fit(model) assert {"last.ckpt", "last-v1.ckpt"} == set(os.listdir(tmpdir)) @@ -1363,7 +1347,6 @@ def test_save_last_every_n_epochs_interaction(tmpdir, every_n_epochs): limit_val_batches=0, enable_progress_bar=False, enable_model_summary=False, - logger=False, ) model = BoringModel() with patch.object(trainer, "save_checkpoint") as save_mock: diff --git a/tests/tests_pytorch/checkpointing/test_torch_saving.py b/tests/tests_pytorch/checkpointing/test_torch_saving.py index 3439ccc91ff90..1ac71b66476a1 100644 --- a/tests/tests_pytorch/checkpointing/test_torch_saving.py +++ b/tests/tests_pytorch/checkpointing/test_torch_saving.py @@ -40,7 +40,7 @@ def test_model_torch_save_ddp_cpu(tmpdir): model = BoringModel() num_epochs = 1 trainer = Trainer( - default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False + default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2 ) temp_path = os.path.join(tmpdir, "temp.pt") trainer.fit(model) diff --git a/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py b/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py index b6d90c4c84fa9..947ca73aa4ea3 100644 --- a/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_trainer_checkpoint.py @@ -50,7 +50,6 @@ def validation_step(self, batch, batch_idx): limit_val_batches=6, limit_test_batches=12, callbacks=[checkpoint_callback], - logger=False, ) trainer.fit(model) assert os.listdir(tmpdir) == ["epoch=00.ckpt"] diff --git a/tests/tests_pytorch/core/test_datamodules.py b/tests/tests_pytorch/core/test_datamodules.py index 53493ed8cc103..00b4c137dec49 100644 --- a/tests/tests_pytorch/core/test_datamodules.py +++ b/tests/tests_pytorch/core/test_datamodules.py @@ -484,7 +484,6 @@ def get_trainer(): profiler="simple", enable_model_summary=False, enable_progress_bar=False, - logger=False, ) return trainer diff --git a/tests/tests_pytorch/core/test_metric_result_integration.py 
b/tests/tests_pytorch/core/test_metric_result_integration.py index 03f630efac914..5755a4c72303f 100644 --- a/tests/tests_pytorch/core/test_metric_result_integration.py +++ b/tests/tests_pytorch/core/test_metric_result_integration.py @@ -647,7 +647,6 @@ def on_train_start(self): max_epochs=1, enable_progress_bar=False, enable_checkpointing=False, - logger=False, enable_model_summary=False, ) with pytest.raises(ValueError, match=r"compute\(\)` return of.*foo' must be a tensor"): diff --git a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py index 0c0e1da19840d..03f826ec36e4a 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_2-0.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_2-0.py @@ -13,6 +13,7 @@ # limitations under the License. """Test deprecated functionality which will be removed in v2.0.0.""" from unittest import mock +from unittest.mock import ANY import pytest @@ -20,6 +21,7 @@ from pytorch_lightning import Callback, Trainer from pytorch_lightning.cli import LightningCLI from pytorch_lightning.demos.boring_classes import BoringModel +from pytorch_lightning.loggers import TensorBoardLogger from tests_pytorch.callbacks.test_callbacks import OldStatefulCallback from tests_pytorch.helpers.runif import RunIf @@ -98,7 +100,6 @@ def on_load_checkpoint(self, trainer, pl_module, callback_state): max_epochs=1, fast_dev_run=True, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, ) with pytest.raises( @@ -122,7 +123,6 @@ def on_save_checkpoint(self, trainer, pl_module, checkpoint): max_epochs=1, fast_dev_run=True, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, ) trainer.fit(model) @@ -178,7 +178,6 @@ def on_configure_sharded_model(self, trainer, model): max_epochs=1, fast_dev_run=True, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, ) with pytest.raises(RuntimeError, match="The `on_configure_sharded_model` callback hook was removed in v1.8."): @@ -251,7 +250,6 @@ def on_before_accelerator_backend_setup(self, *args, **kwargs): max_epochs=1, fast_dev_run=True, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, ) with pytest.raises( @@ -310,7 +308,6 @@ def test_v2_0_0_unsupported_on_init_start_end(callback_class, tmpdir): max_epochs=1, fast_dev_run=True, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, ) with pytest.raises( @@ -323,6 +320,24 @@ def test_v2_0_0_unsupported_on_init_start_end(callback_class, tmpdir): trainer.validate(model) +def test_v2_0_0_default_tensorboard(monkeypatch, tmp_path): + with pytest.deprecated_call(match=r"logger=False\)` has been deprecated"): + trainer = Trainer(logger=False) + assert trainer.logger is None + + with pytest.deprecated_call(match=r"logger=True\)` has been deprecated"): + trainer = Trainer(logger=True) + assert isinstance(trainer.logger, TensorBoardLogger) + + monkeypatch.setattr(pytorch_lightning.loggers.tensorboard._TENSORBOARD_AVAILABLE, "available", False) + + with pytest.deprecated_call(match=r"tensorboard` has been removed"), mock.patch( + "subprocess.check_call" + ) as install_mock: + TensorBoardLogger(tmp_path) + install_mock.assert_called_with([ANY, "-m", "pip", "install", "tensorboard>=2.9.1"]) + + @pytest.mark.parametrize( ["name", "value"], [("description", "description"), ("env_prefix", "PL"), ("env_parse", False)], diff --git a/tests/tests_pytorch/loggers/test_all.py b/tests/tests_pytorch/loggers/test_all.py index 4477b13b5b2a9..d4ae164477d64 100644 --- 
a/tests/tests_pytorch/loggers/test_all.py +++ b/tests/tests_pytorch/loggers/test_all.py @@ -301,10 +301,7 @@ def test_logger_with_prefix_all(tmpdir, monkeypatch): logger.experiment.__getitem__().log.assert_called_once_with(1.0) # TensorBoard - if _TENSORBOARD_AVAILABLE: - import torch.utils.tensorboard as tb - else: - import tensorboardX as tb + import torch.utils.tensorboard as tb monkeypatch.setattr(tb, "SummaryWriter", Mock()) logger = _instantiate_logger(TensorBoardLogger, save_dir=tmpdir, prefix=prefix) @@ -330,10 +327,7 @@ def test_logger_default_name(tmpdir, monkeypatch): assert logger.name == "lightning_logs" # TensorBoard - if _TENSORBOARD_AVAILABLE: - import torch.utils.tensorboard as tb - else: - import tensorboardX as tb + import torch.utils.tensorboard as tb monkeypatch.setattr(tb, "SummaryWriter", Mock()) logger = _instantiate_logger(TensorBoardLogger, save_dir=tmpdir) diff --git a/tests/tests_pytorch/loggers/test_tensorboard.py b/tests/tests_pytorch/loggers/test_tensorboard.py index 87264216f2b48..4aaa375e41268 100644 --- a/tests/tests_pytorch/loggers/test_tensorboard.py +++ b/tests/tests_pytorch/loggers/test_tensorboard.py @@ -281,10 +281,7 @@ def training_step(self, *args): def test_tensorboard_finalize(monkeypatch, tmpdir): """Test that the SummaryWriter closes in finalize.""" - if _TENSORBOARD_AVAILABLE: - import torch.utils.tensorboard as tb - else: - import tensorboardX as tb + import torch.utils.tensorboard as tb monkeypatch.setattr(tb, "SummaryWriter", Mock()) logger = TensorBoardLogger(save_dir=tmpdir) diff --git a/tests/tests_pytorch/loops/batch/test_truncated_bptt.py b/tests/tests_pytorch/loops/batch/test_truncated_bptt.py index a43d15909f9bc..260ec26a317ec 100644 --- a/tests/tests_pytorch/loops/batch/test_truncated_bptt.py +++ b/tests/tests_pytorch/loops/batch/test_truncated_bptt.py @@ -100,7 +100,6 @@ def on_train_batch_start(self, *_, **__) -> None: default_root_dir=tmpdir, max_epochs=2, enable_model_summary=False, - logger=False, enable_checkpointing=False, ) trainer.fit(model) @@ -141,7 +140,6 @@ def training_epoch_end(self, training_step_outputs): default_root_dir=tmpdir, max_epochs=1, enable_model_summary=False, - logger=False, enable_checkpointing=False, ) trainer.fit(model, train_dataloaders=train_dataloader) @@ -197,7 +195,6 @@ def configure_optimizers(self): limit_train_batches=1, limit_val_batches=0, enable_model_summary=False, - logger=False, enable_checkpointing=False, enable_progress_bar=False, ) diff --git a/tests/tests_pytorch/loops/optimization/test_optimizer_loop.py b/tests/tests_pytorch/loops/optimization/test_optimizer_loop.py index 9cd851de10b40..75f2bd99f3a07 100644 --- a/tests/tests_pytorch/loops/optimization/test_optimizer_loop.py +++ b/tests/tests_pytorch/loops/optimization/test_optimizer_loop.py @@ -188,7 +188,6 @@ def configure_optimizers(self): limit_train_batches=n_batches, limit_val_batches=0, num_sanity_val_steps=0, - logger=False, enable_checkpointing=False, ) trainer.fit(model) @@ -207,7 +206,6 @@ def configure_optimizers(self): limit_train_batches=n_batches, limit_val_batches=0, num_sanity_val_steps=0, - logger=False, enable_checkpointing=False, ) with pytest.raises(CustomException): @@ -227,7 +225,6 @@ def configure_optimizers(self): limit_train_batches=n_batches, limit_val_batches=0, num_sanity_val_steps=0, - logger=False, enable_checkpointing=False, ) trainer.fit(model, ckpt_path=str(tmpdir / ".pl_auto_save.ckpt")) diff --git a/tests/tests_pytorch/loops/test_evaluation_loop.py 
b/tests/tests_pytorch/loops/test_evaluation_loop.py index 80e31a2781d1b..5eb1b5c287b5b 100644 --- a/tests/tests_pytorch/loops/test_evaluation_loop.py +++ b/tests/tests_pytorch/loops/test_evaluation_loop.py @@ -61,7 +61,6 @@ def _get_dataloader(): max_epochs=2, enable_model_summary=False, enable_checkpointing=False, - logger=False, ) train_dataloader = _get_dataloader() @@ -95,7 +94,6 @@ def _get_dataloader(): max_epochs=2, enable_model_summary=False, enable_checkpointing=False, - logger=False, ) train_dataloader = _get_dataloader() diff --git a/tests/tests_pytorch/loops/test_loops.py b/tests/tests_pytorch/loops/test_loops.py index ce9b5ab704007..398dfdda5a5bc 100644 --- a/tests/tests_pytorch/loops/test_loops.py +++ b/tests/tests_pytorch/loops/test_loops.py @@ -424,7 +424,6 @@ def configure_optimizers_multiple(self): limit_val_batches=0, accumulate_grad_batches=accumulate_grad_batches, enable_progress_bar=False, - logger=False, enable_checkpointing=False, ) @@ -625,7 +624,6 @@ def train_dataloader(self): limit_val_batches=0, accumulate_grad_batches=accumulate_grad_batches, enable_progress_bar=False, - logger=False, ) trainer.fit(model) @@ -737,7 +735,6 @@ def test_fit_loop_reset(tmpdir): limit_train_batches=4, max_epochs=2, callbacks=[checkpoint_callback], - logger=False, enable_model_summary=False, ) trainer.fit(model) diff --git a/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py b/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py index 45b372d0020e6..771a091ecb7e8 100644 --- a/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py +++ b/tests/tests_pytorch/loops/test_training_loop_flow_scalar.py @@ -288,7 +288,6 @@ def training_step(self, batch, batch_idx): limit_val_batches=1, max_epochs=4, enable_model_summary=False, - logger=False, enable_checkpointing=False, ) @@ -337,7 +336,6 @@ def on_train_batch_end(self, outputs, batch, batch_idx): limit_val_batches=1, max_epochs=4, enable_model_summary=False, - logger=False, enable_checkpointing=False, ) diff --git a/tests/tests_pytorch/models/test_amp.py b/tests/tests_pytorch/models/test_amp.py index f769a904b7a31..2bba177cf511a 100644 --- a/tests/tests_pytorch/models/test_amp.py +++ b/tests/tests_pytorch/models/test_amp.py @@ -90,7 +90,6 @@ def test_amp_cpus(tmpdir, strategy, precision, devices): limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, - logger=False, enable_checkpointing=False, enable_model_summary=False, enable_progress_bar=False, diff --git a/tests/tests_pytorch/models/test_horovod.py b/tests/tests_pytorch/models/test_horovod.py index 3d223ef93a154..a21a4b91a3f0d 100644 --- a/tests/tests_pytorch/models/test_horovod.py +++ b/tests/tests_pytorch/models/test_horovod.py @@ -357,7 +357,6 @@ def training_epoch_end(self, outputs) -> None: max_epochs=1, log_every_n_steps=1, enable_model_summary=False, - logger=False, ) trainer.fit(model) @@ -384,7 +383,7 @@ def sk_metric(preds, target): target = torch.randint(high=2, size=(num_batches, batch_size)) def _compute_batch(): - trainer = Trainer(fast_dev_run=True, strategy="horovod", logger=False) + trainer = Trainer(fast_dev_run=True, strategy="horovod") assert isinstance(trainer.accelerator, CPUAccelerator) # TODO: test that we selected the correct strategy based on horovod flags diff --git a/tests/tests_pytorch/models/test_hparams.py b/tests/tests_pytorch/models/test_hparams.py index fe85e9abade56..a980449abbbf2 100644 --- a/tests/tests_pytorch/models/test_hparams.py +++ b/tests/tests_pytorch/models/test_hparams.py @@ -700,7 +700,6 @@ def 
__init__(self, args_0, args_1, args_2, kwarg_1=None): limit_train_batches=10, limit_val_batches=10, max_epochs=epochs, - logger=False, ) trainer.fit(model) _ = TestHydraModel.load_from_checkpoint(checkpoint_callback.best_model_path) diff --git a/tests/tests_pytorch/models/test_restore.py b/tests/tests_pytorch/models/test_restore.py index 3748264d42e9e..e5d58cf8ac7eb 100644 --- a/tests/tests_pytorch/models/test_restore.py +++ b/tests/tests_pytorch/models/test_restore.py @@ -113,7 +113,6 @@ def test_model_properties_fit_ckpt_path(tmpdir): max_epochs=1, limit_train_batches=2, limit_val_batches=2, - logger=False, callbacks=[checkpoint_callback, ModelTrainerPropertyParity()], # this performs the assertions ) trainer = Trainer(**trainer_args) @@ -145,7 +144,6 @@ def configure_optimizers(self): limit_val_batches=2, limit_test_batches=2, limit_predict_batches=2, - logger=False, callbacks=[checkpoint_callback], num_sanity_val_steps=0, ) @@ -266,7 +264,6 @@ def on_train_epoch_end(self, *_): limit_train_batches=1, limit_val_batches=1, default_root_dir=tmpdir, - logger=False, enable_checkpointing=False, enable_model_summary=False, enable_progress_bar=False, @@ -307,7 +304,6 @@ def get_trainer_args(): limit_train_batches=1, limit_val_batches=2, max_epochs=1, - logger=False, callbacks=[checkpoint, callback_capture], ) assert checkpoint.best_model_path == "" @@ -347,7 +343,6 @@ def test_callbacks_references_fit_ckpt_path(tmpdir): args = { "default_root_dir": tmpdir, "max_steps": 1, - "logger": False, "limit_val_batches": 2, "num_sanity_val_steps": 0, } diff --git a/tests/tests_pytorch/plugins/precision/test_native_amp_integration.py b/tests/tests_pytorch/plugins/precision/test_native_amp_integration.py index 2afb772f28a3e..554925ab6fbc9 100644 --- a/tests/tests_pytorch/plugins/precision/test_native_amp_integration.py +++ b/tests/tests_pytorch/plugins/precision/test_native_amp_integration.py @@ -40,7 +40,6 @@ def run(fused=False): devices=1, precision=16, max_steps=5, - logger=False, enable_checkpointing=False, enable_progress_bar=False, enable_model_summary=False, diff --git a/tests/tests_pytorch/profilers/test_profiler.py b/tests/tests_pytorch/profilers/test_profiler.py index b2387f12a63f2..a464e551a9bda 100644 --- a/tests/tests_pytorch/profilers/test_profiler.py +++ b/tests/tests_pytorch/profilers/test_profiler.py @@ -163,7 +163,6 @@ def test_simple_profiler_distributed_files(tmpdir): accelerator="cpu", devices=2, profiler=profiler, - logger=False, ) trainer.fit(model) trainer.validate(model) @@ -180,7 +179,7 @@ def test_simple_profiler_distributed_files(tmpdir): def test_simple_profiler_logs(tmpdir, caplog, simple_profiler): """Ensure that the number of printed logs is correct.""" model = BoringModel() - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2, profiler=simple_profiler, logger=False) + trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2, profiler=simple_profiler) with caplog.at_level(logging.INFO, logger="pytorch_lightning.profiler"): trainer.fit(model) trainer.test(model) diff --git a/tests/tests_pytorch/strategies/test_bagua_strategy.py b/tests/tests_pytorch/strategies/test_bagua_strategy.py index 27a965d4b1a05..80f31c2d0f9a4 100644 --- a/tests/tests_pytorch/strategies/test_bagua_strategy.py +++ b/tests/tests_pytorch/strategies/test_bagua_strategy.py @@ -56,7 +56,6 @@ def test_manual_optimization(tmpdir): strategy="bagua", accelerator="gpu", devices=1, - logger=False, enable_checkpointing=False, enable_model_summary=False, enable_progress_bar=False, diff --git 
a/tests/tests_pytorch/strategies/test_ddp.py b/tests/tests_pytorch/strategies/test_ddp.py index d95c76e20d4a5..ee5e1008dbed6 100644 --- a/tests/tests_pytorch/strategies/test_ddp.py +++ b/tests/tests_pytorch/strategies/test_ddp.py @@ -66,7 +66,6 @@ def test_torch_distributed_backend_invalid(cuda_count_2, tmpdir): strategy=DDPStrategy(process_group_backend="undefined"), accelerator="cuda", devices=2, - logger=False, ) with pytest.raises(ValueError, match="Invalid backend: 'undefined'"): trainer.fit(model) diff --git a/tests/tests_pytorch/test_cli.py b/tests/tests_pytorch/test_cli.py index f80d32f4fbc64..eaffcfb8f227b 100644 --- a/tests/tests_pytorch/test_cli.py +++ b/tests/tests_pytorch/test_cli.py @@ -231,7 +231,6 @@ def test_lightning_cli_args(cleandir): "--trainer.limit_train_batches=1", "--trainer.limit_val_batches=0", "--trainer.enable_model_summary=False", - "--trainer.logger=False", "--seed_everything=1234", ] @@ -265,20 +264,20 @@ def test_lightning_env_parse(cleandir): "PL_FIT__DATA__DATA_DIR": ".", "PL_FIT__TRAINER__DEFAULT_ROOT_DIR": ".", "PL_FIT__TRAINER__MAX_EPOCHS": "1", - "PL_FIT__TRAINER__LOGGER": "False", + "PL_FIT__TRAINER__LOGGER": "null", } with mock.patch.dict(os.environ, env_vars), mock.patch("sys.argv", ["", "fit"]): cli = LightningCLI(BoringModel, DataDirDataModule, parser_kwargs={"default_env": True}) assert cli.config.fit.data.data_dir == "." assert cli.config.fit.trainer.default_root_dir == "." assert cli.config.fit.trainer.max_epochs == 1 - assert cli.config.fit.trainer.logger is False + assert cli.config.fit.trainer.logger is None def test_lightning_cli_save_config_cases(cleandir): config_path = "config.yaml" - cli_args = ["fit", "--trainer.logger=false", "--trainer.fast_dev_run=1"] + cli_args = ["fit", "--trainer.logger=null", "--trainer.fast_dev_run=1"] # With fast_dev_run!=False config should not be saved with mock.patch("sys.argv", ["any.py"] + cli_args): @@ -298,7 +297,7 @@ def test_lightning_cli_save_config_cases(cleandir): def test_lightning_cli_save_config_only_once(cleandir): config_path = "config.yaml" - cli_args = ["--trainer.logger=false", "--trainer.max_epochs=1"] + cli_args = ["--trainer.logger=null", "--trainer.max_epochs=1"] with mock.patch("sys.argv", ["any.py"] + cli_args): cli = LightningCLI(BoringModel, run=False) @@ -320,7 +319,7 @@ def test_lightning_cli_config_and_subclass_mode(cleandir): "class_path": "DataDirDataModule", "init_args": {"data_dir": "."}, }, - "trainer": {"max_epochs": 1, "enable_model_summary": False, "logger": False}, + "trainer": {"max_epochs": 1, "enable_model_summary": False}, } } config_path = "config.yaml" @@ -546,7 +545,7 @@ def test_cli_distributed_save_config_callback(cleandir, logger, strategy): def test_cli_config_overwrite(cleandir): - trainer_defaults = {"max_steps": 1, "max_epochs": 1, "logger": False} + trainer_defaults = {"max_steps": 1, "max_epochs": 1} argv = ["any.py", "fit"] with mock.patch("sys.argv", argv): @@ -1302,14 +1301,14 @@ def test_ddpstrategy_instantiation_and_find_unused_parameters(): def test_cli_logger_shorthand(): with mock.patch("sys.argv", ["any.py"]): - cli = LightningCLI(TestModel, run=False, trainer_defaults={"logger": False}) + cli = LightningCLI(TestModel, run=False, trainer_defaults={"logger": None}) assert cli.trainer.logger is None with mock.patch("sys.argv", ["any.py", "--trainer.logger=TensorBoardLogger", "--trainer.logger.save_dir=foo"]): - cli = LightningCLI(TestModel, run=False, trainer_defaults={"logger": False}) + cli = LightningCLI(TestModel, run=False, 
trainer_defaults={"logger": None}) assert isinstance(cli.trainer.logger, TensorBoardLogger) - with mock.patch("sys.argv", ["any.py", "--trainer.logger=False"]): + with mock.patch("sys.argv", ["any.py", "--trainer.logger=null"]): cli = LightningCLI(TestModel, run=False) assert cli.trainer.logger is None @@ -1482,16 +1481,16 @@ def test_pytorch_profiler_init_args(): @pytest.mark.parametrize( ["args"], [ - (["--trainer.logger=False", "--model.foo=456"],), - ({"trainer": {"logger": False}, "model": {"foo": 456}},), - (Namespace(trainer=Namespace(logger=False), model=Namespace(foo=456)),), + (["--trainer.logger=null", "--model.foo=456"],), + ({"trainer": {"logger": None}, "model": {"foo": 456}},), + (Namespace(trainer=Namespace(logger=None), model=Namespace(foo=456)),), ], ) def test_lightning_cli_with_args_given(args): with mock.patch("sys.argv", [""]): cli = LightningCLI(TestModel, run=False, args=args) assert isinstance(cli.model, TestModel) - assert cli.config.trainer.logger is False + assert cli.config.trainer.logger is None assert cli.model.foo == 456 diff --git a/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py b/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py index 9d69ad1bd3c82..5d28e9df43031 100644 --- a/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py +++ b/tests/tests_pytorch/trainer/connectors/test_checkpoint_connector.py @@ -58,7 +58,7 @@ def test_preloaded_checkpoint_lifecycle(tmpdir): def test_hpc_restore_attempt(_, tmpdir): """Test that restore() attempts to restore the hpc_ckpt with highest priority.""" model = BoringModel() - trainer = Trainer(default_root_dir=tmpdir, max_steps=1, enable_checkpointing=False, logger=False) + trainer = Trainer(default_root_dir=tmpdir, max_steps=1, enable_checkpointing=False) trainer.fit(model) hpc_ckpt_path = tmpdir / "hpc_ckpt_3.ckpt" @@ -70,7 +70,7 @@ def test_hpc_restore_attempt(_, tmpdir): torch.nn.init.constant_(param, 0) # case 1: restore hpc first, no explicit resume path provided - trainer = Trainer(default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False, logger=False) + trainer = Trainer(default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False) trainer.fit(model) for param in model.parameters(): @@ -135,7 +135,6 @@ def test_loops_restore(tmpdir): max_epochs=1, limit_train_batches=1, limit_val_batches=1, - logger=False, callbacks=[checkpoint_callback], num_sanity_val_steps=0, ) diff --git a/tests/tests_pytorch/trainer/flags/test_check_val_every_n_epoch.py b/tests/tests_pytorch/trainer/flags/test_check_val_every_n_epoch.py index dbb2a3aa81d20..2da83a4364c73 100644 --- a/tests/tests_pytorch/trainer/flags/test_check_val_every_n_epoch.py +++ b/tests/tests_pytorch/trainer/flags/test_check_val_every_n_epoch.py @@ -40,7 +40,6 @@ def on_validation_epoch_start(self) -> None: num_sanity_val_steps=0, limit_val_batches=2, check_val_every_n_epoch=2, - logger=False, ) trainer.fit(model) assert trainer.state.finished, f"Training failed with {trainer.state}" diff --git a/tests/tests_pytorch/trainer/flags/test_env_vars.py b/tests/tests_pytorch/trainer/flags/test_env_vars.py index 606fdc89467ce..7034df825b028 100644 --- a/tests/tests_pytorch/trainer/flags/test_env_vars.py +++ b/tests/tests_pytorch/trainer/flags/test_env_vars.py @@ -16,35 +16,37 @@ from pytorch_lightning import Trainer from pytorch_lightning.demos.boring_classes import BoringModel +from pytorch_lightning.loggers import CSVLogger +from pytorch_lightning.strategies import DDPStrategy -def 
test_passing_no_env_variables(): +def test_passing_no_env_variables(tmp_path): """Testing overwriting trainer arguments.""" trainer = Trainer() model = BoringModel() - assert trainer.logger is not None + assert trainer.logger is None assert trainer.max_steps == -1 assert trainer.max_epochs is None - trainer = Trainer(logger=False, max_steps=1) + trainer = Trainer(logger=CSVLogger(tmp_path), max_steps=1) trainer.fit(model) - assert trainer.logger is None + assert isinstance(trainer.logger, CSVLogger) assert trainer.max_steps == 1 assert trainer.max_epochs == -1 -@mock.patch.dict(os.environ, {"PL_TRAINER_LOGGER": "False", "PL_TRAINER_MAX_STEPS": "7"}) +@mock.patch.dict(os.environ, {"PL_TRAINER_STRATEGY": "ddp", "PL_TRAINER_MAX_STEPS": "7"}) def test_passing_env_variables_only(): """Testing overwriting trainer arguments.""" trainer = Trainer() - assert trainer.logger is None + assert isinstance(trainer.strategy, DDPStrategy) assert trainer.max_steps == 7 -@mock.patch.dict(os.environ, {"PL_TRAINER_LOGGER": "True", "PL_TRAINER_MAX_STEPS": "7"}) +@mock.patch.dict(os.environ, {"PL_TRAINER_STRATEGY": "ddp", "PL_TRAINER_MAX_STEPS": "7"}) def test_passing_env_variables_defaults(): """Testing overwriting trainer arguments.""" - trainer = Trainer(False, max_steps=42) - assert trainer.logger is None + trainer = Trainer(strategy="dp", max_steps=42) + assert isinstance(trainer.strategy, DDPStrategy) assert trainer.max_steps == 42 diff --git a/tests/tests_pytorch/trainer/flags/test_inference_mode.py b/tests/tests_pytorch/trainer/flags/test_inference_mode.py index 3ac65348c317c..b19d199fd1b9c 100644 --- a/tests/tests_pytorch/trainer/flags/test_inference_mode.py +++ b/tests/tests_pytorch/trainer/flags/test_inference_mode.py @@ -33,7 +33,7 @@ def on_test_epoch_start(self) -> None: assert torch.is_inference_mode_enabled() return super().on_test_epoch_start() - trainer = Trainer(logger=False, inference_mode=False, fast_dev_run=True) + trainer = Trainer(inference_mode=False, fast_dev_run=True) trainer.test(BoringModelNoGrad()) - trainer = Trainer(logger=False, inference_mode=True, fast_dev_run=True) + trainer = Trainer(inference_mode=True, fast_dev_run=True) trainer.test(BoringModelForInferenceMode()) diff --git a/tests/tests_pytorch/trainer/flags/test_val_check_interval.py b/tests/tests_pytorch/trainer/flags/test_val_check_interval.py index e5fd9b5dd2706..f6ca40959df3f 100644 --- a/tests/tests_pytorch/trainer/flags/test_val_check_interval.py +++ b/tests/tests_pytorch/trainer/flags/test_val_check_interval.py @@ -38,7 +38,7 @@ def on_validation_epoch_start(self) -> None: self.val_epoch_calls += 1 model = TestModel() - trainer = Trainer(max_epochs=max_epochs, val_check_interval=1 / denominator, logger=False) + trainer = Trainer(max_epochs=max_epochs, val_check_interval=1 / denominator) trainer.fit(model) assert model.train_epoch_calls == max_epochs diff --git a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py index 1ffe7ffe9defb..b97eb5022ac17 100644 --- a/tests/tests_pytorch/trainer/logging_/test_logger_connector.py +++ b/tests/tests_pytorch/trainer/logging_/test_logger_connector.py @@ -614,9 +614,7 @@ def training_step(self, batch, batch_idx): return super().training_step(batch, batch_idx) model = TestModel() - trainer_kwargs = dict( - default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=0, max_epochs=1, logger=False - ) + trainer_kwargs = dict(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=0, 
     if logger:
         trainer_kwargs["logger"] = CSVLogger(tmpdir)
     trainer = Trainer(**trainer_kwargs)
diff --git a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
index c2fd64271ede5..f3c4dcc699ddf 100644
--- a/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
+++ b/tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py
@@ -29,6 +29,8 @@
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, TQDMProgressBar
 from pytorch_lightning.core.module import LightningModule
 from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset, RandomDictDataset
+
+from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
 from pytorch_lightning.trainer.states import RunningStage
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
@@ -504,7 +506,6 @@ def on_train_end(self, trainer: Trainer, model: LightningModule):
         limit_train_batches=1,
         limit_val_batches=0,
         enable_checkpointing=False,
-        logger=False,
         enable_model_summary=False,
     )
     model = TestModel()
@@ -638,7 +639,6 @@ def training_step(self, batch, batch_idx):
         max_epochs=1,
         enable_progress_bar=False,
         enable_checkpointing=False,
-        logger=False,
         enable_model_summary=False,
     )
     model = TestModel()
diff --git a/tests/tests_pytorch/trainer/optimization/test_optimizers.py b/tests/tests_pytorch/trainer/optimization/test_optimizers.py
index 52fb6ba5028ae..46c8361b88be3 100644
--- a/tests/tests_pytorch/trainer/optimization/test_optimizers.py
+++ b/tests/tests_pytorch/trainer/optimization/test_optimizers.py
@@ -629,7 +629,6 @@ def test_lr_scheduler_state_updated_before_saving(tmpdir, every_n_train_steps, e
     trainer = Trainer(
         default_root_dir=tmpdir,
         enable_progress_bar=False,
-        logger=False,
         max_epochs=max_epochs,
         limit_train_batches=batches,
         limit_val_batches=1,
@@ -665,7 +664,6 @@ def test_plateau_scheduler_lr_step_interval_updated_after_saving(tmpdir, save_on
     trainer = Trainer(
         default_root_dir=tmpdir,
         enable_progress_bar=False,
-        logger=False,
         max_epochs=1,
         limit_train_batches=batches,
         limit_val_batches=1,
@@ -749,7 +747,6 @@ def configure_optimizers(self):
     trainer = Trainer(
         default_root_dir=tmpdir,
         enable_checkpointing=False,
-        logger=False,
         max_epochs=max_epochs,
         limit_train_batches=limit_train_batches,
         limit_val_batches=0,
diff --git a/tests/tests_pytorch/trainer/properties/test_log_dir.py b/tests/tests_pytorch/trainer/properties/test_log_dir.py
index 9e8453b9e7032..304f43347e91d 100644
--- a/tests/tests_pytorch/trainer/properties/test_log_dir.py
+++ b/tests/tests_pytorch/trainer/properties/test_log_dir.py
@@ -35,7 +35,12 @@ def test_logdir(tmpdir):
     model = TestModel(expected)

-    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[ModelCheckpoint(dirpath=tmpdir)])
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_steps=2,
+        callbacks=ModelCheckpoint(dirpath=tmpdir),
+        logger=TensorBoardLogger(tmpdir),
+    )
     assert trainer.log_dir == expected

     trainer.fit(model)
@@ -47,7 +52,9 @@ def test_logdir_no_checkpoint_cb(tmpdir):
     expected = os.path.join(tmpdir, "lightning_logs", "version_0")
     model = TestModel(expected)

-    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False, logger=TensorBoardLogger(tmpdir)
+    )
     assert trainer.log_dir == expected

     trainer.fit(model)
@@ -59,7 +66,7 @@ def test_logdir_no_logger(tmpdir):
     expected = os.path.join(tmpdir)
     model = TestModel(expected)
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, logger=False, callbacks=[ModelCheckpoint(dirpath=tmpdir)])
+    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[ModelCheckpoint(dirpath=tmpdir)])
     assert trainer.log_dir == expected

     trainer.fit(model)
@@ -71,7 +78,7 @@ def test_logdir_no_logger_no_checkpoint(tmpdir):
     expected = os.path.join(tmpdir)
     model = TestModel(expected)

-    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, logger=False, enable_checkpointing=False)
+    trainer = Trainer(default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False)
     assert trainer.log_dir == expected

     trainer.fit(model)
@@ -84,7 +91,10 @@ def test_logdir_custom_callback(tmpdir):
     model = TestModel(expected)

     trainer = Trainer(
-        default_root_dir=tmpdir, max_steps=2, callbacks=[ModelCheckpoint(dirpath=os.path.join(tmpdir, "ckpts"))]
+        default_root_dir=tmpdir,
+        max_steps=2,
+        callbacks=[ModelCheckpoint(dirpath=os.path.join(tmpdir, "ckpts"))],
+        logger=TensorBoardLogger(tmpdir),
     )

     assert trainer.log_dir == expected
diff --git a/tests/tests_pytorch/trainer/properties/test_loggers.py b/tests/tests_pytorch/trainer/properties/test_loggers.py
index e0e1057b77eb7..205477bd2e4a9 100644
--- a/tests/tests_pytorch/trainer/properties/test_loggers.py
+++ b/tests/tests_pytorch/trainer/properties/test_loggers.py
@@ -19,7 +19,7 @@
 from tests_pytorch.loggers.test_logger import CustomLogger


-def test_trainer_loggers_property():
+def test_trainer_loggers_property(tmp_path):
     """Test for correct initialization of loggers in Trainer."""
     logger1 = CustomLogger()
     logger2 = CustomLogger()
@@ -36,18 +36,18 @@ def test_trainer_loggers_property():
     assert trainer.loggers == [logger1]

     # trainer.loggers should be a list of size 1 holding the default logger
-    trainer = Trainer(logger=True)
+    trainer = Trainer(logger=TensorBoardLogger(tmp_path))
     assert trainer.loggers == [trainer.logger]
     assert isinstance(trainer.logger, TensorBoardLogger)


-def test_trainer_loggers_setters():
+def test_trainer_loggers_setters(tmp_path):
     """Test the behavior of setters for trainer.logger and trainer.loggers."""
     logger1 = CustomLogger()
     logger2 = CustomLogger()

-    trainer = Trainer()
+    trainer = Trainer(logger=TensorBoardLogger(tmp_path))
     assert type(trainer.logger) == TensorBoardLogger
     assert trainer.loggers == [trainer.logger]
@@ -87,11 +87,11 @@ def test_trainer_loggers_setters():
 )
 def test_no_logger(tmpdir, logger_value):
     """Test the cases where logger=None, logger=False, logger=[] are passed to Trainer."""
-    trainer = Trainer(
-        logger=logger_value,
-        default_root_dir=tmpdir,
-        max_steps=1,
-    )
+    if logger_value is False:
+        with pytest.deprecated_call(match=r"logger=False\)` has been deprecated"):
+            trainer = Trainer(default_root_dir=tmpdir, logger=logger_value)
+    else:
+        trainer = Trainer(default_root_dir=tmpdir, logger=logger_value)
     assert trainer.logger is None
     assert trainer.loggers == []
     assert trainer.log_dir == tmpdir
diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py
index 65c98323e1356..926f059172fc3 100644
--- a/tests/tests_pytorch/trainer/test_trainer.py
+++ b/tests/tests_pytorch/trainer/test_trainer.py
@@ -454,7 +454,6 @@ def on_load_checkpoint(self, _):
         default_root_dir=tmpdir,
         val_check_interval=1.0,
         enable_progress_bar=False,
-        logger=False,
         enable_model_summary=False,
     )
     trainer.fit(model)
@@ -491,7 +490,6 @@ def test_trainer_max_steps_and_epochs(tmpdir):
         "default_root_dir": tmpdir,
         "max_epochs": 3,
         "max_steps": num_train_samples + 10,
-        "logger": False,
"logger": False, "enable_model_summary": False, "enable_progress_bar": False, } @@ -586,7 +584,6 @@ def training_step(self, *args, **kwargs): "max_epochs": 7, # define less min steps than 1 epoch "min_steps": num_train_samples // 2, - "logger": False, "enable_model_summary": False, "enable_progress_bar": False, } @@ -655,7 +652,6 @@ def test_trainer_max_steps_accumulate_batches(tmpdir): default_root_dir=tmpdir, max_steps=num_train_samples + 10, accumulate_grad_batches=10, - logger=False, enable_progress_bar=False, enable_model_summary=False, ) @@ -738,7 +734,6 @@ def test_checkpoint_path_input_last(tmpdir, ckpt_path, save_last, fn): limit_val_batches=1, enable_model_summary=False, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, callbacks=[mc], ) @@ -782,7 +777,6 @@ def test_checkpoint_find_last(tmpdir): limit_val_batches=0, enable_model_summary=False, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, callbacks=[mc], ) @@ -797,7 +791,6 @@ def test_checkpoint_find_last(tmpdir): limit_val_batches=0, enable_model_summary=False, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, callbacks=[mc], ) @@ -866,11 +859,7 @@ def predict_step(self, batch, *_): with pytest.raises(FileNotFoundError): trainer_fn(ckpt_path="random.ckpt") else: - ckpt_path = str( - list((Path(tmpdir) / f"lightning_logs/version_{trainer.logger.version}/checkpoints").iterdir())[ - 0 - ].absolute() - ) + ckpt_path = str(list((Path(tmpdir) / "checkpoints").iterdir())[0].absolute()) trainer_fn(ckpt_path=ckpt_path) assert trainer.ckpt_path == ckpt_path @@ -1086,7 +1075,6 @@ def on_exception(self, trainer, pl_module, exception): limit_val_batches=0.1, limit_train_batches=0.2, enable_progress_bar=False, - logger=False, default_root_dir=tmpdir, ) assert not trainer.interrupted @@ -1793,7 +1781,7 @@ def test_on_load_checkpoint_missing_callbacks(tmpdir): def test_module_current_fx_attributes_reset(tmpdir): """Ensure that lightning module's attributes related to current fx are reset at the end of execution.""" model = BoringModel() - trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1, enable_checkpointing=False, logger=False) + trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1, enable_checkpointing=False) trainer.fit(model) assert model._current_fx_name is None diff --git a/tests/tests_pytorch/trainer/test_trainer_cli.py b/tests/tests_pytorch/trainer/test_trainer_cli.py index c4b39977a35b0..8147df6c288ce 100644 --- a/tests/tests_pytorch/trainer/test_trainer_cli.py +++ b/tests/tests_pytorch/trainer/test_trainer_cli.py @@ -20,6 +20,7 @@ import tests_pytorch.helpers.utils as tutils from pytorch_lightning import Trainer +from pytorch_lightning.loggers import CSVLogger from pytorch_lightning.utilities import argparse @@ -179,9 +180,9 @@ def test_argparse_args_parsing_devices(cli_args, expected_parsed, cuda_count_1): ["cli_args", "extra_args"], [ ({}, {}), - ({"logger": False}, {}), - ({"logger": False}, {"logger": True}), - ({"logger": False}, {"enable_checkpointing": True}), + ({"logger": None}, {}), + ({"logger": None}, {"logger": CSVLogger(".")}), + ({"logger": None}, {"enable_checkpointing": True}), ], ) def test_init_from_argparse_args(cli_args, extra_args): diff --git a/tests/tests_pytorch/utilities/test_fetching.py b/tests/tests_pytorch/utilities/test_fetching.py index f1ecabdbdd55a..f0d96ef2044b1 100644 --- a/tests/tests_pytorch/utilities/test_fetching.py +++ b/tests/tests_pytorch/utilities/test_fetching.py @@ -533,7 +533,6 @@ def val_dataloader(self): 
         enable_model_summary=False,
         enable_checkpointing=False,
         enable_progress_bar=False,
-        logger=False,
     )
     trainer.fit(model)
     trainer.test(model)
@@ -583,7 +582,6 @@ def training_step(self, dataloader_iter):
         enable_model_summary=False,
         enable_checkpointing=False,
         enable_progress_bar=False,
-        logger=False,
     )
     trainer.fit(model)
diff --git a/tests/tests_pytorch/utilities/test_logger.py b/tests/tests_pytorch/utilities/test_logger.py
index d041556df272b..3b25963900844 100644
--- a/tests/tests_pytorch/utilities/test_logger.py
+++ b/tests/tests_pytorch/utilities/test_logger.py
@@ -88,7 +88,7 @@ def test_flatten_dict():
     params = _flatten_dict(wrapping_dict)

     assert type(params) == dict
-    assert params["params/logger"] is True
+    assert params["params/logger"] is None
     assert params["params/gpus"] is None
     assert "logger" not in params
     assert "gpus" not in params