Deprecate logger=bool #15662

Closed · wants to merge 26 commits
Changes from 6 commits
2 changes: 1 addition & 1 deletion docs/source-pytorch/extensions/logging.rst
@@ -378,7 +378,7 @@ When Lightning creates a checkpoint, it stores a key ``"hyper_parameters"`` with

Some loggers also allow logging the hyperparams used in the experiment. For instance,
when using the ``TensorBoardLogger``, all hyperparams will show
in the `hparams tab <https://pytorch.org/docs/stable/tensorboard.html#torch.utils.tensorboard.writer.SummaryWriter.add_hparams>`_.
in the hparams tab at :meth:`torch.utils.tensorboard.writer.SummaryWriter.add_hparams`.

.. note::
If you want to track a metric in the tensorboard hparams tab, log scalars to the key ``hp_metric``. If tracking multiple metrics, initialize ``TensorBoardLogger`` with ``default_hp_metric=False`` and call ``log_hyperparams`` only once with your metric keys and initial values. Subsequent updates can simply be logged to the metric keys. Refer to the examples below for setting up proper hyperparams metrics tracking within the :doc:`LightningModule <../common/lightning_module>`.
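A minimal sketch of the workflow described in the note above, assuming TensorBoard is installed; the hyperparameter names, the ``val_acc`` metric key, and the ``logs`` directory are illustrative:

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger

# Disable the automatic "hp_metric" placeholder so the hparams tab tracks our own keys.
logger = TensorBoardLogger("logs", default_hp_metric=False)

# Register the hyperparameters once, together with initial values for the tracked metrics.
# Later calls to `self.log("val_acc", ...)` inside the LightningModule update the same key.
logger.log_hyperparams({"lr": 1e-3, "batch_size": 32}, {"val_acc": 0.0})

trainer = Trainer(logger=logger)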
1 change: 0 additions & 1 deletion requirements/pytorch/base.txt
@@ -6,7 +6,6 @@ torch>=1.10.*, <1.13.0
tqdm>=4.57.0, <4.65.0
PyYAML>=5.4, <=6.0
fsspec[http]>2021.06.0, <2022.8.0
tensorboard>=2.9.1, <2.11.0
torchmetrics>=0.7.0, <0.10.1 # needed for using fixed compare_version
packaging>=17.0, <=21.3
typing-extensions>=4.0.0, <=4.4.0
5 changes: 3 additions & 2 deletions requirements/pytorch/test.txt
@@ -12,5 +12,6 @@ scikit-learn>0.22.1, <1.1.3
onnxruntime<1.13.0
psutil<5.9.4 # for `DeviceStatsMonitor`
pandas>1.0, <1.5.2 # needed in benchmarks
fastapi<0.87.0
uvicorn<0.19.1
tensorboard>=2.9.1, <2.11.0 # for `TensorBoardLogger`
fastapi<0.87.0 # for `ServableModuleValidator`
uvicorn<0.19.1 # for `ServableModuleValidator`
2 changes: 1 addition & 1 deletion src/pytorch_lightning/callbacks/device_stats_monitor.py
@@ -64,7 +64,7 @@ def setup(
return

if not trainer.loggers:
raise MisconfigurationException("Cannot use `DeviceStatsMonitor` callback with `Trainer(logger=False)`.")
raise MisconfigurationException("Cannot use `DeviceStatsMonitor` callback with `Trainer(logger=None)`.")

# warn in setup to warn once
device = trainer.strategy.root_device
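For context on the message change above: ``DeviceStatsMonitor`` only has somewhere to write its statistics when at least one logger is configured. A hedged sketch of the two situations (the logger choice and save directory are illustrative):

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import DeviceStatsMonitor
from pytorch_lightning.loggers import TensorBoardLogger

# Without a logger, the callback's setup() raises the MisconfigurationException above once fit() runs:
# Trainer(logger=None, callbacks=[DeviceStatsMonitor()]).fit(model)

# With a logger, the callback records device statistics alongside the other metrics:
trainer = Trainer(logger=TensorBoardLogger("logs"), callbacks=[DeviceStatsMonitor()])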
11 changes: 10 additions & 1 deletion src/pytorch_lightning/cli.py
@@ -25,6 +25,7 @@
import pytorch_lightning as pl
from lightning_lite.utilities.cloud_io import get_filesystem
from pytorch_lightning import Callback, LightningDataModule, LightningModule, seed_everything, Trainer
from pytorch_lightning.trainer.connectors.logger_connector.logger_connector import _NoneSentinel
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
@@ -550,9 +551,17 @@ def _instantiate_trainer(self, config: Dict[str, Any], callbacks: List[Callback]
value = self.trainer_defaults[key]
config[key] += value if isinstance(value, list) else [value]
if self.save_config_callback and not config.get("fast_dev_run", False):
to_save = self.config.get(str(self.subcommand), self.config)
# TODO: remove this override in v2.0.0
if (
"trainer" in to_save
and "logger" in to_save.trainer
and isinstance(to_save.trainer.logger, _NoneSentinel)
):
to_save.trainer.logger = None
config_callback = self.save_config_callback(
self._parser(self.subcommand),
self.config.get(str(self.subcommand), self.config),
to_save,
**self.save_config_kwargs,
)
config[key].append(config_callback)
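The ``to_save`` override above exists because the sentinel object cannot be serialized when the config callback writes the final config to disk, whereas a plain ``None`` can. A generic PyYAML illustration of that constraint (the real callback serializes through the CLI's parser rather than calling PyYAML directly, but the limitation is the same; the class here is a stand-in for the sentinel):

import yaml

class _Sentinel:  # stand-in for the `_NoneSentinel` added in this PR
    pass

yaml.safe_dump({"logger": None})         # -> "logger: null\n", a valid config entry
yaml.safe_dump({"logger": _Sentinel()})  # raises yaml.representer.RepresenterError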
5 changes: 1 addition & 4 deletions src/pytorch_lightning/demos/boring_classes.py
@@ -156,11 +156,8 @@ def predict_dataloader(self) -> DataLoader:


class BoringDataModule(LightningDataModule):
def __init__(self, data_dir: str = "./"):
def __init__(self) -> None:
super().__init__()
self.data_dir = data_dir
self.non_picklable = None
self.checkpoint_state: Optional[str] = None
self.random_full = RandomDataset(32, 64 * 4)

def setup(self, stage: str) -> None:
32 changes: 25 additions & 7 deletions src/pytorch_lightning/loggers/tensorboard.py
@@ -19,12 +19,11 @@
import logging
import os
from argparse import Namespace
from typing import Any, Dict, Mapping, Optional, Union
from typing import Any, Dict, Mapping, Optional, TYPE_CHECKING, Union

import numpy as np
from lightning_utilities.core.imports import RequirementCache
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams

import pytorch_lightning as pl
from lightning_lite.utilities.cloud_io import get_filesystem
@@ -34,19 +33,23 @@
from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE
from pytorch_lightning.utilities.logger import _add_prefix, _convert_params, _flatten_dict
from pytorch_lightning.utilities.logger import _sanitize_params as _utils_sanitize_params
from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_only, rank_zero_warn

log = logging.getLogger(__name__)

if _OMEGACONF_AVAILABLE:
from omegaconf import Container, OmegaConf

_TENSORBOARD_AVAILABLE = RequirementCache("tensorboard>=2.9.1")
if TYPE_CHECKING and _TENSORBOARD_AVAILABLE:
from torch.utils.tensorboard import SummaryWriter


class TensorBoardLogger(Logger):
r"""
Log to local file system in `TensorBoard <https://www.tensorflow.org/tensorboard>`_ format.

Implemented using :class:`~torch.utils.tensorboard.SummaryWriter`. Logs are saved to
Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to
``os.path.join(save_dir, name, version)``. This is the default logger in Lightning, it comes
preinstalled.

@@ -77,7 +80,7 @@ class TensorBoardLogger(Logger):
sub_dir: Sub-directory to group TensorBoard logs. If a sub_dir argument is passed
then logs are saved in ``/save_dir/name/version/sub_dir/``. Defaults to ``None`` in which
logs are saved in ``/save_dir/name/version/``.
\**kwargs: Additional arguments used by :class:`SummaryWriter` can be passed as keyword
\**kwargs: Additional arguments used by :class:`torch.utils.tensorboard.SummaryWriter` can be passed as keyword
arguments in this logger. To automatically flush to disk, `max_queue` sets the size
of the queue for pending logs before flushing. `flush_secs` determines how many seconds
elapses before flushing.
@@ -97,6 +100,17 @@ def __init__(
sub_dir: Optional[_PATH] = None,
**kwargs: Any,
):
if not _TENSORBOARD_AVAILABLE:
# TODO: replace this with `raise ModuleNotFoundError` in v2.0.0
rank_zero_deprecation(
"`tensorboard` has been removed as a dependency of `pytorch_lightning` in v1.9.0. Until v2.0.0 we will"
f" try to install it for you automatically. This deprecation is caused by {_TENSORBOARD_AVAILABLE!s}"
)
import pip

retcode = pip.main(["install", _TENSORBOARD_AVAILABLE.requirement])
assert retcode == 0

super().__init__()
save_dir = os.fspath(save_dir)
self._save_dir = save_dir
@@ -157,7 +171,7 @@ def sub_dir(self) -> Optional[str]:

@property
@rank_zero_experiment
def experiment(self) -> SummaryWriter:
def experiment(self) -> "SummaryWriter":
r"""
Actual tensorboard object. To use TensorBoard features in your
:class:`~pytorch_lightning.core.module.LightningModule` do the following.
@@ -173,6 +187,8 @@ def experiment(self) -> SummaryWriter:
assert rank_zero_only.rank == 0, "tried to init log dirs in non global_rank=0"
if self.root_dir:
self._fs.makedirs(self.root_dir, exist_ok=True)
from torch.utils.tensorboard import SummaryWriter

self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)
return self._experiment

@@ -209,6 +225,8 @@ def log_hyperparams(

if metrics:
self.log_metrics(metrics, 0)
from torch.utils.tensorboard.summary import hparams

exp, ssi, sei = hparams(params, metrics)
writer = self.experiment._get_file_writer()
writer.add_summary(exp)
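Stepping back, the changes in this file implement a lazy/optional-import pattern: availability is checked once with ``RequirementCache`` and ``SummaryWriter`` is imported only where it is first needed. A reduced, self-contained sketch of that pattern (the ``_LazyWriterHolder`` class is a made-up stand-in, not part of Lightning):

from typing import Any, Optional, TYPE_CHECKING

from lightning_utilities.core.imports import RequirementCache

_TENSORBOARD_AVAILABLE = RequirementCache("tensorboard>=2.9.1")

if TYPE_CHECKING:
    # Only evaluated by type checkers; the runtime import is deferred below.
    from torch.utils.tensorboard import SummaryWriter


class _LazyWriterHolder:
    """Defer the heavy tensorboard import until the writer is actually used."""

    def __init__(self, log_dir: str, **kwargs: Any) -> None:
        if not _TENSORBOARD_AVAILABLE:
            # RequirementCache's string form explains which requirement is missing.
            raise ModuleNotFoundError(str(_TENSORBOARD_AVAILABLE))
        self._log_dir = log_dir
        self._kwargs = kwargs
        self._experiment: Optional["SummaryWriter"] = None

    @property
    def experiment(self) -> "SummaryWriter":
        if self._experiment is None:
            from torch.utils.tensorboard import SummaryWriter  # local import keeps the dependency optional

            self._experiment = SummaryWriter(log_dir=self._log_dir, **self._kwargs)
        return self._experiment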
src/pytorch_lightning/trainer/connectors/logger_connector/logger_connector.py
@@ -22,6 +22,15 @@
from pytorch_lightning.loggers import Logger, TensorBoardLogger
from pytorch_lightning.trainer.connectors.logger_connector.result import _METRICS, _OUT_DICT, _PBAR_DICT
from pytorch_lightning.utilities.metrics import metrics_to_scalars
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation


# TODO: remove in v2.0.0
class _NoneSentinel:
"""Used as a sentinel value for ``None`` in the depreaction for ``Trainer(logger=bool)``.

Remove this class with the deprecation.
"""


class LoggerConnector:
@@ -37,7 +46,7 @@ def __init__(self, trainer: "pl.Trainer") -> None:

def on_trainer_init(
self,
logger: Union[bool, Logger, Iterable[Logger]],
logger: Optional[Union[Logger, Iterable[Logger], _NoneSentinel]],
log_every_n_steps: int,
move_metrics_to_cpu: bool,
) -> None:
@@ -51,12 +60,22 @@ def should_update_logs(self) -> bool:
should_log = (self.trainer.fit_loop.epoch_loop._batches_that_stepped + 1) % self.trainer.log_every_n_steps == 0
return should_log or self.trainer.should_stop

def configure_logger(self, logger: Union[bool, Logger, Iterable[Logger]]) -> None:
if not logger:
# logger is None or logger is False
def configure_logger(self, logger: Optional[Union[Logger, Iterable[Logger], _NoneSentinel]]) -> None:
if isinstance(logger, _NoneSentinel) or not logger:
if logger is False:
# TODO: remove in v2.0.0
rank_zero_deprecation(
"`Trainer(logger=False)` has been deprecated in favor of `Trainer(logger=None)` in v1.9.0 and will"
" be removed in v2.0.0."
)
self.trainer.loggers = []
elif logger is True:
# default logger
# TODO: remove in v2.0.0
rank_zero_deprecation(
"`Trainer(logger=True)` has been deprecated in favor of `Trainer(logger=TensorBoardLogger())` in v1.9.0"
" and will be removed in v2.0.0. Additionally, the `tensorboard` dependency will not be installed"
" when installing `pytorch_lightning`."
)
self.trainer.loggers = [
TensorBoardLogger(save_dir=self.trainer.default_root_dir, version=SLURMEnvironment.job_id())
]
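Taken together, the two deprecation branches above replace the boolean shortcuts with explicit values; from the user's side the migration looks like this (the save directory is illustrative):

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger

# Deprecated by this PR:
# Trainer(logger=True)   # implicit default TensorBoardLogger
# Trainer(logger=False)  # disable logging

# Replacement:
Trainer(logger=TensorBoardLogger(save_dir="lightning_logs"))  # explicit logger
Trainer(logger=None)                                          # logging disabled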
14 changes: 7 additions & 7 deletions src/pytorch_lightning/trainer/trainer.py
@@ -72,7 +72,7 @@
from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from pytorch_lightning.trainer.connectors.data_connector import DataConnector
from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector
from pytorch_lightning.trainer.connectors.logger_connector.logger_connector import _NoneSentinel, LoggerConnector
from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, _PBAR_DICT, _ResultCollection
from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector
from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus
@@ -112,7 +112,8 @@ class Trainer:
@_defaults_from_env_vars
def __init__(
self,
logger: Union[Logger, Iterable[Logger], bool] = True,
# TODO: Replace the default with `None` in 2.0
logger: Optional[Union[Logger, Iterable[Logger], _NoneSentinel]] = _NoneSentinel(),
enable_checkpointing: bool = True,
callbacks: Optional[Union[List[Callback], Callback]] = None,
default_root_dir: Optional[_PATH] = None,
@@ -270,11 +271,10 @@ def __init__(
limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.

logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses
the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are
provided, local files (checkpoints, profiler traces, etc.) are saved in the ``log_dir`` of
the first logger.
Default: ``True``.
logger: Logger (or iterable collection of loggers) for experiment tracking. ``None`` will disable logging.
If multiple loggers are provided, local files (checkpoints, profiler traces, etc.) are saved in the
``log_dir`` of the first logger.
Default: ``None``.

log_every_n_steps: How often to log within steps.
Default: ``50``.
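The ``_NoneSentinel()`` default above is the usual trick for telling "argument omitted" apart from an explicitly passed ``None`` during a deprecation window. A self-contained sketch of the general idea (names are generic, not the Trainer's actual internals):

from typing import Optional, Union


class _Unset:
    """An instance of this type as the default means the caller passed nothing."""


_UNSET = _Unset()


def configure(logger: Optional[Union[str, _Unset]] = _UNSET) -> str:
    if isinstance(logger, _Unset):
        return "argument omitted: the library picks its own default"
    if logger is None:
        return "caller explicitly disabled logging"
    return f"caller chose logger {logger!r}"


print(configure())               # argument omitted: the library picks its own default
print(configure(None))           # caller explicitly disabled logging
print(configure("tensorboard"))  # caller chose logger 'tensorboard'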
2 changes: 1 addition & 1 deletion src/pytorch_lightning/utilities/argparse.py
@@ -51,7 +51,7 @@ def from_argparse_args(
>>> parser = Trainer.add_argparse_args(parser)
>>> parser.add_argument('--my_custom_arg', default='something') # doctest: +SKIP
>>> args = Trainer.parse_argparser(parser.parse_args(""))
>>> trainer = Trainer.from_argparse_args(args, logger=False)
>>> trainer = Trainer.from_argparse_args(args)
"""
if isinstance(args, ArgumentParser):
args = cls.parse_argparser(args)
1 change: 0 additions & 1 deletion tests/tests_pytorch/benchmarks/test_basic_parity.py
@@ -160,7 +160,6 @@ def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
enable_checkpointing=False,
accelerator="gpu" if device_type == "cuda" else "cpu",
devices=1,
logger=False,
replace_sampler_ddp=False,
benchmark=False,
)
tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py
@@ -189,7 +189,6 @@ def test_rich_progress_bar_leave(tmpdir, leave, reset_call_count):
limit_val_batches=0,
max_epochs=4,
callbacks=progress_bar,
logger=False,
enable_checkpointing=False,
enable_model_summary=False,
)
@@ -429,7 +428,6 @@ def test_rich_progress_bar_can_be_pickled():
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
logger=False,
enable_model_summary=False,
)
model = BoringModel()
tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py
@@ -315,7 +315,6 @@ def on_validation_epoch_end(self, *args):
limit_train_batches=1,
limit_val_batches=limit_val_batches,
callbacks=[pbar],
logger=False,
enable_checkpointing=False,
)
trainer.fit(model)
@@ -371,7 +370,6 @@ def test_main_progress_bar_update_amount(
limit_train_batches=train_batches,
limit_val_batches=val_batches,
callbacks=[progress_bar],
logger=False,
enable_checkpointing=False,
)
with mock.patch("pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm", MockTqdm):
@@ -392,7 +390,6 @@ def test_test_progress_bar_update_amount(tmpdir, test_batches: int, refresh_rate
max_epochs=1,
limit_test_batches=test_batches,
callbacks=[progress_bar],
logger=False,
enable_checkpointing=False,
)
with mock.patch("pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm", MockTqdm):
@@ -410,9 +407,7 @@ def training_step(self, batch, batch_idx):
self.log("c", {"c1": 2}, prog_bar=True, on_epoch=False)
return super().training_step(batch, batch_idx)

trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, logger=False, enable_checkpointing=False
)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, enable_checkpointing=False)
trainer.fit(TestModel())

torch.testing.assert_close(trainer.progress_bar_metrics["a"], 0.123)
@@ -546,7 +541,6 @@ def test_tqdm_progress_bar_can_be_pickled():
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
logger=False,
enable_model_summary=False,
)
model = BoringModel()
tests/tests_pytorch/callbacks/test_device_stats_monitor.py
@@ -137,7 +137,6 @@ def test_device_stats_monitor_no_logger(tmpdir):
default_root_dir=tmpdir,
callbacks=[device_stats],
max_epochs=1,
logger=False,
enable_checkpointing=False,
enable_progress_bar=False,
)
1 change: 0 additions & 1 deletion tests/tests_pytorch/callbacks/test_early_stopping.py
@@ -308,7 +308,6 @@ def training_step(self, batch, batch_idx):
limit_train_batches=limit_train_batches,
min_epochs=min_epochs,
min_steps=min_steps,
logger=False,
enable_checkpointing=False,
enable_progress_bar=False,
enable_model_summary=False,
2 changes: 1 addition & 1 deletion tests/tests_pytorch/callbacks/test_lr_monitor.py
@@ -192,7 +192,7 @@ def test_lr_monitor_no_logger(tmpdir):
model = BoringModel()

lr_monitor = LearningRateMonitor()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor], logger=False)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[lr_monitor])

with pytest.raises(MisconfigurationException, match="`Trainer` that has no logger"):
trainer.fit(model)
3 changes: 0 additions & 3 deletions tests/tests_pytorch/callbacks/test_pruning.py
@@ -108,7 +108,6 @@ def train_with_pruning_callback(
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=10,
@@ -214,7 +213,6 @@ def apply_lottery_ticket_hypothesis(self):
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=5,
@@ -241,7 +239,6 @@ def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool
enable_progress_bar=False,
enable_model_summary=False,
enable_checkpointing=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=3,