[pre-commit.ci] pre-commit suggestions (#20035)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jirka Borovec <[email protected]>
Co-authored-by: awaelchli <[email protected]>
3 people authored Jul 5, 2024
1 parent 3730e98 commit a40affb
Showing 20 changed files with 33 additions and 33 deletions.
2 changes: 1 addition & 1 deletion .github/CODE_OF_CONDUCT.md
@@ -6,7 +6,7 @@ In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
+level of experience, education, socioeconomic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards
6 changes: 3 additions & 3 deletions .pre-commit-config.yaml
@@ -23,7 +23,7 @@ ci:

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
-rev: v4.5.0
+rev: v4.6.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
@@ -51,7 +51,7 @@ repos:
- id: detect-private-key

- repo: https://github.com/codespell-project/codespell
-rev: v2.2.6
+rev: v2.3.0
hooks:
- id: codespell
additional_dependencies: [tomli]
@@ -70,7 +70,7 @@ repos:
- id: sphinx-lint

- repo: https://github.com/astral-sh/ruff-pre-commit
-rev: v0.3.5
+rev: v0.5.0
hooks:
# try to fix what is possible
- id: ruff
2 changes: 1 addition & 1 deletion docs/source-pytorch/past_versions.rst
@@ -3,7 +3,7 @@ Past PyTorch Lightning versions

PyTorch Lightning :doc:`evolved over time <versioning>`. Here's the history of versions with links to their respective docs.

-To help you with keeping up to spead, check :doc:`Migration guide <upgrade/migration_guide>`.
+To help you with keeping up to speed, check :doc:`Migration guide <upgrade/migration_guide>`.

.. list-table:: Past versions
:widths: 5 50 30 15
2 changes: 1 addition & 1 deletion examples/fabric/reinforcement_learning/train_fabric.py
@@ -80,7 +80,7 @@ def main(args: argparse.Namespace):
# Log hyperparameters
fabric.logger.experiment.add_text(
"hyperparameters",
-"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+"|param|value|\n|-|-|\n{}".format("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
)

# Environment setup
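This hunk and the two that follow apply the same mechanical change to the reinforcement-learning examples: printf-style "%" interpolation is replaced with str.format, presumably to satisfy the newer ruff revision. As a sanity check, a minimal sketch (with a hypothetical args namespace, not Lightning code) showing the two spellings build the identical Markdown table string:

# Minimal sketch: `args` is a stand-in for the parsed CLI namespace.
from argparse import Namespace

args = Namespace(lr=1e-3, batch_size=64)  # hypothetical values for illustration
rows = "\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])
old = "|param|value|\n|-|-|\n%s" % (rows,)      # printf-style (before)
new = "|param|value|\n|-|-|\n{}".format(rows)   # str.format (after)
assert old == new  # the refactor is purely stylistic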
@@ -55,7 +55,7 @@ def player(args, world_collective: TorchCollective, player_trainer_collective: T
# Log hyperparameters
logger.experiment.add_text(
"hyperparameters",
-"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+"|param|value|\n|-|-|\n{}".format("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
)

# Environment setup
2 changes: 1 addition & 1 deletion examples/fabric/reinforcement_learning/train_torch.py
@@ -138,7 +138,7 @@ def main(args: argparse.Namespace):
if global_rank == 0:
logger.add_text(
"hyperparameters",
-"|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
+"|param|value|\n|-|-|\n{}".format("\n".join([f"|{key}|{value}|" for key, value in vars(args).items()])),
)

# Environment setup
2 changes: 1 addition & 1 deletion src/lightning/fabric/loggers/tensorboard.py
@@ -107,7 +107,7 @@ def __init__(
self._prefix = prefix
self._fs = get_filesystem(root_dir)

-self._experiment: Optional["SummaryWriter"] = None
+self._experiment: Optional[SummaryWriter] = None
self._kwargs = kwargs

@property
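Most of the remaining hunks repeat a single cleanup: string quotes around forward references in variable annotations are dropped, e.g. Optional["SummaryWriter"] becomes Optional[SummaryWriter], presumably because the bumped ruff revision flags the quotes as redundant. These annotations all sit inside function bodies (instance attributes in __init__ or local variables), where Python does not evaluate the annotation expression at runtime, so unquoting is safe even when the referenced name is only imported for type checking. A minimal sketch of the pattern, using a hypothetical class rather than Lightning's actual logger:

from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    from torch.utils.tensorboard import SummaryWriter  # visible to the type checker only

class DemoLogger:
    def __init__(self) -> None:
        # Annotations inside a function body are never evaluated at runtime,
        # so the quoted and unquoted spellings are interchangeable here:
        self._experiment: Optional["SummaryWriter"] = None  # before
        self._experiment: Optional[SummaryWriter] = None     # after, equivalent

Return annotations (e.g. def device_mesh(self) -> "DeviceMesh": in a later hunk) are evaluated when the function is defined, which is presumably why those keep their quotes.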
2 changes: 1 addition & 1 deletion src/lightning/fabric/strategies/deepspeed.py
@@ -291,7 +291,7 @@ def __init__(
self.hysteresis = hysteresis
self.min_loss_scale = min_loss_scale

-self._deepspeed_engine: Optional["DeepSpeedEngine"] = None
+self._deepspeed_engine: Optional[DeepSpeedEngine] = None

@property
def zero_stage_3(self) -> bool:
2 changes: 1 addition & 1 deletion src/lightning/fabric/strategies/model_parallel.py
@@ -106,7 +106,7 @@ def __init__(
self._timeout: Optional[timedelta] = timeout
self._backward_sync_control = _ParallelBackwardSyncControl()

-self._device_mesh: Optional["DeviceMesh"] = None
+self._device_mesh: Optional[DeviceMesh] = None

@property
def device_mesh(self) -> "DeviceMesh":
2 changes: 1 addition & 1 deletion src/lightning/pytorch/callbacks/progress/progress_bar.py
@@ -48,7 +48,7 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
"""

def __init__(self) -> None:
-self._trainer: Optional["pl.Trainer"] = None
+self._trainer: Optional[pl.Trainer] = None
self._current_eval_dataloader_idx: Optional[int] = None

@property
14 changes: 7 additions & 7 deletions src/lightning/pytorch/callbacks/progress/rich_progress.py
@@ -148,7 +148,7 @@ def __init__(
self._trainer = trainer
self._tasks: Dict[Union[int, TaskID], Any] = {}
self._current_task_id = 0
-self._metrics: Dict[Union[str, "Style"], Any] = {}
+self._metrics: Dict[Union[str, Style], Any] = {}
self._style = style
self._text_delimiter = text_delimiter
self._metrics_format = metrics_format
@@ -274,13 +274,13 @@ def __init__(
self._console_kwargs = console_kwargs or {}
self._enabled: bool = True
self.progress: Optional[CustomProgress] = None
-self.train_progress_bar_id: Optional["TaskID"]
-self.val_sanity_progress_bar_id: Optional["TaskID"] = None
-self.val_progress_bar_id: Optional["TaskID"]
-self.test_progress_bar_id: Optional["TaskID"]
-self.predict_progress_bar_id: Optional["TaskID"]
+self.train_progress_bar_id: Optional[TaskID]
+self.val_sanity_progress_bar_id: Optional[TaskID] = None
+self.val_progress_bar_id: Optional[TaskID]
+self.test_progress_bar_id: Optional[TaskID]
+self.predict_progress_bar_id: Optional[TaskID]
self._reset_progress_bar_ids()
-self._metric_component: Optional["MetricsTextColumn"] = None
+self._metric_component: Optional[MetricsTextColumn] = None
self._progress_stopped: bool = False
self.theme = theme
self._update_for_light_colab_theme()
2 changes: 1 addition & 1 deletion src/lightning/pytorch/callbacks/stochastic_weight_avg.py
@@ -123,7 +123,7 @@ def __init__(
self._avg_fn = avg_fn or self.avg_fn
self._device = device
self._model_contains_batch_norm: Optional[bool] = None
-self._average_model: Optional["pl.LightningModule"] = None
+self._average_model: Optional[pl.LightningModule] = None
self._initialized = False
self._swa_scheduler: Optional[LRScheduler] = None
self._scheduler_state: Optional[Dict] = None
2 changes: 1 addition & 1 deletion src/lightning/pytorch/core/datamodule.py
@@ -81,7 +81,7 @@ def teardown(self):
def __init__(self) -> None:
super().__init__()
# Pointer to the trainer object
-self.trainer: Optional["pl.Trainer"] = None
+self.trainer: Optional[pl.Trainer] = None

@classmethod
def from_datasets(
6 changes: 3 additions & 3 deletions src/lightning/pytorch/core/module.py
@@ -128,7 +128,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)

# pointer to the trainer object
-self._trainer: Optional["pl.Trainer"] = None
+self._trainer: Optional[pl.Trainer] = None

# attributes that can be set by user
self._example_input_array: Optional[Union[Tensor, Tuple, Dict]] = None
@@ -142,11 +142,11 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
self._compiler_ctx: Optional[Dict[str, Any]] = None

# attributes only used when using fabric
-self._fabric: Optional["lf.Fabric"] = None
+self._fabric: Optional[lf.Fabric] = None
self._fabric_optimizers: List[_FabricOptimizer] = []

# access to device mesh in `conigure_model()` hook
-self._device_mesh: Optional["DeviceMesh"] = None
+self._device_mesh: Optional[DeviceMesh] = None

@overload
def optimizers(
4 changes: 2 additions & 2 deletions src/lightning/pytorch/plugins/precision/deepspeed.py
@@ -113,7 +113,7 @@ def backward( # type: ignore[override]
"You have overridden the `LightningModule.backward` hook but it will be ignored since DeepSpeed handles"
" the backward logic internally."
)
-deepspeed_engine: "deepspeed.DeepSpeedEngine" = model.trainer.model
+deepspeed_engine: deepspeed.DeepSpeedEngine = model.trainer.model
deepspeed_engine.backward(tensor, *args, **kwargs)

@override
@@ -135,7 +135,7 @@ def optimizer_step( # type: ignore[override]
"Skipping backward by returning `None` from your `training_step` is not supported by `DeepSpeed`"
)
# DeepSpeed handles the optimizer step internally
-deepspeed_engine: "deepspeed.DeepSpeedEngine" = model.trainer.model
+deepspeed_engine: deepspeed.DeepSpeedEngine = model.trainer.model
return deepspeed_engine.step(**kwargs)

@override
6 changes: 3 additions & 3 deletions src/lightning/pytorch/profilers/pytorch.py
@@ -301,8 +301,8 @@ def __init__(
self._table_kwargs = table_kwargs if table_kwargs is not None else {}

self.profiler: Optional[_PROFILER] = None
-self.function_events: Optional["EventList"] = None
-self._lightning_module: Optional["LightningModule"] = None # set by ProfilerConnector
+self.function_events: Optional[EventList] = None
+self._lightning_module: Optional[LightningModule] = None # set by ProfilerConnector
self._register: Optional[RegisterRecordFunction] = None
self._parent_profiler: Optional[ContextManager] = None
self._recording_map: Dict[str, record_function] = {}
@@ -400,7 +400,7 @@ def _default_schedule() -> Optional[Callable]:
return None

def _default_activities(self) -> List["ProfilerActivity"]:
-activities: List["ProfilerActivity"] = []
+activities: List[ProfilerActivity] = []
if not _KINETO_AVAILABLE:
return activities
if self._profiler_kwargs.get("use_cpu", True):
2 changes: 1 addition & 1 deletion src/lightning/pytorch/strategies/model_parallel.py
@@ -93,7 -93,7 @@ def __init__(
self._save_distributed_checkpoint = save_distributed_checkpoint
self._process_group_backend: Optional[str] = process_group_backend
self._timeout: Optional[timedelta] = timeout
-self._device_mesh: Optional["DeviceMesh"] = None
+self._device_mesh: Optional[DeviceMesh] = None
self.num_nodes = 1

@property
2 changes: 1 addition & 1 deletion src/lightning/pytorch/strategies/strategy.py
@@ -52,7 +52,7 @@ def __init__(
checkpoint_io: Optional[CheckpointIO] = None,
precision_plugin: Optional[Precision] = None,
) -> None:
-self._accelerator: Optional["pl.accelerators.Accelerator"] = accelerator
+self._accelerator: Optional[pl.accelerators.Accelerator] = accelerator
self._checkpoint_io: Optional[CheckpointIO] = checkpoint_io
self._precision_plugin: Optional[Precision] = None
# Call the precision setter for input validation
2 changes: 1 addition & 1 deletion src/lightning/pytorch/utilities/_pytree.py
@@ -25,7 +25,7 @@ def _tree_flatten(pytree: PyTree) -> Tuple[List[Any], TreeSpec]:
child_pytrees, context = flatten_fn(pytree)

result: List[Any] = []
-children_specs: List["TreeSpec"] = []
+children_specs: List[TreeSpec] = []
for child in child_pytrees:
flat, child_spec = _tree_flatten(child)
result += flat
2 changes: 1 addition & 1 deletion tests/tests_pytorch/helpers/utils.py
@@ -31,7 +31,7 @@ def get_data_path(expt_logger, path_dir):
name, version = expt_logger.name, expt_logger.version

# the other experiments...
-path_expt = os.path.join(path_dir, name, "version_%s" % version)
+path_expt = os.path.join(path_dir, name, f"version_{version}")

# try if the new sub-folder exists, typical case for test-tube
if not os.path.isdir(path_expt):
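The final hunk swaps the last printf-style interpolation for an f-string; for a hypothetical version value the two spellings agree:

version = 3  # hypothetical; stands in for expt_logger.version
assert "version_%s" % version == f"version_{version}" == "version_3"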
