From 73f5df0a0a8c9715873d8f835b8f1637664bffac Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 24 Oct 2023 14:11:54 +0200
Subject: [PATCH] Bump torch from 2.0.1 to 2.1.0 in /requirements (#18752)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Jirka
Co-authored-by: Carlos Mocholí
---
 .github/workflows/code-checks.yml             |  2 +-
 pyproject.toml                                | 13 +++------
 requirements/data/data.txt                    |  2 +-
 requirements/fabric/examples.txt              |  2 +-
 requirements/pytorch/examples.txt             |  2 +-
 requirements/typing.txt                       |  2 +-
 src/lightning/app/core/api.py                 | 22 +++++++--------
 .../plugins/collectives/torch_collective.py   |  4 +--
 .../fabric/plugins/precision/fsdp.py          |  2 +-
 src/lightning/fabric/strategies/xla_fsdp.py   |  2 +-
 src/lightning/fabric/utilities/types.py       | 27 ++++++++++++++++---
 .../pytorch/_graveyard/_torchmetrics.py       | 18 +++++--------
 src/lightning/pytorch/callbacks/finetuning.py |  2 +-
 src/lightning/pytorch/core/optimizer.py       | 14 +++++++---
 src/lightning/pytorch/demos/transformer.py    |  4 +--
 .../pytorch/overrides/distributed.py          |  4 +--
 src/lightning/pytorch/utilities/compile.py    | 23 ++++++++--------
 src/lightning/pytorch/utilities/imports.py    |  1 +
 18 files changed, 81 insertions(+), 65 deletions(-)

diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml
index 8d143cfdc1628..ad7f0002b1b9f 100644
--- a/.github/workflows/code-checks.yml
+++ b/.github/workflows/code-checks.yml
@@ -49,4 +49,4 @@ jobs:
           pip list
 
       - name: Check typing
-        run: mypy --no-warn-unused-ignores
+        run: mypy
diff --git a/pyproject.toml b/pyproject.toml
index 43ef2fc0195f7..7fcb08a439c33 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -127,17 +127,10 @@ files = [
 ]
 # This section is for folders with "-" as they are not valid python modules
 exclude = [
-    "src/lightning_app/__about__.py",
-    "src/lightning_app/__setup__.py",
-    "src/lightning_app/__version__.py",
-    "src/lightning_fabric/__about__.py",
-    "src/lightning_fabric/__setup__.py",
-    "src/lightning_fabric/__version__.py",
     "src/lightning/app/cli/app-template",
     "src/lightning/app/cli/component-template",
     "src/lightning/app/cli/pl-app-template",
     "src/lightning/app/cli/react-ui-template",
-    "src/lightning/app/launcher",
 ]
 install_types = "True"
 non_interactive = "True"
@@ -192,6 +185,9 @@ module = [
     "lightning.app.frontend.stream_lit",
     "lightning.app.frontend.utils",
     "lightning.app.frontend.web",
+    "lightning.app.launcher.launcher",
+    "lightning.app.launcher.lightning_backend",
+    "lightning.app.launcher.lightning_hybrid_backend",
     "lightning.app.pdb.pdb",
     "lightning.app.runners.backends.backend",
     "lightning.app.runners.backends.cloud",
@@ -240,9 +236,6 @@ module = [
     "lightning.app.utilities.state",
     "lightning.app.utilities.tracer",
     "lightning.app.utilities.tree",
-    "lightning.store.authentication",
-    "lightning.store.cloud_api",
-    "lightning.store.save",
     "lightning.store.utils",
 ]
 ignore_errors = "True"
diff --git a/requirements/data/data.txt b/requirements/data/data.txt
index 4813af9523aa2..40d3f24d9ab83 100644
--- a/requirements/data/data.txt
+++ b/requirements/data/data.txt
@@ -5,4 +5,4 @@
 lightning-utilities >=0.8.0, <0.10.0  # to be able to include also 0.6 and preserve `>` needed for CI min version bypass
 torchdata >0.5.9, <=0.7.0  # to be able to include also PL 2.0 and preserve `>` needed for CI min version bypass
 
-torch >0.14.0, <=2.1.0
+torch >0.14.0, <2.2.0
diff --git a/requirements/fabric/examples.txt b/requirements/fabric/examples.txt
index a09b12cfe6de7..c93942aded975 100644
--- a/requirements/fabric/examples.txt
+++ b/requirements/fabric/examples.txt
@@ -1,6 +1,6 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torchvision >=0.13.0, <0.16.0
+torchvision >=0.13.0, <0.17.0
 torchmetrics >=0.10.0, <1.3.0
 lightning-utilities >=0.8.0, <0.10.0
diff --git a/requirements/pytorch/examples.txt b/requirements/pytorch/examples.txt
index 19b9fb4f15133..15cf3b7288ece 100644
--- a/requirements/pytorch/examples.txt
+++ b/requirements/pytorch/examples.txt
@@ -1,7 +1,7 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torchvision >=0.13.0, <0.16.0
+torchvision >=0.13.0, <0.17.0
 gym[classic_control] >=0.17.0, <0.27.0
 ipython[all] <8.15.0
 torchmetrics >=0.10.0, <1.3.0
diff --git a/requirements/typing.txt b/requirements/typing.txt
index dbcc5328da8e7..33fdbc2bd8ed1 100644
--- a/requirements/typing.txt
+++ b/requirements/typing.txt
@@ -1,5 +1,5 @@
 mypy==1.5.1
-torch==2.0.1
+torch==2.1.0
 
 types-Markdown
 types-PyYAML
diff --git a/src/lightning/app/core/api.py b/src/lightning/app/core/api.py
index 0e8c34415ed23..11205ec7e894e 100644
--- a/src/lightning/app/core/api.py
+++ b/src/lightning/app/core/api.py
@@ -196,9 +196,9 @@ class StateUpdate(BaseModel):
 @fastapi_service.get("/api/v1/state", response_class=JSONResponse)
 async def get_state(
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Mapping:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
@@ -246,8 +246,8 @@ async def get_layout() -> str:
 @fastapi_service.get("/api/v1/spec", response_class=JSONResponse)
 async def get_spec(
     response: Response,
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Union[List, Dict]:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
@@ -266,9 +266,9 @@ async def get_spec(
 async def post_delta(
     request: Request,
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Optional[Dict]:
     """This endpoint is used to make an update to the app state using delta diff, mainly used by streamlit to update
     the state."""
@@ -292,9 +292,9 @@ async def post_delta(
 async def post_state(
     request: Request,
     response: Response,
-    x_lightning_type: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_uuid: Optional[str] = Header(None),  # type: ignore[assignment]
-    x_lightning_session_id: Optional[str] = Header(None),  # type: ignore[assignment]
+    x_lightning_type: Optional[str] = Header(None),
+    x_lightning_session_uuid: Optional[str] = Header(None),
+    x_lightning_session_id: Optional[str] = Header(None),
 ) -> Optional[Dict]:
     if x_lightning_session_uuid is None:
         raise Exception("Missing X-Lightning-Session-UUID header")
diff --git a/src/lightning/fabric/plugins/collectives/torch_collective.py b/src/lightning/fabric/plugins/collectives/torch_collective.py
index 50b9a4997554a..506462bd54b15 100644
--- a/src/lightning/fabric/plugins/collectives/torch_collective.py
+++ b/src/lightning/fabric/plugins/collectives/torch_collective.py
@@ -84,10 +84,10 @@ def all_to_all(self, output_tensor_list: List[Tensor], input_tensor_list: List[T
         return output_tensor_list
 
     def send(self, tensor: Tensor, dst: int, tag: int = 0) -> None:
-        dist.send(tensor, dst, tag=tag, group=self.group)  # type: ignore[arg-type]
+        dist.send(tensor, dst, tag=tag, group=self.group)
 
     def recv(self, tensor: Tensor, src: Optional[int] = None, tag: int = 0) -> Tensor:
-        dist.recv(tensor, src, tag=tag, group=self.group)  # type: ignore[arg-type]
+        dist.recv(tensor, src, tag=tag, group=self.group)
         return tensor
 
     def all_gather_object(self, object_list: List[Any], obj: Any) -> List[Any]:
diff --git a/src/lightning/fabric/plugins/precision/fsdp.py b/src/lightning/fabric/plugins/precision/fsdp.py
index 054aa23c64314..ebdafcd651d93 100644
--- a/src/lightning/fabric/plugins/precision/fsdp.py
+++ b/src/lightning/fabric/plugins/precision/fsdp.py
@@ -143,7 +143,7 @@ def unscale_gradients(self, optimizer: Optimizer) -> None:
         if scaler is not None:
             if _optimizer_handles_unscaling(optimizer):
                 raise NotImplementedError("Gradient clipping is not implemented for optimizers handling the unscaling.")
-            scaler.unscale_(optimizer)  # type: ignore[arg-type]  # ShardedGradScaler has wrong type annotation
+            scaler.unscale_(optimizer)
 
     def state_dict(self) -> Dict[str, Any]:
         if self.scaler is not None:
diff --git a/src/lightning/fabric/strategies/xla_fsdp.py b/src/lightning/fabric/strategies/xla_fsdp.py
index d34eb5caebb37..8518536e8cf99 100644
--- a/src/lightning/fabric/strategies/xla_fsdp.py
+++ b/src/lightning/fabric/strategies/xla_fsdp.py
@@ -277,7 +277,7 @@ def clip_gradients_norm(
     ) -> Tensor:
         """Clip gradients by norm."""
         self.precision.unscale_gradients(optimizer)
-        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)  # type: ignore[operator]
+        return module.clip_grad_norm_(max_norm=max_norm, norm_type=norm_type)
 
     def clip_gradients_value(self, module: Module, optimizer: Optimizer, clip_val: Union[float, int]) -> None:
         """Clip gradients by value."""
diff --git a/src/lightning/fabric/utilities/types.py b/src/lightning/fabric/utilities/types.py
index cf4d35b7e8032..3192b8a269018 100644
--- a/src/lightning/fabric/utilities/types.py
+++ b/src/lightning/fabric/utilities/types.py
@@ -12,12 +12,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 from pathlib import Path
-from typing import Any, Callable, Dict, Iterator, List, Optional, Protocol, TypeVar, Union, runtime_checkable
+from typing import (
+    Any,
+    Callable,
+    DefaultDict,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Protocol,
+    TypeVar,
+    Union,
+    runtime_checkable,
+)
 
 import torch
 from torch import Tensor
 from torch.optim import Optimizer
-from typing_extensions import TypeAlias
+from typing_extensions import TypeAlias, overload
 
 from lightning.fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_13, _TORCH_GREATER_EQUAL_2_0
@@ -117,7 +129,14 @@ def step(self, metrics: Union[float, int, Tensor], epoch: Optional[int] = None)
 class Steppable(Protocol):
     """To structurally type ``optimizer.step()``"""
 
-    # Inferred from `torch.optim.optimizer.pyi`
+    @overload
+    def step(self, closure: None = ...) -> None:
+        ...
+
+    @overload
+    def step(self, closure: Callable[[], float]) -> float:
+        ...
+
     def step(self, closure: Optional[Callable[[], float]] = ...) -> Optional[float]:
         ...
 
@@ -128,7 +147,7 @@ class Optimizable(Steppable, Protocol):
 
     param_groups: List[Dict[Any, Any]]
     defaults: Dict[Any, Any]
-    state: Dict[Any, Any]
+    state: DefaultDict[Tensor, Any]
 
     def state_dict(self) -> Dict[str, Dict[Any, Any]]:
         ...
diff --git a/src/lightning/pytorch/_graveyard/_torchmetrics.py b/src/lightning/pytorch/_graveyard/_torchmetrics.py
index 6bbc66887bc7e..82e3ad2dcf549 100644
--- a/src/lightning/pytorch/_graveyard/_torchmetrics.py
+++ b/src/lightning/pytorch/_graveyard/_torchmetrics.py
@@ -1,9 +1,9 @@
-import contextlib
 from typing import Callable
 
 import torchmetrics
 from lightning_utilities.core.imports import compare_version as _compare_version
 
+from lightning.pytorch.utilities.imports import _TORCHMETRICS_GREATER_EQUAL_0_8_0
 from lightning.pytorch.utilities.migration.utils import _patch_pl_to_mirror_if_necessary
 
 
@@ -12,13 +12,9 @@ def compare_version(package: str, op: Callable, version: str, use_base_version:
     return _compare_version(new_package, op, version, use_base_version)
 
 
-# patching is necessary, since up to v.0.7.3 torchmetrics has a hardcoded reference to lightning.pytorch,
-# which has to be redirected to the unified package:
-# https://github.com/Lightning-AI/metrics/blob/v0.7.3/torchmetrics/metric.py#L96
-with contextlib.suppress(AttributeError):
-    if hasattr(torchmetrics.utilities.imports, "_compare_version"):
-        torchmetrics.utilities.imports._compare_version = compare_version
-
-with contextlib.suppress(AttributeError):
-    if hasattr(torchmetrics.metric, "_compare_version"):
-        torchmetrics.metric._compare_version = compare_version
+if not _TORCHMETRICS_GREATER_EQUAL_0_8_0:
+    # up to v0.8.0 torchmetrics had a hardcoded reference to lightning.pytorch which has to be redirected to the
+    # unified package. this was removed in
+    # https://github.com/Lightning-AI/torchmetrics/commit/b225889b34b83272117b758cbc28772a5c2356d9
+    torchmetrics.utilities.imports._compare_version = compare_version
+    torchmetrics.metric._compare_version = compare_version
diff --git a/src/lightning/pytorch/callbacks/finetuning.py b/src/lightning/pytorch/callbacks/finetuning.py
index 5bb97bcf35786..91b38f12e9fd9 100644
--- a/src/lightning/pytorch/callbacks/finetuning.py
+++ b/src/lightning/pytorch/callbacks/finetuning.py
@@ -127,7 +127,7 @@ def flatten_modules(modules: Union[Module, Iterable[Union[Module, Iterable]]]) -
 
         if isinstance(modules, Iterable):
             _flatten_modules = []
-            for m in modules:
+            for m in modules:  # type: ignore[union-attr]
                 _flatten_modules.extend(BaseFinetuning.flatten_modules(m))
 
             _modules = iter(_flatten_modules)
diff --git a/src/lightning/pytorch/core/optimizer.py b/src/lightning/pytorch/core/optimizer.py
index 080b2008f357e..81fb648c6360d 100644
--- a/src/lightning/pytorch/core/optimizer.py
+++ b/src/lightning/pytorch/core/optimizer.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 from contextlib import contextmanager
 from dataclasses import fields
-from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union, overload
 from weakref import proxy
 
 import torch
@@ -393,9 +393,17 @@ def load_state_dict(self, state_dict: Dict[Any, Any]) -> None:
     def state_dict(self) -> Dict[str, Any]:
         return {}  # Return Empty
 
-    def step(self, closure: Optional[Callable] = None) -> None:
+    @overload
+    def step(self, closure: None = ...) -> None:
+        ...
+
+    @overload
+    def step(self, closure: Callable[[], float]) -> float:
+        ...
+
+    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
         if closure is not None:
-            closure()
+            return closure()
 
     def zero_grad(self, set_to_none: Optional[bool] = True) -> None:
         pass  # Do Nothing
diff --git a/src/lightning/pytorch/demos/transformer.py b/src/lightning/pytorch/demos/transformer.py
index 9cf7e226e1372..6c2ad6defce1d 100644
--- a/src/lightning/pytorch/demos/transformer.py
+++ b/src/lightning/pytorch/demos/transformer.py
@@ -75,10 +75,10 @@ def __init__(self, dim: int, dropout: float = 0.1, max_len: int = 5000) -> None:
         self.register_parameter("pe", nn.Parameter(pe, requires_grad=False))
 
     def reset_parameters(self) -> None:
-        self.pe.copy_(self._init_pos_encoding())  # type: ignore[operator]
+        self.pe.copy_(self._init_pos_encoding())
 
     def forward(self, x: Tensor) -> Tensor:
-        x + self.pe[: x.size(0), :]  # type: ignore[index]
+        x + self.pe[: x.size(0), :]
         return self.dropout(x)
 
     def _init_pos_encoding(self) -> Tensor:
diff --git a/src/lightning/pytorch/overrides/distributed.py b/src/lightning/pytorch/overrides/distributed.py
index b349f320bb734..7c6578006c502 100644
--- a/src/lightning/pytorch/overrides/distributed.py
+++ b/src/lightning/pytorch/overrides/distributed.py
@@ -163,9 +163,7 @@ def _register_ddp_comm_hook(
 def _sync_module_states(module: torch.nn.Module) -> None:
     """Taken from https://github.com/pytorch/pytorch/blob/v2.0.0/torch/nn/parallel/distributed.py#L675-L682."""
     parameters_to_ignore = (
-        set(module._ddp_params_and_buffers_to_ignore)  # type: ignore[arg-type]
-        if hasattr(module, "_ddp_params_and_buffers_to_ignore")
-        else set()
+        set(module._ddp_params_and_buffers_to_ignore) if hasattr(module, "_ddp_params_and_buffers_to_ignore") else set()
     )
     from torch.distributed.distributed_c10d import _get_default_group
     from torch.distributed.utils import _sync_module_states as torch_sync_module_states
diff --git a/src/lightning/pytorch/utilities/compile.py b/src/lightning/pytorch/utilities/compile.py
index e7f919b4f9252..0b38c4d794cf8 100644
--- a/src/lightning/pytorch/utilities/compile.py
+++ b/src/lightning/pytorch/utilities/compile.py
@@ -88,8 +88,8 @@ def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedMod
     from torch._dynamo import OptimizedModule
 
     if isinstance(model, OptimizedModule):
-        model = model._orig_mod
-        if not isinstance(model, pl.LightningModule):
+        original = model._orig_mod
+        if not isinstance(original, pl.LightningModule):
             raise TypeError(
                 f"Unexpected error, the wrapped model should be a LightningModule, found {type(model).__name__}"
             )
@@ -99,20 +99,21 @@ def to_uncompiled(model: Union["pl.LightningModule", "torch._dynamo.OptimizedMod
             raise ValueError(
                 "`model` is required to be a compiled LightningModule. Found a non-compiled LightningModule instead."
             )
+        original = model
     else:
         raise ValueError("`model` must either be an instance of OptimizedModule or LightningModule")
 
-    ctx = model._compiler_ctx
+    ctx = original._compiler_ctx
     if ctx is not None:
-        model.forward = ctx["original_forward"]  # type: ignore[method-assign]
-        model.training_step = ctx["original_training_step"]  # type: ignore[method-assign]
-        model.validation_step = ctx["original_validation_step"]  # type: ignore[method-assign]
-        model.test_step = ctx["original_test_step"]  # type: ignore[method-assign]
-        model.predict_step = ctx["original_predict_step"]  # type: ignore[method-assign]
-        model._compiler_ctx = None
-
-    return model
+        original.forward = ctx["original_forward"]  # type: ignore[method-assign]
+        original.training_step = ctx["original_training_step"]  # type: ignore[method-assign]
+        original.validation_step = ctx["original_validation_step"]  # type: ignore[method-assign]
+        original.test_step = ctx["original_test_step"]  # type: ignore[method-assign]
+        original.predict_step = ctx["original_predict_step"]  # type: ignore[method-assign]
+        original._compiler_ctx = None
+
+    return original
 
 
 def _maybe_unwrap_optimized(model: object) -> "pl.LightningModule":
diff --git a/src/lightning/pytorch/utilities/imports.py b/src/lightning/pytorch/utilities/imports.py
index 159b0b7758644..d872e271532e0 100644
--- a/src/lightning/pytorch/utilities/imports.py
+++ b/src/lightning/pytorch/utilities/imports.py
@@ -19,6 +19,7 @@
 from lightning_utilities.core.rank_zero import rank_zero_warn
 
 _PYTHON_GREATER_EQUAL_3_11_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 11)
+_TORCHMETRICS_GREATER_EQUAL_0_8_0 = RequirementCache("torchmetrics>=0.8.0")
 _TORCHMETRICS_GREATER_EQUAL_0_9_1 = RequirementCache("torchmetrics>=0.9.1")
 _TORCHMETRICS_GREATER_EQUAL_0_11 = RequirementCache("torchmetrics>=0.11.0")  # using new API with task
 _TORCHMETRICS_GREATER_EQUAL_1_0_0 = RequirementCache("torchmetrics>=1.0.0")