Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Typing][B-50,B-51] Add type annotations for python/paddle/static/{io,input.py} #67047

Merged
merged 6 commits into from
Aug 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 12 additions & 2 deletions python/paddle/static/input.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,13 @@

if TYPE_CHECKING:
from paddle import Tensor
from paddle._typing import DTypeLike, ShapeLike, Size1
from paddle._typing import (
DTypeLike,
ShapeLike,
Size1,
TensorIndex,
TensorLike,
)

__all__ = []

Expand Down Expand Up @@ -419,7 +425,11 @@ def __ne__(self, other) -> bool:
return not self == other


def setitem(x, index, value):
def setitem(
x: Tensor,
index: TensorIndex,
value: TensorLike,
) -> Tensor:
"""
x(Tensor): input Tensor.
index(Scalar|Tuple|List|Tensor): Where should be set value.
Expand Down
161 changes: 130 additions & 31 deletions python/paddle/static/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import errno
import inspect
Expand All @@ -19,6 +20,7 @@
import pickle
import sys
import warnings
from typing import TYPE_CHECKING, Any, TypedDict, overload

import numpy as np

Expand Down Expand Up @@ -69,6 +71,37 @@
save_vars_pir,
)

if TYPE_CHECKING:
from collections.abc import Callable, Sequence

import numpy.typing as npt
from typing_extensions import NotRequired, Unpack

from paddle import Tensor

    # Static-only **kwargs schemas: each TypedDict below enumerates the
    # optional keyword arguments accepted by the public function of the
    # matching name in this module. All keys are NotRequired, so passing
    # no keyword arguments at all is valid for every one of these APIs.

    class _NormalizeProgramKwargs(TypedDict):
        """Optional keyword arguments for ``normalize_program``."""

        skip_prune_program: NotRequired[bool]  # presumably skips the program-pruning pass — TODO confirm against normalize_program

    class _SerializeProgramKwargs(TypedDict):
        """Optional keyword arguments for ``serialize_program``."""

        program: NotRequired[Program]  # alternative program to serialize instead of the default main program
        legacy_format: NotRequired[bool]  # forwarded to desc.serialize_to_string (see _serialize_program)

    class _SerializePersistablesKwargs(TypedDict):
        """Optional keyword arguments for ``serialize_persistables``."""

        program: NotRequired[Program]

    class _SaveInferenceModelKwargs(TypedDict):
        """Optional keyword arguments for ``save_inference_model``."""

        program: NotRequired[Program]
        clip_extra: NotRequired[bool]
        legacy_format: NotRequired[bool]

    class _LoadInferenceModelKwargs(TypedDict):
        """Optional keyword arguments for ``load_inference_model``."""

        model_filename: NotRequired[str]  # overrides the default model file name under path_prefix
        params_filename: NotRequired[str]  # overrides the default parameters file name under path_prefix

    class _SaveKwargs(TypedDict):
        """Optional keyword arguments for ``save``."""

        pickle_protocol: NotRequired[int]


__all__ = []

_logger = get_logger(
Expand Down Expand Up @@ -98,8 +131,10 @@ def _clone_var_in_block(block, var):


def prepend_feed_ops(
inference_program, feed_target_names, feed_holder_name='feed'
):
inference_program: Program,
feed_target_names: Sequence[str],
feed_holder_name: str = 'feed',
) -> None:
if len(feed_target_names) == 0:
return

Expand Down Expand Up @@ -127,8 +162,10 @@ def prepend_feed_ops(


def append_fetch_ops(
inference_program, fetch_target_names, fetch_holder_name='fetch'
):
inference_program: Program,
fetch_target_names: Sequence[str],
fetch_holder_name: str = 'fetch',
) -> None:
global_block = inference_program.global_block()
fetch_var = global_block.create_var(
name=fetch_holder_name,
Expand All @@ -145,7 +182,12 @@ def append_fetch_ops(
)


def normalize_program(program, feed_vars, fetch_vars, **kwargs):
def normalize_program(
program: Program,
feed_vars: Tensor | list[Tensor],
fetch_vars: Tensor | list[Tensor],
**kwargs: Unpack[_NormalizeProgramKwargs],
) -> Program:
"""

Normalize/Optimize a program according to feed_vars and fetch_vars.
Expand Down Expand Up @@ -267,7 +309,11 @@ def normalize_program(program, feed_vars, fetch_vars, **kwargs):


@static_only
def serialize_program(feed_vars, fetch_vars, **kwargs):
def serialize_program(
feed_vars: Tensor | list[Tensor],
fetch_vars: Tensor | list[Tensor],
**kwargs: Unpack[_SerializeProgramKwargs],
) -> bytes:
"""

Serialize default main program according to feed_vars and fetch_vars.
Expand Down Expand Up @@ -319,15 +365,20 @@ def serialize_program(feed_vars, fetch_vars, **kwargs):
return _serialize_program(program, legacy_format=legacy_format)


def _serialize_program(program, legacy_format=False):
def _serialize_program(program: Program, legacy_format: bool = False) -> bytes:
    """Serialize ``program``'s description to a byte string.

    Delegates to ``program.desc.serialize_to_string``; ``legacy_format``
    is forwarded unchanged to select the on-disk serialization format.
    """
    serialized = program.desc.serialize_to_string(legacy_format=legacy_format)
    return serialized


@static_only
def serialize_persistables(feed_vars, fetch_vars, executor, **kwargs):
def serialize_persistables(
feed_vars: Tensor | list[Tensor],
fetch_vars: Tensor | list[Tensor],
executor: Executor,
**kwargs: Unpack[_SerializePersistablesKwargs],
) -> bytes:
"""

Serialize parameters using given executor and default main program according to feed_vars and fetch_vars.
Expand Down Expand Up @@ -378,7 +429,7 @@ def serialize_persistables(feed_vars, fetch_vars, executor, **kwargs):
return _serialize_persistables(program, executor)


def _serialize_persistables(program, executor):
def _serialize_persistables(program: Program, executor: Executor) -> bytes:
"""
Serialize parameters using given program and executor.
"""
Expand Down Expand Up @@ -425,7 +476,7 @@ def _serialize_persistables(program, executor):
return global_scope().find_var(out_var_name).get_bytes()


def save_to_file(path, content):
def save_to_file(path: str, content: bytes) -> None:
"""
Save content to given path.

Expand Down Expand Up @@ -467,8 +518,12 @@ def save_to_file(path, content):

@static_only
def save_inference_model(
path_prefix, feed_vars, fetch_vars, executor, **kwargs
):
path_prefix: str,
feed_vars: Tensor | list[Tensor],
fetch_vars: Tensor | list[Tensor],
executor: Executor,
**kwargs: Unpack[_SaveInferenceModelKwargs],
) -> None:
"""
Save current model and its parameters to given path. i.e.
Given ``path_prefix = "PATH/modelname"``, after invoking
Expand Down Expand Up @@ -594,7 +649,7 @@ def save_inference_model(


@static_only
def deserialize_program(data):
def deserialize_program(data: bytes) -> Program:
"""

Deserialize given data to a program.
Expand Down Expand Up @@ -641,7 +696,9 @@ def deserialize_program(data):

# NOTE(liuyuanle): Due to load from memory, deserialize_persistables does not support loading weights with file sizes exceeding 2GB.
@static_only
def deserialize_persistables(program, data, executor):
def deserialize_persistables(
program: Program, data: bytes, executor: Executor
) -> Program:
"""

Deserialize given data to parameters according to given program and executor.
Expand Down Expand Up @@ -742,7 +799,7 @@ def deserialize_persistables(program, data, executor):
)


def load_from_file(path):
def load_from_file(path: str) -> bytes:
"""
Load file in binary mode.

Expand Down Expand Up @@ -784,7 +841,11 @@ def load_from_file(path):


@static_only
def load_inference_model(path_prefix, executor, **kwargs):
def load_inference_model(
path_prefix: str | None,
executor: Executor,
**kwargs: Unpack[_LoadInferenceModelKwargs],
) -> list[Program | list[str] | list[Tensor]]:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

原代码写的有点奇怪,一般没有把返回的 tuple 转为 list 的,没什么意义 ... ...

这也导致示例代码中无法正确判断返回值的类型。

ignore 一下示例吧

            >>> results = exe.run(inference_program,  # type: ignore[arg-type]
            ...               feed={feed_target_names[0]: tensor_img},  # type: ignore[index,dict-item]
            ...               fetch_list=fetch_targets)  # type: ignore[arg-type]

"""

Load inference model from a given path. By this API, you can get the model
Expand Down Expand Up @@ -838,9 +899,9 @@ def load_inference_model(path_prefix, executor, **kwargs):
>>> [inference_program, feed_target_names, fetch_targets] = (
... paddle.static.load_inference_model(path_prefix, exe))
>>> tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32) # type: ignore[var-annotated]
>>> results = exe.run(inference_program,
... feed={feed_target_names[0]: tensor_img},
... fetch_list=fetch_targets)
>>> results = exe.run(inference_program, # type: ignore[arg-type]
... feed={feed_target_names[0]: tensor_img}, # type: ignore[index,dict-item]
... fetch_list=fetch_targets) # type: ignore[arg-type]

# In this example, the inference program was saved in file
# "./infer_model.pdmodel" and parameters were saved in file
Expand Down Expand Up @@ -979,6 +1040,30 @@ def load_inference_model(path_prefix, executor, **kwargs):
return [program, feed_target_names, fetch_targets]


# Typing overloads for ``save_vars``: per the annotations, the return type
# depends on ``dirname``. With ``dirname=None`` (and ``filename=None``) the
# signature declares a ``bytes`` result — the saved variables are returned
# in-memory; with a ``str`` dirname the declared result is ``None`` —
# presumably the variables are written under that directory instead.
# TODO confirm against the save_vars implementation.
# NOTE(review): stacking a runtime decorator (@dygraph_not_support) on
# @overload stubs is unusual — overload stubs are discarded at runtime and
# only type checkers read them; confirm the decorator is intended here.
@overload
@dygraph_not_support
def save_vars(
    executor: Executor,
    dirname: None,
    main_program: Program | None = ...,
    vars: list[Tensor] | None = ...,
    predicate: Callable[[Tensor], bool] | None = ...,
    filename: None = ...,
) -> bytes: ...


@overload
@dygraph_not_support
def save_vars(
    executor: Executor,
    dirname: str,
    main_program: Program | None = ...,
    vars: list[Tensor] | None = ...,
    predicate: Callable[[Tensor], bool] | None = ...,
    filename: str = ...,
) -> None: ...


@dygraph_not_support
def save_vars(
executor,
Expand Down Expand Up @@ -1145,13 +1230,13 @@ def save_vars(


def load_vars(
executor,
dirname,
main_program=None,
vars=None,
predicate=None,
filename=None,
):
executor: Executor,
dirname: str,
main_program: Program | None = None,
vars: list[Tensor] | None = None,
predicate: Callable[[Tensor], bool] | None = None,
filename: str | None = None,
) -> None:
"""
:api_attr: Static Graph

Expand Down Expand Up @@ -1403,7 +1488,12 @@ def load_vars(


@static_only
def save(program, model_path, protocol=4, **configs):
def save(
program: Program,
model_path: str,
protocol: int = 4,
**configs: Unpack[_SaveKwargs],
) -> None:
"""

This function save parameters, optimizer information and network description to model_path.
Expand Down Expand Up @@ -1504,7 +1594,12 @@ def get_tensor(var):


@static_only
def load(program, model_path, executor=None, var_list=None):
def load(
program: Program,
model_path: str,
executor: Executor | None = None,
var_list: Sequence[Tensor] | None = None,
) -> None:
"""
:api_attr: Static Graph

Expand Down Expand Up @@ -1721,7 +1816,9 @@ def set_var(var, ndarray):


@static_only
def set_program_state(program, state_dict):
def set_program_state(
program: Program, state_dict: dict[str, npt.NDArray[Any]]
) -> None:
"""
Set program parameter from state_dict

Expand Down Expand Up @@ -1818,7 +1915,7 @@ def set_program_state(program, state_dict):


@dygraph_not_support
def get_program_persistable_vars(program):
def get_program_persistable_vars(program: Program) -> list[Tensor]:
"""
Get all the persistable vars from Program.
Args:
Expand All @@ -1839,7 +1936,9 @@ def get_program_persistable_vars(program):
return list(filter(is_persistable, program.list_vars()))


def load_program_state(model_path, var_list=None):
def load_program_state(
model_path: str, var_list: Sequence[Tensor] | None = None
) -> dict[str, npt.NDArray[Any]]:
"""

Load program state from local file
Expand Down