Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Typing][C-121,C-122,C-123,C-124] Add type annotations for python/paddle/quantization/{base_observer,factory,ptq,qat}.py #66693

Merged
merged 3 commits into from
Jul 31, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions python/paddle/quantization/base_observer.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import abc

Expand All @@ -24,9 +25,9 @@ class BaseObserver(BaseQuanter, metaclass=abc.ABCMeta):
and implement abstract methods.
"""

def __init__(self):
def __init__(self) -> None:
super().__init__()

@abc.abstractmethod
def cal_thresholds(self):
def cal_thresholds(self) -> None:
pass
19 changes: 12 additions & 7 deletions python/paddle/quantization/factory.py
enkilee marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -11,19 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import abc
import inspect
from functools import partial
from typing import TYPE_CHECKING, Any, Callable

from paddle.nn import Layer
if TYPE_CHECKING:
from paddle.nn import Layer

from .base_quanter import BaseQuanter
from .base_quanter import BaseQuanter


class ClassWithArguments(metaclass=abc.ABCMeta):
def __init__(self, *args, **kwargs):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._args = args
self._kwargs = kwargs

Expand Down Expand Up @@ -55,7 +57,7 @@ class QuanterFactory(ClassWithArguments):
the arguments used to create quanter instance.
"""

def __init__(self, *args, **kwargs):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.partial_class = None

Expand All @@ -73,7 +75,9 @@ def _instance(self, layer: Layer) -> BaseQuanter:
ObserverFactory = QuanterFactory


def quanter(class_name):
def quanter(
class_name: str,
) -> Callable[[type[BaseQuanter]], type[BaseQuanter]]:
r"""
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

2024-07-29 18:07:11 ----------------Check results--------------------
2024-07-29 18:07:11 paddle.quantization.quanter:1
2024-07-29 18:07:11 <string>:10:11: error: Name "CustomizedQuanter" is not defined  [name-defined]
2024-07-29 18:07:11 <string>:11:35: error: Argument "activation" to "QuantConfig" has incompatible type "Callable[[str], BaseQuanter]"; expected "QuanterFactory"  [arg-type]
2024-07-29 18:07:11 <string>:11:51: error: Argument "weight" to "QuantConfig" has incompatible type "Callable[[str], BaseQuanter]"; expected "QuanterFactory"  [arg-type]
2024-07-29 18:07:11 Found 3 errors in 1 file (checked 1 source file)
2024-07-29 18:07:11 
2024-07-29 18:07:11 
2024-07-29 18:07:11 >>> Mistakes found in type checking!
2024-07-29 18:07:11 >>> Please recheck the type annotations. Run `tools/type_checking.py` to check the typing issues:
2024-07-29 18:07:11 > python tools/type_checking.py paddle.quantization.quanter
2024-07-29 18:07:11 ----------------End of the Check--------------------

这示例代码看样子本来就不能跑,直接在最前面加个 # type: ignore 吧,在 # doctest: +SKIP('need 2 file to run example') 下一行单独写一行

Copy link
Contributor Author

@enkilee enkilee Jul 31, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

收到,我看其他都是加在 # doctest: +SKIP('xxxxxxx') 上面来着?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

`# type: ignore` 和 `# doctest: +SKIP('xxxxxxx')` 互不影响,前者是应对 type checking ,后者是应对 example code running check ~ 注意一下,针对整段代码,都需要独占一行 ~

Annotation to declare a factory class for quanter.

Expand All @@ -83,6 +87,7 @@ def quanter(class_name):
Examples:
.. code-block:: python

>>> # type: ignore
>>> # doctest: +SKIP('need 2 file to run example')
>>> # Given codes in ./customized_quanter.py
>>> from paddle.quantization import quanter
Expand All @@ -102,7 +107,7 @@ def quanter(class_name):

"""

def wrapper(target_class):
def wrapper(target_class: type[BaseQuanter]) -> type[BaseQuanter]:
init_function_str = f"""
def init_function(self, *args, **kwargs):
super(type(self), self).__init__(*args, **kwargs)
Expand Down
13 changes: 9 additions & 4 deletions python/paddle/quantization/ptq.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,22 +11,27 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import copy
from typing import TYPE_CHECKING

from paddle.distributed import fleet
from paddle.nn import Layer

from .config import QuantConfig
from .quantize import Quantization

if TYPE_CHECKING:
from paddle.nn import Layer

from .config import QuantConfig


class PTQ(Quantization):
"""
Applying post training quantization to the model.
"""

def __init__(self, config: QuantConfig):
def __init__(self, config: QuantConfig) -> None:
super().__init__(config)

def _is_parallel_training(self):
Expand All @@ -38,7 +43,7 @@ def _is_parallel_training(self):
except Exception: # fleet is not initialized
return False

def quantize(self, model: Layer, inplace=False):
def quantize(self, model: Layer, inplace: bool = False) -> Layer:
r"""
Create a model for post-training quantization.

Expand Down
14 changes: 9 additions & 5 deletions python/paddle/quantization/qat.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,18 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import copy
from typing import TYPE_CHECKING

from paddle.nn import Layer

from .config import QuantConfig
from .quantize import Quantization

if TYPE_CHECKING:
from paddle.nn import Layer

from .config import QuantConfig


class QAT(Quantization):
r"""
Expand All @@ -36,10 +40,10 @@ class QAT(Quantization):
>>> qat = QAT(q_config)
"""

def __init__(self, config: QuantConfig):
def __init__(self, config: QuantConfig) -> None:
super().__init__(config)

def quantize(self, model: Layer, inplace=False):
def quantize(self, model: Layer, inplace: bool = False) -> Layer:
r"""
Create a model for quantization-aware training.

Expand Down