Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/develop' into dl/fix/nncf_grpah_…
Browse files Browse the repository at this point in the history
…parallel_edges
  • Loading branch information
daniil-lyakhov committed Sep 5, 2023
2 parents 1c926d8 + f6d958f commit d950c52
Show file tree
Hide file tree
Showing 38 changed files with 1,132 additions and 420 deletions.
14 changes: 11 additions & 3 deletions nncf/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
"""
Neural Network Compression Framework (NNCF) for enhanced OpenVINO™ inference.
"""

from nncf.common.logging import nncf_logger
from nncf.common.logging.logger import disable_logging
from nncf.common.logging.logger import set_log_level
Expand All @@ -32,21 +33,28 @@


from importlib.util import find_spec as _find_spec # pylint:disable=wrong-import-position
from pathlib import Path as _Path # pylint:disable=wrong-import-position

# Maps each supported framework name to a bool telling whether that framework
# is actually importable in the current Python environment.
_AVAILABLE_FRAMEWORKS = {}

for fw_name in _SUPPORTED_FRAMEWORKS:
    spec = _find_spec(fw_name)
    framework_present = False
    if spec is not None and spec.origin is not None:
        # find_spec() may succeed even when the framework is absent, because it can
        # resolve our own nncf.*backend_name* subpackage instead; in that case
        # spec.origin points to a folder inside the NNCF code tree, i.e. to a
        # location among this file's parent directories.
        origin_path = _Path(spec.origin)
        here = _Path(__file__)
        if origin_path not in here.parents:
            framework_present = True
    _AVAILABLE_FRAMEWORKS[fw_name] = framework_present

if not any(_AVAILABLE_FRAMEWORKS.values()):
nncf_logger.error(
"Neither PyTorch, TensorFlow, ONNX or OpenVINO Python packages have been found in your Python "
"environment.\n"
"Please install one of the supported frameworks above in order to use NNCF on top of it.\n"
"See the installation guide at https://github.com/openvinotoolkit/nncf#installation for help."
"See the installation guide at https://github.com/openvinotoolkit/nncf#installation-guide for help."
)
else:
nncf_logger.info(
Expand Down
122 changes: 122 additions & 0 deletions nncf/onnx/graph/metatypes/groups.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from nncf.onnx.graph.metatypes import onnx_metatypes

# Metatypes of operations treated as agnostic to quantization: quantized tensors
# may be propagated through them without requantization.
QUANTIZE_AGNOSTIC_OPERATIONS = [
    onnx_metatypes.ONNXMaxPoolMetatype,
    onnx_metatypes.ONNXReduceMaxMetatype,
    onnx_metatypes.ONNXReshapeMetatype,
    onnx_metatypes.ONNXTransposeMetatype,
    onnx_metatypes.ONNXSqueezeMetatype,
    onnx_metatypes.ONNXUnsqueezeMetatype,
    onnx_metatypes.ONNXSplitMetatype,
    onnx_metatypes.ONNXTileMetatype,
    onnx_metatypes.ONNXCenterCropPadMetatype,
    onnx_metatypes.ONNXSliceMetatype,
    onnx_metatypes.ONNXPadMetatype,
    onnx_metatypes.ONNXGatherMetatype,
    onnx_metatypes.ONNXGatherNDMetatype,
    onnx_metatypes.ONNXGatherElementsMetatype,
    onnx_metatypes.ONNXDepthToSpaceMetatype,
    onnx_metatypes.ONNXSpaceToDepthMetatype,
    onnx_metatypes.ONNXScatterElementsMetatype,
    onnx_metatypes.ONNXScatterNDMetatype,
    onnx_metatypes.ONNXScatterMetatype,
    onnx_metatypes.ONNXCastLikeMetatype,
    onnx_metatypes.ONNXDropoutMetatype,
    onnx_metatypes.ONNXFlattenMetatype,
    onnx_metatypes.ONNXExpandMetatype,
    onnx_metatypes.ONNXIdentityMetatype,
    # ONNXReluMetatype is not considered to be QUANTIZATION_AGNOSTIC, because:
    # 1. Runtime doesn't provide performance benefits by quantizing the stand-alone RELU's (ticket: 59548)
    # 2. It's frequently better for the end accuracy to have quantizers set up after the RELU
    # so that the input distribution to the quantizer is non-negative
    # and we can therefore have better quantization resolution while preserving the original dynamic range
]


# Gemm/MatMul metatypes; these may carry a weight on more than one input port
# (see get_possible_weight_port_ids in nncf_graph_builder).
MATMUL_METATYPES = [onnx_metatypes.ONNXGemmMetatype, onnx_metatypes.ONNXMatMulMetatype]


# Metatypes whose (activation) inputs should be quantized.
INPUTS_QUANTIZABLE_OPERATIONS = [
    onnx_metatypes.ONNXConvolutionMetatype,
    onnx_metatypes.ONNXDepthwiseConvolutionMetatype,
    onnx_metatypes.ONNXConvolutionTransposeMetatype,
    *MATMUL_METATYPES,
    onnx_metatypes.ONNXAveragePoolMetatype,
    onnx_metatypes.ONNXGlobalAveragePoolMetatype,
    onnx_metatypes.ONNXAddLayerMetatype,
    onnx_metatypes.ONNXSubMetatype,
    onnx_metatypes.ONNXMulLayerMetatype,
    onnx_metatypes.ONNXBatchNormMetatype,
    onnx_metatypes.ONNXHardSigmoidMetatype,
    onnx_metatypes.ONNXResizeMetatype,
    onnx_metatypes.ONNXPowMetatype,
    onnx_metatypes.ONNXReciprocalMetatype,
    onnx_metatypes.ONNXMaximumMetatype,
    onnx_metatypes.ONNXMinimumMetatype,
]


# Metatypes that must have a constant weight tensor on a fixed input port
# (see get_constant_weight_port_ids in nncf_graph_builder).
CONSTANT_WEIGHT_LAYER_METATYPES = [
    onnx_metatypes.ONNXConvolutionMetatype,
    onnx_metatypes.ONNXDepthwiseConvolutionMetatype,
    onnx_metatypes.ONNXConvolutionTransposeMetatype,
    onnx_metatypes.ONNXEmbeddingMetatype,
]


# Linear (convolution / matmul-like) operations; used by the HW fused patterns.
LINEAR_OPERATIONS = [
    onnx_metatypes.ONNXConvolutionMetatype,
    onnx_metatypes.ONNXDepthwiseConvolutionMetatype,
    onnx_metatypes.ONNXConvolutionTransposeMetatype,
    onnx_metatypes.ONNXDeformableConvolutionMetatype,
    *MATMUL_METATYPES,
]


# Single-node activation functions; used by the HW fused patterns.
ATOMIC_ACTIVATIONS_OPERATIONS = [
    onnx_metatypes.ONNXReluMetatype,
    onnx_metatypes.ONNXLeakyReluMetatype,
    onnx_metatypes.ONNXThresholdedReluMetatype,
    onnx_metatypes.ONNXEluMetatype,
    onnx_metatypes.ONNXPReluMetatype,
    onnx_metatypes.ONNXSigmoidMetatype,
    onnx_metatypes.ONNXHardSigmoidMetatype,
    onnx_metatypes.ONNXHardSwishMetatype,
]


# Elementwise arithmetic operations; used by the HW fused patterns.
ARITHMETIC_OPERATIONS = [
    onnx_metatypes.ONNXAddLayerMetatype,
    onnx_metatypes.ONNXSubMetatype,
    onnx_metatypes.ONNXMulLayerMetatype,
    onnx_metatypes.ONNXDivLayerMetatype,
]


# All metatypes that can have weights: constant-weight layers plus matmul-like ops.
OPERATIONS_WITH_WEIGHTS = [
    *CONSTANT_WEIGHT_LAYER_METATYPES,
    *MATMUL_METATYPES,
]


# Batch-normalization operations; used by the HW fused patterns.
BATCH_NORMALIZATION_OPERATIONS = [
    onnx_metatypes.ONNXBatchNormMetatype,
]


# Contains the operation metatypes for which bias can be applied.
OPERATIONS_WITH_BIAS = [
    onnx_metatypes.ONNXConvolutionMetatype,
    onnx_metatypes.ONNXDepthwiseConvolutionMetatype,
]
55 changes: 0 additions & 55 deletions nncf/onnx/graph/metatypes/onnx_metatypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -610,24 +610,6 @@ class ONNXDeformableConvolutionMetatype(ONNXOpMetatype):
op_names = ["DeformConv"]


CONSTANT_WEIGHT_LAYER_METATYPES = [
ONNXConvolutionMetatype,
ONNXDepthwiseConvolutionMetatype,
ONNXConvolutionTransposeMetatype,
ONNXEmbeddingMetatype,
]

MATMUL_METATYPES = [ONNXGemmMetatype, ONNXMatMulMetatype]

GENERAL_WEIGHT_LAYER_METATYPES = CONSTANT_WEIGHT_LAYER_METATYPES + MATMUL_METATYPES

# Contains the operation metatypes for which bias can be applied.
OPERATIONS_WITH_BIAS_METATYPES = [
ONNXConvolutionMetatype,
ONNXDepthwiseConvolutionMetatype,
]


def get_operator_metatypes() -> List[Type[OperatorMetatype]]:
"""
Returns a list of the operator metatypes.
Expand All @@ -653,43 +635,6 @@ def get_metatype(model: onnx.ModelProto, node: onnx.NodeProto) -> ONNXOpMetatype
return metatype


def get_constant_weight_port_ids(metatype: ONNXOpMetatype) -> List[int]:
"""
Returns port ids on which metatype must have a weight based on Operation definition.
:param metatype: Metatype.
:return: Port ids.
"""
if metatype in CONSTANT_WEIGHT_LAYER_METATYPES:
return metatype.weight_port_ids
return []


def get_possible_weight_port_ids(metatype: ONNXOpMetatype) -> List[int]:
"""
Returns weight port ids on which metatype could have a weight.
Example: ONNXMatMulMetatype could have activations or weights on input port ids: 0, 1
:param metatype: Metatype.
:return: Port ids.
"""
if metatype in MATMUL_METATYPES:
return metatype.possible_weight_ports
return []


def get_bias_tensor_port_id(metatype: ONNXOpWithWeightsMetatype) -> Optional[int]:
"""
Returns input port id, where a bias tensor should output.
:param node: Node, for which input port id is returned,
:return: Input port id, where a weight bias should output or None if node can not have bias.
"""
if metatype in OPERATIONS_WITH_BIAS_METATYPES:
return metatype.bias_port_id
return None


def get_tensor_edge_name(onnx_graph: ONNXGraph, node: onnx.NodeProto, port_id: int) -> Optional[str]:
"""
Returns an edge name associated with a weight of a node laying on an input port_id.
Expand Down
47 changes: 43 additions & 4 deletions nncf/onnx/graph/nncf_graph_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Any, Dict, Optional, Set
from typing import Any, Dict, List, Optional, Set

import onnx

Expand All @@ -21,11 +21,13 @@
from nncf.common.graph.layer_attributes import Dtype
from nncf.common.graph.operator_metatypes import InputNoopMetatype
from nncf.common.graph.operator_metatypes import OutputNoopMetatype
from nncf.onnx.graph.metatypes.groups import CONSTANT_WEIGHT_LAYER_METATYPES
from nncf.onnx.graph.metatypes.groups import MATMUL_METATYPES
from nncf.onnx.graph.metatypes.groups import OPERATIONS_WITH_BIAS
from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXGemmMetatype
from nncf.onnx.graph.metatypes.onnx_metatypes import get_bias_tensor_port_id
from nncf.onnx.graph.metatypes.onnx_metatypes import get_constant_weight_port_ids
from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXOpMetatype
from nncf.onnx.graph.metatypes.onnx_metatypes import ONNXOpWithWeightsMetatype
from nncf.onnx.graph.metatypes.onnx_metatypes import get_metatype
from nncf.onnx.graph.metatypes.onnx_metatypes import get_possible_weight_port_ids
from nncf.onnx.graph.metatypes.onnx_metatypes import get_tensor_edge_name
from nncf.onnx.graph.onnx_graph import ONNXGraph

Expand Down Expand Up @@ -64,6 +66,43 @@ def has_node_attrs(self) -> bool:
return bool(self.node_attrs)


def get_constant_weight_port_ids(metatype: ONNXOpMetatype) -> List[int]:
    """
    Returns port ids on which the given metatype must have a weight, based on
    the operation definition.

    :param metatype: Metatype to query.
    :return: Weight port ids; empty list if the metatype has no mandatory weights.
    """
    has_mandatory_weights = metatype in CONSTANT_WEIGHT_LAYER_METATYPES
    return metatype.weight_port_ids if has_mandatory_weights else []


def get_possible_weight_port_ids(metatype: ONNXOpMetatype) -> List[int]:
    """
    Returns the input port ids on which the given metatype could have a weight.
    Example: ONNXMatMulMetatype could have activations or weights on input port ids: 0, 1

    :param metatype: Metatype to query.
    :return: Possible weight port ids; empty list for non-matmul-like metatypes.
    """
    is_matmul_like = metatype in MATMUL_METATYPES
    return metatype.possible_weight_ports if is_matmul_like else []


def get_bias_tensor_port_id(metatype: ONNXOpWithWeightsMetatype) -> Optional[int]:
    """
    Returns the input port id on which a bias tensor is expected for the given metatype.

    :param metatype: Metatype for which the bias input port id is returned.
    :return: Input port id of the bias tensor, or None if this metatype can not have a bias.
    """
    if metatype in OPERATIONS_WITH_BIAS:
        return metatype.bias_port_id
    return None


def _get_weight_port_ids(node: onnx.NodeProto, onnx_graph: ONNXGraph) -> Set[int]:
"""
Returns all weight input ports.
Expand Down
20 changes: 12 additions & 8 deletions nncf/onnx/hardware/fused_patterns.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@
from nncf.common.graph.patterns import HWFusedPatternNames
from nncf.common.utils.registry import Registry
from nncf.onnx.graph.metatypes import onnx_metatypes as om
from nncf.onnx.hardware.pattern_operations import ARITHMETIC_OPERATIONS
from nncf.onnx.hardware.pattern_operations import ATOMIC_ACTIVATIONS_OPERATIONS
from nncf.onnx.hardware.pattern_operations import BATCH_NORMALIZATION_OPERATIONS
from nncf.onnx.hardware.pattern_operations import LINEAR_OPERATIONS
from nncf.onnx.graph.metatypes.groups import ARITHMETIC_OPERATIONS
from nncf.onnx.graph.metatypes.groups import ATOMIC_ACTIVATIONS_OPERATIONS
from nncf.onnx.graph.metatypes.groups import BATCH_NORMALIZATION_OPERATIONS
from nncf.onnx.graph.metatypes.groups import LINEAR_OPERATIONS

ONNX_HW_FUSED_PATTERNS = Registry("onnx")

Expand Down Expand Up @@ -383,19 +383,23 @@ def create_linear_scale_shift() -> GraphPattern:

def linear_operations() -> GraphPattern:
    """
    Builds a pattern consisting of a single node matching any LINEAR operation
    (convolution / matmul-like metatypes).

    :return: The constructed GraphPattern.
    """
    pattern = GraphPattern()
    # The node must carry both a metatype set and a human-readable label.
    pattern.add_node(**{GraphPattern.METATYPE_ATTR: LINEAR_OPERATIONS, GraphPattern.LABEL_ATTR: "LINEAR"})
    return pattern


def batch_normalization_operations() -> GraphPattern:
    """
    Builds a pattern consisting of a single node matching any batch-normalization
    operation.

    :return: The constructed GraphPattern.
    """
    pattern = GraphPattern()
    # The node must carry both a metatype set and a human-readable label.
    pattern.add_node(
        **{GraphPattern.METATYPE_ATTR: BATCH_NORMALIZATION_OPERATIONS, GraphPattern.LABEL_ATTR: "BATCH_NORMALIZATION"}
    )
    return pattern


def atomic_activations_operations() -> GraphPattern:
pattern = GraphPattern()
pattern.add_node(**ATOMIC_ACTIVATIONS_OPERATIONS)
pattern.add_node(
**{GraphPattern.METATYPE_ATTR: ATOMIC_ACTIVATIONS_OPERATIONS, GraphPattern.LABEL_ATTR: "ATOMIC_ACTIVATIONS"}
)

swish_sigmoid = create_swish_with_sigmoid()
pattern.add_pattern_alternative(swish_sigmoid)
Expand All @@ -413,7 +417,7 @@ def atomic_activations_operations() -> GraphPattern:

def arithmetic_operations() -> GraphPattern:
    """
    Builds a pattern consisting of a single node matching any elementwise
    arithmetic operation (add, sub, mul, div).

    :return: The constructed GraphPattern.
    """
    pattern = GraphPattern()
    # The node must carry both a metatype set and a human-readable label.
    pattern.add_node(**{GraphPattern.METATYPE_ATTR: ARITHMETIC_OPERATIONS, GraphPattern.LABEL_ATTR: "ARITHMETIC"})
    return pattern


Expand Down
Loading

0 comments on commit d950c52

Please sign in to comment.