Commit

daniil-lyakhov committed Jun 27, 2024
1 parent 8205d9c commit 2d5a02b
Showing 15 changed files with 23 additions and 60 deletions.
44 changes: 0 additions & 44 deletions .github/workflows/precommit.yml
@@ -95,50 +95,6 @@ jobs:
token: ${{ secrets.CODECOV_TOKEN }}
name: coverage_openvino
flags: OPENVINO
torchFX:
timeout-minutes: 40
defaults:
run:
shell: bash
runs-on: ubuntu-20.04-8-cores
env:
DEBIAN_FRONTEND: noninteractive
steps:
- name: Install dependencies
run : |
sudo apt-get update
sudo apt-get --assume-yes install gcc g++ build-essential ninja-build libgl1-mesa-dev libglib2.0-0
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
with:
lfs: true
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: 3.8.18
cache: pip
- name: Runner info
continue-on-error: true
run: |
cat /etc/*release
cat /proc/cpuinfo
- name: Install NNCF and test requirements
run: make install-torch-fx-test
- name: Run TorchFX precommit test scope
run: |
make test-torch-fx
env:
NNCF_COVERAGE: 1
NUM_WORKERS: 4
- name: Upload coverage report as artifact
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: coverage_fx_cpu
path: ./coverage.xml
- name: Upload coverage report to codecov
uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c # v4.4.1
with:
token: ${{ secrets.CODECOV_TOKEN }}
name: coverage_fx_cpu
flags: TORCH
pytorch-cpu:
timeout-minutes: 40
defaults:
1 change: 1 addition & 0 deletions Makefile
@@ -153,6 +153,7 @@ test-torch-cuda:

test-torch-nightly:
pytest ${COVERAGE_ARGS} tests/torch -m nightly --junitxml ${JUNITXML_PATH} $(DATA_ARG)
test-torch-fx

test-torch-weekly:
pytest ${COVERAGE_ARGS} tests/torch -m weekly \
8 changes: 4 additions & 4 deletions nncf/common/factory.py
@@ -43,7 +43,7 @@ def create(model: TModel) -> NNCFGraph:

return GraphConverter.create_nncf_graph(model)
if model_backend == BackendType.TORCH_FX:
from nncf.experimental.torch_fx.nncf_graph_builder import GraphConverter
from nncf.experimental.torch.fx.nncf_graph_builder import GraphConverter

return GraphConverter.create_nncf_graph(model)
if model_backend == BackendType.TORCH:
@@ -77,7 +77,7 @@ def create(model: TModel, inplace: bool = False) -> ModelTransformer:

return PTModelTransformer(model)
if model_backend == BackendType.TORCH_FX:
from nncf.experimental.torch_fx.model_transformer import FXModelTransformer
from nncf.experimental.torch.fx.model_transformer import FXModelTransformer

return FXModelTransformer(model)
raise nncf.UnsupportedBackendError(
@@ -108,7 +108,7 @@ def create(model: TModel) -> Engine:

return PTEngine(model)
if model_backend == BackendType.TORCH_FX:
from nncf.experimental.torch_fx.engine import FXEngine
from nncf.experimental.torch.fx.engine import FXEngine

return FXEngine(model)
raise nncf.UnsupportedBackendError(
@@ -164,7 +164,7 @@ def create(model: TModel, dataset: Dataset) -> aggregator.StatisticsAggregator:

return PTStatisticsAggregator(dataset)
if model_backend == BackendType.TORCH_FX:
from nncf.experimental.torch_fx.statistics.aggregator import FXStatisticsAggregator
from nncf.experimental.torch.fx.statistics.aggregator import FXStatisticsAggregator

return FXStatisticsAggregator(dataset)
raise nncf.UnsupportedBackendError(
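For orientation, the four factory hunks above all follow the same lazy-import dispatch: the model's backend is detected, the backend-specific class is imported from the relocated nncf.experimental.torch.fx package inside the matching branch, and nncf.UnsupportedBackendError is raised otherwise. A minimal sketch of that pattern, condensed from the hunks above (the function name create_fx_engine and the error message are illustrative, not part of the diff):

import nncf
from nncf.common.utils.backend import BackendType
from nncf.common.utils.backend import get_backend


def create_fx_engine(model):
    # Condensed illustration of the engine factory's create(): the FX class is
    # imported lazily so torch.fx stays an optional dependency for other backends.
    model_backend = get_backend(model)
    if model_backend == BackendType.TORCH_FX:
        from nncf.experimental.torch.fx.engine import FXEngine

        return FXEngine(model)
    raise nncf.UnsupportedBackendError(f"Cannot create an engine for the {model_backend} backend")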
File renamed without changes.
File renamed without changes.
@@ -21,9 +21,9 @@
from nncf.common.graph.layer_attributes import Dtype
from nncf.common.graph.operator_metatypes import UnknownMetatype
from nncf.common.logging import nncf_logger
from nncf.experimental.torch_fx.transformations import separate_conv_and_bias
from nncf.experimental.torch_fx.transformations import separate_linear_and_bias
from nncf.experimental.torch_fx.transformations import view_to_reshape
from nncf.experimental.torch.fx.transformations import separate_conv_and_bias
from nncf.experimental.torch.fx.transformations import separate_linear_and_bias
from nncf.experimental.torch.fx.transformations import view_to_reshape
from nncf.torch.graph.graph import PTNNCFGraph
from nncf.torch.graph.operator_metatypes import PT_OPERATOR_METATYPES

@@ -23,10 +23,11 @@

import nncf
from nncf.common.factory import NNCFGraphFactory
from nncf.common.logging import nncf_logger
from nncf.common.quantization.structs import QuantizationPreset
from nncf.common.quantization.structs import QuantizationScheme
from nncf.data import Dataset
from nncf.experimental.torch_fx.transformations import merge_conv_and_bias
from nncf.experimental.torch.fx.transformations import merge_conv_and_bias
from nncf.parameters import ModelType
from nncf.parameters import QuantizationMode
from nncf.parameters import TargetDevice
@@ -53,6 +54,11 @@ def quantize_impl(
"""
Implementation of the `quantize()` method for the Torch FX backend.
"""
nncf_logger.warning(
"Experimental Torch FX quantization backend is being used for the given torch.fx.GraphModule model."
" Torch FX PTQ is an experimental feature, consider using Torch or OpenVino PTQ backends"
" in case of errors or a poor model performance."
)
if fast_bias_correction is False:
raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported")
if target_device == TargetDevice.CPU_SPR:
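The warning added above is emitted whenever quantize_impl is reached through the Torch FX branch of nncf.quantize(). A rough usage sketch is given below; the tiny model, the torch.export capture step, and the calibration wiring are assumptions for illustration only and may need to be replaced by whatever graph-capture flow your NNCF and PyTorch versions expect:

import torch

import nncf


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)

    def forward(self, x):
        return torch.relu(self.conv(x))


example_input = torch.randn(1, 3, 32, 32)
# Assumption: an ATen-level torch.fx.GraphModule produced via torch.export is an
# acceptable input for the experimental Torch FX backend.
fx_model = torch.export.export(TinyModel().eval(), (example_input,)).module()

# Each data item is fed to the model as-is during calibration.
calibration_dataset = nncf.Dataset([example_input])
quantized_model = nncf.quantize(fx_model, calibration_dataset)
# The call dispatches to the relocated quantize_impl and logs the
# "Experimental Torch FX quantization backend ..." warning shown in this hunk.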
@@ -21,7 +21,7 @@
from nncf.common.tensor_statistics.aggregator import StatisticPointsContainer
from nncf.common.tensor_statistics.aggregator import StatisticsAggregator
from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
from nncf.experimental.torch_fx.model_transformer import FXModuleInsertionCommand
from nncf.experimental.torch.fx.model_transformer import FXModuleInsertionCommand
from nncf.tensor import Tensor
from nncf.torch.nncf_network import NNCFNetwork
from nncf.torch.return_types import maybe_get_values_from_torch_return_type
@@ -21,7 +21,7 @@

from nncf.common.graph.graph import NNCFNode
from nncf.common.graph.transformations.commands import TargetType
from nncf.experimental.torch_fx.model_transformer import FXModelTransformer
from nncf.experimental.torch.fx.model_transformer import FXModelTransformer
from nncf.torch.graph.transformations.commands import PTTargetPoint


@@ -22,8 +22,8 @@
from nncf.common.graph.definitions import NNCFGraphNodeType
from nncf.common.graph.transformations.commands import TargetType
from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
from nncf.experimental.torch_fx.model_transformer import FXApplyTransformationCommand
from nncf.experimental.torch_fx.transformations import bias_update_transformation_builder
from nncf.experimental.torch.fx.model_transformer import FXApplyTransformationCommand
from nncf.experimental.torch.fx.transformations import bias_update_transformation_builder
from nncf.quantization.algorithms.fast_bias_correction.backend import FastBiasCorrectionAlgoBackend
from nncf.tensor import Tensor
from nncf.torch.graph.transformations.commands import PTModelExtractionCommand
@@ -83,7 +83,7 @@ def create_input_data(shape: Tuple[int], data: List[Tensor], input_name: str, ch
@staticmethod
def get_bias_value(node: NNCFNode, nncf_graph: NNCFGraph, model: torch.fx.GraphModule) -> Tensor:
# TODO: make a node_name_vs_node map to speed up the process
from nncf.experimental.torch_fx.model_transformer import FXModelTransformer
from nncf.experimental.torch.fx.model_transformer import FXModelTransformer

bias_node = nncf_graph.get_next_nodes(node)[0]
graph_bias_node = FXModelTransformer.get_graph_node_by_name(model.graph, bias_node.node_name)
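The TODO in the hunk above concerns FXModelTransformer.get_graph_node_by_name, which resolves an NNCF node name back to a torch.fx node. Its body is not part of this diff; a plausible sketch of such a lookup, together with the precomputed map the TODO suggests, might look like this:

from typing import Dict

import torch.fx


def get_graph_node_by_name(graph: torch.fx.Graph, name: str) -> torch.fx.Node:
    # Linear scan over the FX graph nodes: O(N) per lookup, which is exactly
    # what the TODO proposes to replace with a node_name -> node map.
    for node in graph.nodes:
        if node.name == name:
            return node
    raise RuntimeError(f"Node with name {name} was not found in the graph")


def build_node_name_map(graph: torch.fx.Graph) -> Dict[str, torch.fx.Node]:
    # The map suggested by the TODO: one pass over the graph, then O(1) lookups.
    return {node.name: node for node in graph.nodes}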
4 changes: 2 additions & 2 deletions nncf/quantization/algorithms/min_max/torch_fx_backend.py
@@ -27,8 +27,8 @@
from nncf.experimental.common.tensor_statistics.collectors import AGGREGATORS_MAP
from nncf.experimental.common.tensor_statistics.collectors import TensorCollector
from nncf.experimental.common.tensor_statistics.statistics import MinMaxTensorStatistic
from nncf.experimental.torch_fx.model_transformer import FXApplyTransformationCommand
from nncf.experimental.torch_fx.transformations import qdq_insertion_tranformation_builder
from nncf.experimental.torch.fx.model_transformer import FXApplyTransformationCommand
from nncf.experimental.torch.fx.transformations import qdq_insertion_tranformation_builder
from nncf.parameters import ModelType
from nncf.parameters import TargetDevice
from nncf.quantization.advanced_parameters import StatisticsType
2 changes: 1 addition & 1 deletion nncf/quantization/quantize_model.py
@@ -229,7 +229,7 @@ def quantize(
advanced_parameters=advanced_parameters,
)
if backend == BackendType.TORCH_FX:
from nncf.experimental.torch_fx.quantization.quantize_model import quantize_impl
from nncf.experimental.torch.fx.quantization.quantize_model import quantize_impl

return quantize_impl(
model=model,
