[MO] Initial support of nGraph Frontend API (#6001)
* [NG Frontend] Introduce classes and macros for Frontend-specific exceptions

* [nGraph Frontend] Python bindings

Exposing Python API for nGraph FrontEndManager introduced in PR #5470 (see the usage sketch below)

* Exposing 'Dimension' class to Python nGraph API

* Model optimizer code from pdpd_frontend

Removed the 'use_legacy_frontend' option, as it is not needed for the PDPD integration

* Added XML/BIN prints for 'ngraph-FE' serialization path

* Update python bindings to latest code from PR #5995

* Add OV_FRONTEND_PATH to LD_LIBRARY_PATH by default for Model Optimizer

* Test with 'mock' frontend

* Added meta info to generated IR

Added more tests to cover currently supported scenarios
Added clang-format config for the MO mock frontend

* Some flake8 and pylint fixes

* Import ngraph-related packages at the top of Python files

This helps with specifying the types of function arguments, but can break unit tests that call these functions directly without ngraph available

* CI fixes

* Run tests from 'main_test.py' inside the ngraph environment

* Fixed review comments

* Fixed comments
Added more tests checking that inputs and outputs are the same

* Renamed front_ng to moc_frontend

* Update package_BOM.txt
nosovmik authored Jun 24, 2021
1 parent 4ed0cdb commit d49405a
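For orientation, here is a minimal sketch of how the newly exposed bindings fit together. Only get_available_front_ends() appears in this diff; load_by_framework, load_from_file and convert mirror the C++ FrontEndManager API from PR #5470 and are assumptions here, as is the model path:

# pylint: disable=no-name-in-module,import-error
from ngraph.frontend import FrontEndManager

fem = FrontEndManager()
print(fem.get_available_front_ends())  # e.g. ['pdpd'], plus 'mock_mo' in test builds

fe = fem.load_by_framework('pdpd')                # assumed Python binding of the C++ API
input_model = fe.load_from_file('model.pdmodel')  # hypothetical model file
ngraph_function = fe.convert(input_model)         # nGraph function, ready for serialization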
Showing 28 changed files with 1,247 additions and 42 deletions.
7 changes: 7 additions & 0 deletions model-optimizer/CMakeLists.txt
@@ -7,6 +7,13 @@ elseif(NOT ENABLE_PYTHON)
    message(WARNING "Please enable IE Python API (ie_api and offline_transformations_api) targets to enable Model Optimizer target")
else()
    add_custom_target(model_optimizer DEPENDS ie_api offline_transformations_api inference_engine_ir_reader)
    if(ENABLE_TESTS)
        add_subdirectory(unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend)
        add_dependencies(model_optimizer mock_mo_ngraph_frontend)

        add_subdirectory(unit_tests/mock_mo_frontend/mock_mo_python_api)
        add_dependencies(model_optimizer mock_mo_python_api)
    endif()
endif()

# install
5 changes: 5 additions & 0 deletions model-optimizer/automation/package_BOM.txt
@@ -947,6 +947,7 @@ mo/main_caffe.py
mo/main_kaldi.py
mo/main_mxnet.py
mo/main_onnx.py
mo/main_pdpd.py
mo/main_tf.py
mo/middle/__init__.py
mo/middle/passes/__init__.py
@@ -966,6 +967,10 @@ mo/middle/passes/infer.py
mo/middle/passes/tensor_names.py
mo/middle/pattern_match.py
mo/middle/replacement.py
mo/moc_frontend/__init__.py
mo/moc_frontend/extractor.py
mo/moc_frontend/pipeline.py
mo/moc_frontend/serialize.py
mo/ops/__init__.py
mo/ops/activation.py
mo/ops/assign.py
50 changes: 41 additions & 9 deletions model-optimizer/mo/main.py
@@ -21,6 +21,8 @@

from extensions.back.SpecialNodesFinalization import RemoveConstOps, CreateConstNodesReplacement, NormalizeTI
from mo.back.ie_ir_ver_2.emitter import append_ir_info
from mo.moc_frontend.pipeline import moc_pipeline
from mo.moc_frontend.serialize import moc_emit_ir
from mo.graph.graph import Graph
from mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively
from mo.pipeline.common import prepare_emit_ir, get_ir_version
@@ -41,6 +43,9 @@
from mo.utils.version import get_version, get_simplified_mo_version, get_simplified_ie_version
from mo.utils.versions_checker import check_requirements # pylint: disable=no-name-in-module

# pylint: disable=no-name-in-module,import-error
from ngraph.frontend import FrontEndManager


def replace_ext(name: str, old: str, new: str):
    base, ext = os.path.splitext(name)
@@ -94,9 +99,17 @@ def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_mxnet:
def prepare_ir(argv: argparse.Namespace):
    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(argv)

    fem = argv.feManager
    new_front_ends = []
    if fem is not None:  # in future, check of 'use_legacy_frontend' in argv can be added here
        new_front_ends = fem.get_available_front_ends()

    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error('Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
                    'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)
    frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']
    frameworks = list(set(frameworks + new_front_ends))
    if argv.framework not in frameworks:
        raise Error('Framework {} is not a valid target. Please use --framework with one from the list: {}. ' +
                    refer_to_faq_msg(15), argv.framework, frameworks)

    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error('Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
@@ -160,7 +173,9 @@ def raise_ie_not_found():
    if argv.legacy_ir_generation and len(argv.transform) != 0:
        raise Error("--legacy_ir_generation and --transform keys can not be used at the same time.")

    ret_code = check_requirements(framework=argv.framework)
    use_legacy_fe = argv.framework not in new_front_ends
    # For C++ frontends there is no specific python installation requirements, thus check only generic ones
    ret_code = check_requirements(framework=argv.framework if use_legacy_fe else None)
    if ret_code:
        raise Error('check_requirements exit with return code {}'.format(ret_code))
@@ -243,19 +258,30 @@ def raise_ie_not_found():
        send_framework_info('kaldi')
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
    elif is_onnx:
    elif is_onnx:  # in future check of 'use_legacy_frontend' can be added here
        send_framework_info('onnx')
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
    graph = unified_pipeline(argv)
    return graph

    graph = None
    ngraph_function = None

    # In future check of use_legacy_frontend option can be added here
    if argv.feManager is None or argv.framework not in new_front_ends:
        graph = unified_pipeline(argv)
    else:
        ngraph_function = moc_pipeline(argv)
    return graph, ngraph_function


def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern)

    if 'feManager' in argv:
        del argv.feManager

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else []
@@ -328,7 +354,11 @@ def driver(argv: argparse.Namespace):

    start_time = datetime.datetime.now()

    ret_res = emit_ir(prepare_ir(argv), argv)
    graph, ngraph_function = prepare_ir(argv)
    if graph is not None:
        ret_res = emit_ir(graph, argv)
    else:
        ret_res = moc_emit_ir(ngraph_function, argv)

    if ret_res != 0:
        return ret_res
@@ -348,7 +378,7 @@ def main(cli_parser: argparse.ArgumentParser, framework: str):
    return ret_res


def main(cli_parser: argparse.ArgumentParser, framework: str):
def main(cli_parser: argparse.ArgumentParser, fem: FrontEndManager, framework: str):
    telemetry = tm.Telemetry(app_name='Model Optimizer', app_version=get_simplified_mo_version())
    telemetry.start_session('mo')
    telemetry.send_event('mo', 'version', get_simplified_mo_version())
@@ -362,6 +392,7 @@ def main(cli_parser: argparse.ArgumentParser, framework: str):

    if framework:
        argv.framework = framework
    argv.feManager = fem

    ov_update_message = None
    if not hasattr(argv, 'silent') or not argv.silent:
@@ -404,4 +435,5 @@ def main(cli_parser: argparse.ArgumentParser, framework: str):

if __name__ == "__main__":
    from mo.utils.cli_parser import get_all_cli_parser
    sys.exit(main(get_all_cli_parser(), None))
    fem = FrontEndManager()
    sys.exit(main(get_all_cli_parser(fem), fem, None))
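Taken together, the new conversion flow in driver() reduces to a small dispatch. A condensed sketch of the code above, with no behavior beyond what the diff shows:

fem = FrontEndManager()
argv.feManager = fem

graph, ngraph_function = prepare_ir(argv)         # exactly one of the two is set
if graph is not None:
    ret_res = emit_ir(graph, argv)                # legacy Graph -> IR emitter
else:
    ret_res = moc_emit_ir(ngraph_function, argv)  # moc_pipeline result -> serialized IR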
2 changes: 1 addition & 1 deletion model-optimizer/mo/main_caffe.py
@@ -7,4 +7,4 @@

if __name__ == "__main__":
    from mo.main import main
    sys.exit(main(get_caffe_cli_parser(), 'caffe'))
    sys.exit(main(get_caffe_cli_parser(), None, 'caffe'))
2 changes: 1 addition & 1 deletion model-optimizer/mo/main_kaldi.py
@@ -7,4 +7,4 @@

if __name__ == "__main__":
    from mo.main import main
    sys.exit(main(get_kaldi_cli_parser(), 'kaldi'))
    sys.exit(main(get_kaldi_cli_parser(), None, 'kaldi'))
2 changes: 1 addition & 1 deletion model-optimizer/mo/main_mxnet.py
@@ -7,4 +7,4 @@

if __name__ == "__main__":
    from mo.main import main
    sys.exit(main(get_mxnet_cli_parser(), 'mxnet'))
    sys.exit(main(get_mxnet_cli_parser(), None, 'mxnet'))
2 changes: 1 addition & 1 deletion model-optimizer/mo/main_onnx.py
@@ -7,4 +7,4 @@

if __name__ == "__main__":
    from mo.main import main
    sys.exit(main(get_onnx_cli_parser(), 'onnx'))
    sys.exit(main(get_onnx_cli_parser(), None, 'onnx'))
14 changes: 14 additions & 0 deletions model-optimizer/mo/main_pdpd.py
@@ -0,0 +1,14 @@
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import sys

from mo.utils.cli_parser import get_all_cli_parser

from ngraph.frontend import FrontEndManager # pylint: disable=no-name-in-module,import-error


if __name__ == "__main__":
    from mo.main import main
    fem = FrontEndManager()
    sys.exit(main(get_all_cli_parser(fem), fem, 'pdpd'))
2 changes: 1 addition & 1 deletion model-optimizer/mo/main_tf.py
@@ -7,4 +7,4 @@

if __name__ == "__main__":
    from mo.main import main
    sys.exit(main(get_tf_cli_parser(), 'tf'))
    sys.exit(main(get_tf_cli_parser(), None, 'tf'))
31 changes: 21 additions & 10 deletions model-optimizer/mo/middle/passes/infer.py
@@ -206,19 +206,30 @@ def override_batch(graph: Graph, batch: int):
    if batch is not None:
        for node_id, data in graph.nodes(data=True):
            if 'op' in data and data['op'] == 'Parameter' and not data.get('fixed_batch', False):
                if len(data['shape']) == 0 or data['shape'][0] not in (-1, 0, 1):
                    raise Error(('The input layer {} has a shape {} defined in the model. \n\n' +
                                 'When you use -b (--batch) option, Model Optimizer applies its value to the first ' +
                                 'element of the shape if it is equal to -1, 0 or 1. Otherwise, this is the ambiguous ' +
                                 'situation - Model Optimizer can not know in advance whether the layer has the batch ' +
                                 'dimension or not.\n\n For example, you want to set batch dimension equals 100 ' +
                                 'for the input layer "data" with shape (10,34). Although you can not use --batch, ' +
                                 'you should pass --input_shape (100,34) instead of --batch 100. \n\n' +
                                 refer_to_faq_msg(39))
                                .format(data['name'], data['shape']))
                validate_batch_in_shape(data['shape'], data['name'])
                data['shape'][0] = batch


def validate_batch_in_shape(shape, layer_name: str):
"""
Raises Error #39 if shape is not valid for setting batch size
Parameters
----------
shape: current shape of layer under validation
layer_name: name of layer under validation
"""
if len(shape) == 0 or shape[0] not in (-1, 0, 1):
raise Error(('The input layer {} has a shape {} defined in the model. \n\n' +
'When you use -b (--batch) option, Model Optimizer applies its value to the first ' +
'element of the shape if it is equal to -1, 0 or 1. Otherwise, this is the ambiguous ' +
'situation - Model Optimizer can not know in advance whether the layer has the batch ' +
'dimension or not.\n\n For example, you want to set batch dimension equals 100 ' +
'for the input layer "data" with shape (10,34). Although you can not use --batch, ' +
'you should pass --input_shape (100,34) instead of --batch 100. \n\n' +
refer_to_faq_msg(39))
.format(layer_name, shape))


def override_placeholder_shapes(graph: Graph, user_shapes: dict, batch=None):
"""
This function overrides shapes for nodes with 'op' param set to 'Parameter' with shapes defined by users (only
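A short illustration of the extracted helper's contract, as a hedged sketch (the shapes and the layer name are made up):

import numpy as np

from mo.middle.passes.infer import validate_batch_in_shape
from mo.utils.error import Error

validate_batch_in_shape(np.array([1, 3, 224, 224]), 'data')   # OK: first dim is 1, batch can be overridden
validate_batch_in_shape(np.array([-1, 3, 224, 224]), 'data')  # OK: dynamic batch (-1)
try:
    validate_batch_in_shape(np.array([10, 34]), 'data')       # first dim is 10 -> ambiguous
except Error:
    pass  # Error #39 suggests --input_shape (100,34) instead of --batch 100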
2 changes: 2 additions & 0 deletions model-optimizer/mo/moc_frontend/__init__.py
@@ -0,0 +1,2 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
144 changes: 144 additions & 0 deletions model-optimizer/mo/moc_frontend/extractor.py
@@ -0,0 +1,144 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import logging as log
import re
from collections import defaultdict
from copy import copy

import numpy as np

from mo.utils.error import Error

from ngraph.frontend import InputModel # pylint: disable=no-name-in-module,import-error


def decode_name_with_port(input_model: InputModel, node_name: str):
"""
Decode name with optional port specification w/o traversing all the nodes in the graph
TODO: in future node_name can specify input/output port groups and indices (58562)
:param input_model: Input Model
:param node_name: user provided node name
:return: decoded place in the graph
"""
# Check exact match with one of the names in the graph first
node = input_model.get_place_by_tensor_name(node_name)
if node:
return node

# TODO: Add support for input/output group name and port index here (58562)
# Legacy frontends use format "number:name:number" to specify input and output port indices
# For new frontends this logic shall be extended to additionally support input and output group names
raise Error('There is no node with name {}'.format(node_name))


def fe_input_user_data_repack(input_model: InputModel, input_user_shapes: [None, list, dict, np.ndarray],
                              freeze_placeholder: dict, input_user_data_types=dict()):
    """
    Restructures user input cutting request. Splits ports out of node names.
    Transforms node names to node ids.
    :param input_model: current input model
    :param input_user_shapes: data structure representing user input cutting request. It may be:
    # None value if user provided neither --input nor --input_shape keys
    # list instance which contains input layer names with or without ports if user provided
    only --input key
    # dict instance which contains input layer names with or without ports as keys and shapes as
    values if user provided both --input and --input_shape
    # np.ndarray if user provided only --input_shape key
    :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values
    :param input_user_data_types: dictionary with input nodes and their data types
    :return: restructured input shapes and freeze placeholder shapes information
    Example of input dictionary:
    _input_shapes =
    {
        'node_ID':
            [
                {'shape': None, 'in': 0},
                {'shape': None, 'in': 1},
            ],
        'node_1_ID':
            [
                {'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32}
            ],
        'node_2_ID':
            [
                {'shape': None, 'out': 3}
            ]
    }
    Example of freeze placeholder dictionary:
    _freeze_placeholder =
    {
        'phase_train' : False
    }
    """
    _input_shapes = []
    if isinstance(input_user_shapes, list) or isinstance(input_user_shapes, dict):
        for input_name in input_user_shapes:
            node = decode_name_with_port(input_model, input_name)
            if node is None:
                raise Error('Cannot find location {} in the input model'.format(input_name))
            shape = None if isinstance(input_user_shapes, list) else input_user_shapes[input_name]
            if input_user_data_types.get(input_name) is not None:
                data_type = input_user_data_types[input_name]
                _input_shapes.append({'node': node, 'shape': shape, 'data_type': data_type})
            else:
                _input_shapes.append({'node': node, 'shape': shape})
    elif isinstance(input_user_shapes, np.ndarray):
        model_inputs = input_model.get_inputs()
        assert len(model_inputs) == 1
        _input_shapes.append({'node': model_inputs[0], 'shape': input_user_shapes})
    else:
        assert input_user_shapes is None
    # TODO: implement freeze_placeholder (issue 58560)
    return _input_shapes, dict()


def fe_output_user_data_repack(input_model: InputModel, outputs: list):
"""
:param input_model: Input Model to operate on
:param outputs: list of node names provided by user
:return: dictionary with node IDs as keys and list of port dictionaries as values
Example of outputs dictionary:
_outputs =
{
'node_ID':
[
{'out': 0},
{'out': 1},
],
'node_1_ID':
[
{'port': None}
],
'node_2_ID':
[
{'in': 3}
]
}
"""
_outputs = []
if outputs is not None:
for output in outputs:
node = decode_name_with_port(input_model, output)
if node is None:
raise Error('Cannot find location {} in the graph'.format(output))
_outputs.append({'node': node})
return _outputs


def fe_user_data_repack(input_model: InputModel, input_user_shapes: [None, list, dict, np.ndarray],
                        input_user_data_types: dict, outputs: list, freeze_placeholder: dict):
    """
    :param input_model: Input Model to operate on
    :param input_user_shapes: data structure representing user input cutting request
    :param input_user_data_types: dictionary with input nodes and their data types
    :param outputs: list of node names to treat as outputs
    :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values
    :return: restructured input, output and freeze placeholder dictionaries or None values
    """
    _input_shapes, _freeze_placeholder = fe_input_user_data_repack(
        input_model, input_user_shapes, freeze_placeholder, input_user_data_types=input_user_data_types)
    _outputs = fe_output_user_data_repack(input_model, outputs)

    return _input_shapes, _outputs, _freeze_placeholder
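To see how the repack helpers compose, a hedged sketch with a stand-in model (the stub class and the tensor names 'x'/'prob' are hypothetical; in the MOC pipeline the InputModel comes from a loaded frontend):

from mo.moc_frontend.extractor import fe_user_data_repack


class _StubModel:
    """Stand-in for ngraph.frontend.InputModel, only for this sketch."""
    def get_place_by_tensor_name(self, name):
        return 'Place({})'.format(name) if name in ('x', 'prob') else None

    def get_inputs(self):
        return ['Place(x)']


_input_shapes, _outputs, _freeze_placeholder = fe_user_data_repack(
    _StubModel(),
    input_user_shapes={'x': [1, 3, 224, 224]},  # as parsed from --input/--input_shape
    input_user_data_types={},
    outputs=['prob'],                           # as parsed from --output
    freeze_placeholder={})                      # not handled yet (issue 58560)
print(_input_shapes)        # [{'node': 'Place(x)', 'shape': [1, 3, 224, 224]}]
print(_outputs)             # [{'node': 'Place(prob)'}]
print(_freeze_placeholder)  # {}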
