Squashed merge of pdpd_frontend and static_protobuf
Removed unnecessary files (TF frontend, pdpd_poc)

NGRAPH_PDPD_FRONTEND_ENABLE=OFF by default
nosovmik committed May 28, 2021
1 parent 04f42d8 commit 8d3ddf6
Showing 188 changed files with 12,824 additions and 55 deletions.
2 changes: 2 additions & 0 deletions CMakeLists.txt
@@ -66,6 +66,7 @@ function(build_ngraph)
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE OFF)
endif()
ngraph_set(NGRAPH_INTERPRETER_ENABLE ON)
ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE OFF)

if(TREAT_WARNING_AS_ERROR)
ngraph_set(NGRAPH_WARNINGS_AS_ERRORS ON)
@@ -108,6 +109,7 @@ function(build_ngraph)
set(SDL_cmake_included ON)
add_subdirectory(ngraph)
set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE)
set(FRONTEND_LIBRARIES frontend_manager PARENT_SCOPE)
set(NGRAPH_REF_LIBRARIES ngraph_reference PARENT_SCOPE)
endfunction()

6 changes: 6 additions & 0 deletions model-optimizer/automation/package_BOM.txt
@@ -933,6 +933,11 @@ mo/front/tf/partial_infer/__init__.py
mo/front/tf/partial_infer/tf.py
mo/front/tf/register_custom_ops.py
mo/front/tf/replacement.py
mo/front_ng/__init__.py
mo/front_ng/extractor.py
mo/front_ng/frontendmanager_wrapper.py
mo/front_ng/pipeline.py
mo/front_ng/serialize.py
mo/graph/__init__.py
mo/graph/connection.py
mo/graph/graph.py
@@ -943,6 +948,7 @@ mo/main_caffe.py
mo/main_kaldi.py
mo/main_mxnet.py
mo/main_onnx.py
mo/main_pdpd.py
mo/main_tf.py
mo/middle/__init__.py
mo/middle/passes/__init__.py
2 changes: 2 additions & 0 deletions model-optimizer/mo/front_ng/__init__.py
@@ -0,0 +1,2 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
157 changes: 157 additions & 0 deletions model-optimizer/mo/front_ng/extractor.py
@@ -0,0 +1,157 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import logging as log
import re
from collections import defaultdict
from copy import copy

import numpy as np

from mo.utils.error import Error


def fe_decodeNameWithPort(inputModel, node_name: str):
"""
    Decode a node name with an optional port specification without traversing all the nodes in the graph.
    :param inputModel: Input Model to operate on
    :param node_name: node name, optionally prefixed with an input port index ('0:name') or suffixed with an output port index ('name:0')
    :return: decoded place in the graph
"""
# Check exact match with one of the names in the graph first
node = inputModel.get_place_by_tensor_name(node_name)
if node:
return node
# TODO: not tested for available frontends
regexpPost = r'(.*)(:(\d+))'
matchPost = re.search(regexpPost, node_name)
nodePost = inputModel.get_place_by_tensor_name(matchPost.group(1)) if matchPost else None
regexpPre = r'((\d+):)(.*)'
matchPre = re.search(regexpPre, node_name)
    nodePre = inputModel.get_place_by_tensor_name(matchPre.group(3)) if matchPre else None
    if nodePost and nodePre:
        raise Error('Name collision for {}'.format(node_name))
    if nodePost:
        return nodePost.get_output_port(int(matchPost.group(3)))
    if nodePre:
        return nodePre.get_input_port(int(matchPre.group(2)))
raise Error('There is no node with name {}'.format(node_name))
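
For illustration, a hypothetical usage sketch of the decoder above (the tensor names and the input_model variable are assumptions; input_model stands for the object returned by a frontend's load_from_file):

    # Hypothetical example: resolving user-supplied names to model places
    place = fe_decodeNameWithPort(input_model, 'conv1')     # exact tensor name match
    out0 = fe_decodeNameWithPort(input_model, 'conv1:0')    # output port 0 of tensor 'conv1'
    in1 = fe_decodeNameWithPort(input_model, '1:concat')    # input port 1 of tensor 'concat'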


def fe_input_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.ndarray],
freeze_placeholder: dict, input_user_data_types = dict()):
"""
    Restructures the user input cutting request: splits ports out of node names and resolves the names to places in the input model.
    :param inputModel: Input Model to operate on
:param input_user_shapes: data structure representing user input cutting request. It may be:
        # None if the user provided neither the --input nor the --input_shape key
# list instance which contains input layer names with or without ports if user provided only --input key
# dict instance which contains input layer names with or without ports as keys and shapes as values if user
provided both --input and --input_shape
# np.ndarray if user provided only --input_shape key
:param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values
    :param input_user_data_types: dictionary with input nodes and their data types
:return: restructured input shapes and freeze placeholder shapes information
Example of input dictionary:
_input_shapes =
{
'node_ID':
[
{'shape': None, 'in': 0},
{'shape': None, 'in': 1},
],
'node_1_ID':
[
{'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32}
],
'node_2_ID':
[
{'shape': None, 'out': 3}
]
}
Example of freeze placeholder dictionary:
_freeze_placeholder =
{
'phase_train' : False
}
"""
_input_shapes = []
    # The new FrontEnd API path is active
    log.debug('fe_input_user_data_repack is invoked')
    if isinstance(input_user_shapes, (list, dict)):
for input_name in input_user_shapes:
node = fe_decodeNameWithPort(inputModel, input_name)
if node is None:
raise Error('Cannot find location {} in the input model'.format(input_name))
shape = None if isinstance(input_user_shapes, list) else input_user_shapes[input_name]
if input_name in input_user_data_types and input_user_data_types[input_name] is not None:
data_type = input_user_data_types[input_name]
_input_shapes.append({'node': node, 'shape': shape, 'data_type': data_type})
else:
_input_shapes.append({'node': node, 'shape': shape})
elif isinstance(input_user_shapes, np.ndarray):
model_inputs = inputModel.get_inputs()
assert len(model_inputs) == 1
_input_shapes.append({'node': model_inputs[0], 'shape': input_user_shapes})
else:
assert input_user_shapes is None
# TODO: add logic for freeze_placeholder
return _input_shapes, dict()
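
As a hedged sketch of how this repacking might be driven (the names, shapes and the input_model variable below are illustrative assumptions):

    # Hypothetical call: two inputs with explicit shapes, one with an overridden data type
    shapes = {'data': [1, 3, 224, 224], 'seq_len': [1]}
    dtypes = {'data': np.float32, 'seq_len': None}
    input_shapes, frozen = fe_input_user_data_repack(input_model, shapes, {}, input_user_data_types=dtypes)
    # input_shapes == [{'node': <place>, 'shape': [1, 3, 224, 224], 'data_type': np.float32},
    #                  {'node': <place>, 'shape': [1]}]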


def fe_output_user_data_repack(inputModel, outputs: list):
"""
:param inputModel: Input Model to operate on
:param outputs: list of node names provided by user
    :return: list of dictionaries with resolved places, e.g. [{'node': <place>}, ...]
Example of outputs dictionary:
_outputs =
{
'node_ID':
[
{'out': 0},
{'out': 1},
],
'node_1_ID':
[
{'port': None}
],
'node_2_ID':
[
{'in': 3}
]
}
"""
_outputs = []
    # The new FrontEnd API path is active
    log.debug('fe_output_user_data_repack is invoked')
    if outputs:
for output in outputs:
node = fe_decodeNameWithPort(inputModel, output)
if node is None:
raise Error('Cannot find location {} in the graph'.format(output))
_outputs.append({'node': node})
return _outputs
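
For example (the output name and input_model are hypothetical), a single requested output resolves to a one-element list of places:

    # Hypothetical call: request one output tensor by name
    outs = fe_output_user_data_repack(input_model, ['prob'])
    # outs == [{'node': <place for tensor 'prob'>}]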


def fe_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.ndarray],
input_user_data_types: dict, outputs: list, freeze_placeholder: dict):
"""
:param inputModel: Input Model to operate on
    :param input_user_shapes: data structure representing user input cutting request
    :param input_user_data_types: dictionary with input nodes and their data types
    :param outputs: list of node names to treat as outputs
:param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values
:return: restructured input, output and freeze placeholder dictionaries or None values
"""
_input_shapes, _freeze_placeholder = fe_input_user_data_repack(inputModel, input_user_shapes, freeze_placeholder,
input_user_data_types=input_user_data_types)
_outputs = fe_output_user_data_repack(inputModel, outputs)

    log.debug('---------- Inputs/outputs/freeze_placeholder ----------')
    log.debug(_input_shapes)
    log.debug(_outputs)
    log.debug(freeze_placeholder)
    log.debug('------------------------------------')

return _input_shapes, _outputs, _freeze_placeholder
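
A short, hypothetical end-to-end sketch of the combined entry point (argument values are illustrative; input_model is assumed to be a loaded frontend model):

    # Hypothetical example: cut the model to one input with a fixed shape and one output
    input_shapes, outputs, frozen = fe_user_data_repack(
        input_model,
        input_user_shapes={'data': [1, 3, 224, 224]},
        input_user_data_types={},
        outputs=['prob'],
        freeze_placeholder={})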
23 changes: 23 additions & 0 deletions model-optimizer/mo/front_ng/frontendmanager_wrapper.py
@@ -0,0 +1,23 @@
#!/usr/bin/env python3

# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import sys


def create_fem():
fem = None
try:
from ngraph.frontend import FrontEndManager # pylint: disable=no-name-in-module,import-error
fem = FrontEndManager()
except Exception:
print("nGraph FrontEndManager is not initialized")
return fem


if __name__ == "__main__":
if not create_fem():
        sys.exit(1)
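
A small hypothetical sketch of how this helper can be used by callers (get_available_front_ends is the FrontEndManager method already used in pipeline.py below):

    # Hypothetical usage: the wrapper degrades gracefully when nGraph bindings are absent
    fem = create_fem()
    if fem is None:
        print('nGraph is not available; the new frontend path cannot be used')
    else:
        print(fem.get_available_front_ends())  # names of the frontends registered in nGraph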
97 changes: 97 additions & 0 deletions model-optimizer/mo/front_ng/pipeline.py
@@ -0,0 +1,97 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse
import logging as log

from mo.front_ng.extractor import fe_user_data_repack
from mo.middle.passes.infer import validate_batch_in_shape


def moc_pipeline(argv: argparse.Namespace):
from ngraph import Dimension, PartialShape # pylint: disable=no-name-in-module,import-error
from ngraph.utils.types import get_element_type # pylint: disable=no-name-in-module,import-error
log.info('New MOC pipeline')
fem = argv.feManager
log.info(f'fem.availableFrontEnds: {str(fem.get_available_front_ends())}')
log.info(f'Initializing new FE for framework {argv.framework}')
fe = fem.load_by_framework(argv.framework)
inputModel = fe.load_from_file(argv.input_model)

user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
inputModel, argv.placeholder_shapes, argv.placeholder_data_types,
argv.output, argv.freeze_placeholder_with_value)

def compare_nodes(old, new):
eq = len(old) == len(new)
if eq:
for item in old:
found = [x for x in new if x['node'].is_equal(item)]
if not found:
eq = False
break
return eq

inputsEqual = True
if user_shapes:
inputsEqual = compare_nodes(inputModel.get_inputs(), user_shapes)

outputsEqual = True
if outputs:
outputsEqual = compare_nodes(inputModel.get_outputs(), outputs)
log.debug(f"Inputs are same: {inputsEqual}, outputs are same: {outputsEqual}")

if not inputsEqual and not outputsEqual:
# Use ExtractSubgraph
newInputPlaces = [x['node'] for x in user_shapes]
newOutputPlaces = [x['node'] for x in outputs]
log.debug("Using extract subgraph")
log.debug(f"Inputs: {newInputPlaces}")
log.debug(f"Outputs: {newOutputPlaces}")
inputModel.extract_subgraph(newInputPlaces, newOutputPlaces)
elif not inputsEqual:
newInputPlaces = [x['node'] for x in user_shapes]
log.debug("Using override_all_inputs")
log.debug(f"Inputs: {newInputPlaces}")
inputModel.override_all_inputs(newInputPlaces)
elif not outputsEqual:
newOutputPlaces = [x['node'] for x in outputs]
log.debug("Using override_all_outputs")
log.debug(f"Outputs: {newOutputPlaces}")
inputModel.override_all_outputs(newOutputPlaces)

if user_shapes:
for user_shape in user_shapes:
if 'shape' in user_shape and user_shape['shape'] is not None:
inputModel.set_partial_shape(user_shape['node'], PartialShape(user_shape['shape']))
if 'data_type' in user_shape and user_shape['data_type'] is not None:
data_type = get_element_type(user_shape['data_type'])
log.debug(f"Set data type: {data_type}")
inputModel.set_element_type(user_shape['node'], data_type)

# Set batch size
if argv.batch is not None and argv.batch > 0:
log.debug(f"Setting batch size to {argv.batch}")
for place in inputModel.get_inputs():
oldPartShape = inputModel.get_partial_shape(place)
newshape = []
oldshape_converted = []
joinedName = ' '.join(place.get_names())
if oldPartShape.rank.is_static:
for i in range(oldPartShape.rank.get_length()):
# Assume batch size is always 1-st dimension in shape
# Keep other dimensions unchanged
                    newshape.append(Dimension(argv.batch) if i == 0 else oldPartShape.get_dimension(i))
oldshape_converted.append(oldPartShape.get_dimension(i))

validate_batch_in_shape(oldshape_converted, joinedName)
else:
# In case of fully dynamic shape raise the same error as for invalid batch dimension
validate_batch_in_shape(oldshape_converted, joinedName)

newPartShape = PartialShape(newshape)
log.debug(f"Input: {joinedName}, Old shape: {oldshape_converted}, New shape: {newshape}")
inputModel.set_partial_shape(place, newPartShape)

nGraphFunction = fe.convert(inputModel)
return nGraphFunction
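
For context, a hedged driver sketch for moc_pipeline; the attribute names are exactly those read by the function above, while the concrete values, the 'pdpd' framework string and the file name are illustrative assumptions:

    # Hypothetical driver code (values are placeholders)
    import argparse
    from mo.front_ng.frontendmanager_wrapper import create_fem

    argv = argparse.Namespace(
        feManager=create_fem(),
        framework='pdpd',                 # assumed frontend name
        input_model='model.pdmodel',      # assumed model file
        placeholder_shapes={'x': [1, 3, 224, 224]},   # assumed input name and shape
        placeholder_data_types={},
        output=None,
        freeze_placeholder_with_value={},
        batch=None)
    ng_function = moc_pipeline(argv)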
17 changes: 17 additions & 0 deletions model-optimizer/mo/front_ng/serialize.py
@@ -0,0 +1,17 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse
import os


def ngraph_emit_ir(nGraphFunction, argv: argparse.Namespace):
output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()

from ngraph import function_to_cnn # pylint: disable=no-name-in-module,import-error
network = function_to_cnn(nGraphFunction)

orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))
network.serialize(orig_model_name + ".xml", orig_model_name + ".bin")
print('[ SUCCESS ] Converted with nGraph Serializer')
return 0
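
And a brief hypothetical continuation of the driver sketch from pipeline.py above, showing how the serialized IR is produced (output_dir and model_name are the argv attributes this function reads; the names are placeholders):

    # Hypothetical: ng_function comes from moc_pipeline(argv)
    argv.output_dir = '.'
    argv.model_name = 'converted_model'   # assumed IR base name
    ngraph_emit_ir(ng_function, argv)     # writes converted_model.xml and converted_model.bin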