Merge remote-tracking branch 'upstream/master' into mk/ov_pybind_poc
anastasia.kuporosova committed Mar 30, 2021
2 parents a676dc8 + 170223d commit 7382f6c
Showing 28 changed files with 405 additions and 92 deletions.
58 changes: 1 addition & 57 deletions inference-engine/ie_bridges/python/tests/test_Blob.py
@@ -54,7 +54,7 @@ def test_get_buffer():
("BIN", np.int8),
("BF16", np.float16),
])
def test_writes_to_buffer(precision, numpy_precision):
def test_write_to_buffer(precision, numpy_precision):
tensor_desc = TensorDesc(precision, [1, 3, 127, 127], "NCHW")
array = np.zeros(shape=(1, 3, 127, 127), dtype=numpy_precision)
blob = Blob(tensor_desc, array)
@@ -63,62 +63,6 @@ def test_writes_to_buffer(precision, numpy_precision):
assert np.array_equal(blob.buffer, ones_arr)


def write_to_buffer(precision, numpy_precision):
tensor_desc = TensorDesc(precision, [1, 3, 127, 127], "NCHW")
array = np.zeros(shape=(1, 3, 127, 127), dtype=numpy_precision)
blob = Blob(tensor_desc, array)
ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=numpy_precision)
blob.buffer[:] = ones_arr
assert np.array_equal(blob.buffer, ones_arr)

def test_write_to_buffer_fp32():
write_to_buffer("FP32", np.float32)


def test_write_to_buffer_fp64():
write_to_buffer("FP64", np.float64)


def test_write_to_buffer_fp16():
write_to_buffer("FP16", np.float16)


def test_write_to_buffer_int8():
write_to_buffer("I8", np.int8)


def test_write_to_buffer_uint8():
write_to_buffer("U8", np.uint8)


def test_write_to_buffer_int32():
write_to_buffer("I32", np.int32)


def test_write_to_buffer_int16():
write_to_buffer("I16", np.int16)


def test_write_to_buffer_uint16():
write_to_buffer("U16", np.uint16)


def test_write_to_buffer_int64():
write_to_buffer("I64", np.int64)


def test_write_to_buffer_bool():
write_to_buffer("BOOL", np.uint8)


def test_write_to_buffer_bin():
write_to_buffer("BIN", np.int8)


def test_write_to_buffer_bf16():
write_to_buffer("BF16", np.float16)


def test_write_numpy_scalar_int64():
tensor_desc = TensorDesc("I64", [], "SCALAR")
scalar = np.array(0, dtype=np.int64)
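For reference, the twelve deleted `test_write_to_buffer_*` functions above collapse into the single parametrized test whose tail is visible at the top of this hunk. A self-contained sketch of the consolidated version, assuming the 2021.x `openvino.inference_engine` Python API used throughout this suite:

```python
# Sketch of the consolidated test; the precision list is reconstructed
# from the twelve deleted per-precision functions.
import numpy as np
import pytest
from openvino.inference_engine import TensorDesc, Blob


@pytest.mark.parametrize("precision, numpy_precision", [
    ("FP32", np.float32),
    ("FP64", np.float64),
    ("FP16", np.float16),
    ("I8", np.int8),
    ("U8", np.uint8),
    ("I32", np.int32),
    ("I16", np.int16),
    ("U16", np.uint16),
    ("I64", np.int64),
    ("BOOL", np.uint8),
    ("BIN", np.int8),
    ("BF16", np.float16),
])
def test_write_to_buffer(precision, numpy_precision):
    # One parametrized case replaces twelve near-identical functions.
    tensor_desc = TensorDesc(precision, [1, 3, 127, 127], "NCHW")
    array = np.zeros(shape=(1, 3, 127, 127), dtype=numpy_precision)
    blob = Blob(tensor_desc, array)
    ones_arr = np.ones(shape=(1, 3, 127, 127), dtype=numpy_precision)
    blob.buffer[:] = ones_arr
    assert np.array_equal(blob.buffer, ones_arr)
```

Parametrization keeps each precision as its own reported test case while removing the duplicated bodies.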
@@ -234,10 +234,11 @@ def test_plugin_accessible_after_deletion(device):
del ie_core


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") == "ARM",
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}")
def test_exec_graph(device):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to get_exec_graph_info method isn't implemented")
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
img = read_image()
@@ -294,9 +295,11 @@ def test_get_metric(device):
assert network_name == "test_model"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test")
def test_get_config(device):
ie_core = ie.IECore()
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie_core.load_network(net, device)
config = exec_net.get_config("PERF_COUNT")
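The pattern introduced in these hunks, and repeated across the suites below, replaces environment-based skipif markers with a runtime probe of the loaded plugin: on ARM builds the CPU plugin reports `arm_compute::NEON` as its FULL_DEVICE_NAME metric. A minimal sketch of the check as a reusable helper (the helper name is mine; the tests inline this check):

```python
# Hypothetical helper distilling the runtime ARM check used in this
# commit; "arm_compute::NEON" is the FULL_DEVICE_NAME reported by the
# ARM CPU plugin build.
import pytest
from openvino.inference_engine import IECore


def skip_if_arm_cpu_plugin(ie_core: IECore, device: str, reason: str):
    if device == "CPU" and \
            ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip(reason)
```

Probing at run time lets a single TEST_DEVICE=CPU job behave correctly on both x86 and ARM agents, which a static skipif on the environment variable cannot distinguish.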
23 changes: 18 additions & 5 deletions inference-engine/ie_bridges/python/tests/test_IECore.py
@@ -4,7 +4,6 @@
import os
import pytest
from sys import platform
import numpy as np
from pathlib import Path

from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
@@ -61,8 +60,11 @@ def test_load_network_wrong_device():


def test_query_network(device):
import ngraph as ng
ie = IECore()
if device == "CPU":
if ie.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to ngraph")
import ngraph as ng
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
query_res = ie.query_network(net, device)
func_net = ng.function_from_cnn(net)
@@ -73,18 +75,22 @@ def test_query_network(device):
assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test")
def test_register_plugin():
ie = IECore()
if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to MKLDNNPlugin specific test")
ie.register_plugin("MKLDNNPlugin", "BLA")
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
exec_net = ie.load_network(net, "BLA")
assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to the registered plugin with name 'BLA'"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test")
def test_register_plugins():
ie = IECore()
if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to MKLDNNPlugin specific test")
if platform == "linux" or platform == "linux2":
ie.register_plugins(plugins_xml)
elif platform == "darwin":
@@ -126,11 +132,12 @@ def test_get_metric_list_of_str():
"metric are strings!"



@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
def test_get_metric_tuple_of_two_ints():
ie = IECore()
if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to unsupported device metric")
param = ie.get_metric("CPU", "RANGE_FOR_STREAMS")
assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \
f"metric must be tuple but {type(param)} is returned"
@@ -142,6 +149,8 @@ def test_get_metric_tuple_of_two_ints():
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
def test_get_metric_tuple_of_three_ints():
ie = IECore()
if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to unsupported device metric")
param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \
f"metric must be tuple but {type(param)} is returned"
@@ -185,21 +194,25 @@ def test_read_network_from_onnx():
net = ie.read_network(model=test_net_onnx)
assert isinstance(net, IENetwork)


def test_read_network_from_onnx_as_path():
ie = IECore()
net = ie.read_network(model=Path(test_net_onnx))
assert isinstance(net, IENetwork)


def test_read_network_from_prototxt():
ie = IECore()
net = ie.read_network(model=test_net_prototxt)
assert isinstance(net, IENetwork)


def test_read_network_from_prototxt_as_path():
ie = IECore()
net = ie.read_network(model=Path(test_net_prototxt))
assert isinstance(net, IENetwork)


def test_incorrect_xml():
ie = IECore()
with pytest.raises(Exception) as e:
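Beyond the runtime skip, note that `import ngraph as ng` moves from module scope into the test body, so pytest can still collect the module on builds where the ngraph Python bindings are absent. A sketch of the resulting test; the model paths and the `device` fixture body are illustrative stand-ins for the suite's conftest:

```python
import os

import pytest
from openvino.inference_engine import IECore

test_net_xml, test_net_bin = "model.xml", "model.bin"  # illustrative paths


@pytest.fixture
def device():
    return os.environ.get("TEST_DEVICE", "CPU")  # mirrors the suite's conftest


def test_query_network(device):
    ie = IECore()
    if device == "CPU":
        if ie.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
            pytest.skip("Can't run on ARM plugin due-to ngraph")
    import ngraph as ng  # deferred so collection succeeds without ngraph
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    query_res = ie.query_network(net, device)
    func_net = ng.function_from_cnn(net)  # used by checks elided in the diff
    # Every layer reported by query_network should map to the device.
    assert next(iter(set(query_res.values()))) == device, \
        "Wrong device for some layers"
```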
8 changes: 5 additions & 3 deletions inference-engine/ie_bridges/python/tests/test_IENetwork.py
@@ -4,7 +4,6 @@
import os
import pytest
import warnings
import numpy as np

from openvino.inference_engine import IECore, IENetwork, DataPtr, InputInfoPtr, PreProcessInfo
from conftest import model_path
@@ -183,9 +182,12 @@ def test_batch_size_after_reshape():
assert net.input_info['data'].input_data.shape == [8, 3, 32, 32]


def test_serialize():
import ngraph as ng
def test_serialize(device):
ie = IECore()
if device == "CPU":
if ie.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to ngraph")
import ngraph as ng
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
net.serialize("./serialized_net.xml", "./serialized_net.bin")
serialized_net = ie.read_network(model="./serialized_net.xml", weights="./serialized_net.bin")
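test_serialize gains both the runtime ARM skip and the deferred ngraph import; the serialize round trip itself is simple. A minimal usage sketch with illustrative model paths:

```python
# Minimal serialize round trip, as exercised by test_serialize above;
# "model.xml"/"model.bin" are illustrative paths.
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.xml", weights="model.bin")
net.serialize("./serialized_net.xml", "./serialized_net.bin")
serialized_net = ie.read_network(model="./serialized_net.xml",
                                 weights="./serialized_net.bin")
# The round trip should preserve the network interface.
assert serialized_net.input_info.keys() == net.input_info.keys()
assert serialized_net.outputs.keys() == net.outputs.keys()
```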
8 changes: 8 additions & 0 deletions inference-engine/ie_bridges/python/tests/test_InferRequest.py
@@ -376,6 +376,9 @@ def execute(self, input_data):

def test_get_perf_counts(device):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to ngraph")
net = ie_core.read_network(test_net_xml, test_net_bin)
ie_core.set_config({"PERF_COUNT": "YES"}, device)
exec_net = ie_core.load_network(net, device)
@@ -395,6 +398,8 @@ def test_get_perf_counts(device):
"Dynamic batch fully supported only on CPU")
def test_set_batch_size(device):
ie_core = ie.IECore()
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to dynamic batch isn't supported")
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10
@@ -438,6 +443,9 @@ def test_set_negative_batch_size(device):

def test_blob_setter(device):
ie_core = ie.IECore()
if device == "CPU":
if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin")
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)

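For context on the dynamic-batch test now guarded by the ARM skip: DYN_BATCH_ENABLED must be set before loading, the network carries the maximum batch, and each request can then run a smaller batch. A hedged sketch; the fixture paths are illustrative and the per-request `set_batch` call is my recollection of the 2021.x API, so treat it as an assumption:

```python
from openvino.inference_engine import IECore

test_net_xml, test_net_bin = "model.xml", "model.bin"  # illustrative paths

ie_core = IECore()
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, "CPU")
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10                  # maximum batch the plugin must support
exec_net = ie_core.load_network(net, "CPU")
request = exec_net.requests[0]
request.set_batch(4)                 # per-request batch; assumed 2021.x API
```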
6 changes: 6 additions & 0 deletions inference-engine/src/cldnn_engine/cldnn_engine.cpp
@@ -32,6 +32,7 @@
#include <transformations/common_optimizations/lin_op_sequence_fusion.hpp>
#include <transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp>
#include "transformations/common_optimizations/convert_quantize_dequantize.hpp"
#include "transformations/common_optimizations/softmax_fusion.hpp"
#include <transformations/op_conversions/convert_depth_to_space.hpp>
#include <transformations/op_conversions/convert_space_to_depth.hpp>
#include <transformations/op_conversions/convert_gelu.hpp>
@@ -323,6 +324,11 @@ InferenceEngine::CNNNetwork clDNNEngine::CloneAndTransformNetwork(const Inferenc
return false;
});

pass_config->set_callback<ngraph::pass::SoftmaxFusion>(
[](const_node_ptr &node) -> bool {
return node->input_value(0).get_partial_shape().rank().get_length() > 5;
});

// List of enabled/disabled transformations
pass_config->disable<ngraph::pass::ConvertGELU>();
pass_config->disable<ngraph::pass::ConvertMod>();
6 changes: 6 additions & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -35,6 +35,7 @@
#include <transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp>
#include "transformations/common_optimizations/convert_quantize_dequantize.hpp"
#include <transformations/common_optimizations/depth_to_space_fusion.hpp>
#include <transformations/common_optimizations/softmax_fusion.hpp>
#include <transformations/op_conversions/convert_depth_to_space.hpp>
#include <transformations/op_conversions/convert_space_to_depth.hpp>
#include <transformations/op_conversions/convert_gelu.hpp>
@@ -260,6 +261,11 @@ static void Transformation(CNNNetwork& clonedNetwork, const Config& conf) {
return MKLDNNMVNNode::checkAxesSuitability(node);
});

pass_config->set_callback<ngraph::pass::SoftmaxFusion>(
[](const_node_ptr &node) -> bool {
return node->input_value(0).get_partial_shape().rank().get_length() > 5;
});

// List of enabled/disabled transformations
pass_config->disable<ngraph::pass::ConvertGELU>();
pass_config->disable<ngraph::pass::Gelu7Downgrade>();
@@ -28,6 +28,9 @@ ngraph::pass::SoftmaxFusion::SoftmaxFusion() {
auto div_pattern = ngraph::pattern::wrap_type<opset6::Divide>({exp_pattern, reduce_sum_pattern});

ngraph::matcher_pass_callback callback = [=](pattern::Matcher& m) {
if (transformation_callback(m.get_match_root()))
return false;

const auto& pattern_map = m.get_pattern_value_map();

auto reduce_max_axes = std::dynamic_pointer_cast<opset6::Constant>(pattern_map.at(reduce_max_axes_pattern).get_node_shared_ptr());
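The new early return hands control to the plugin-supplied transformation_callback: both the CPU and GPU plugins above register a callback that returns true, and thereby skips the fusion, when the softmax input's rank exceeds 5. For intuition, a NumPy sketch of the decomposed subgraph the pass matches and the guard it now respects:

```python
import numpy as np


def decomposed_softmax(x, axis):
    # The subgraph SoftmaxFusion matches:
    # ReduceMax -> Subtract -> Exp -> ReduceSum -> Divide
    x_max = np.max(x, axis=axis, keepdims=True)
    e = np.exp(x - x_max)
    return e / np.sum(e, axis=axis, keepdims=True)


def plugin_declines_fusion(input_rank):
    # Mirrors the callback registered in cldnn_engine.cpp and
    # mkldnn_plugin.cpp: true means "leave the subgraph unfused".
    return input_rank > 5


x = np.random.rand(2, 3, 4).astype(np.float32)
y = decomposed_softmax(x, axis=-1)
assert np.allclose(y.sum(axis=-1), 1.0)    # behaves like opset Softmax
assert not plugin_declines_fusion(x.ndim)  # rank 3 <= 5: fusion allowed
```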
@@ -32,6 +32,7 @@ class ReduceOpsLayerTest : public testing::WithParamInterface<reduceMeanParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<reduceMeanParams> obj);
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;

protected:
void SetUp() override;
@@ -69,6 +69,21 @@ void ReduceOpsLayerTest::SetUp() {
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(reduce)};
function = std::make_shared<ngraph::Function>(results, params, "Reduce");
}
InferenceEngine::Blob::Ptr ReduceOpsLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
ngraph::helpers::ReductionType reductionType = std::get<3>(GetParam());
InferenceEngine::Precision netPrecision = std::get<4>(GetParam());
if (reductionType == ngraph::helpers::ReductionType::LogicalOr ||
reductionType == ngraph::helpers::ReductionType::LogicalAnd) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0);
} else if (!netPrecision.is_float()) {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 5, 0);
}
auto td = info.getTensorDesc();
auto blob = make_blob_with_precision(td);
blob->allocate();
CommonTestUtils::fill_data_random_float<InferenceEngine::Precision::FP32>(blob, 5, 0, 1000);
return blob;
}

InferenceEngine::Blob::Ptr ReduceOpsLayerWithSpecificInputTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
auto axis_vec = std::get<0>(GetParam());
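The new GenerateInput override picks input ranges per reduction type: {0,1} data for the logical reductions, small non-negative integers for integer precisions, and floats in roughly [0, 5] with 1/1000 resolution otherwise. That last part is my reading of the range/start/resolution parameters of createAndFillBlob and fill_data_random_float, so treat the exact semantics as an assumption. A NumPy mirror of the policy:

```python
import numpy as np


def generate_reduce_input(shape, reduction_type, is_float, seed=0):
    # Mirror of ReduceOpsLayerTest::GenerateInput as read from the diff;
    # the fill-helper parameter semantics are an assumption.
    rng = np.random.default_rng(seed)
    if reduction_type in ("LogicalOr", "LogicalAnd"):
        return rng.integers(0, 2, size=shape)              # {0,1} data
    if not is_float:
        return rng.integers(0, 5, size=shape)              # small ints
    return rng.integers(0, 5 * 1000, size=shape) / 1000.0  # 1/1000 step


x = generate_reduce_input((2, 4), "LogicalAnd", is_float=False)
assert set(np.unique(x)) <= {0, 1}
```

Bounding integer inputs keeps reductions like ReduceProd from overflowing, and {0,1} inputs make the logical reductions meaningful.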
@@ -2,7 +2,6 @@
# SPDX-License-Identifier: Apache-2.0

import xml.etree.ElementTree as ET
from jinja2 import Environment, FileSystemLoader
import argparse
import os
from datetime import datetime
@@ -81,7 +80,7 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str):
logger.error(f" {folder_path} is not a directory!")
continue

xml_reports = glob.glob(os.path.join(folder_path, 'report*.xml'))
xml_reports = glob.glob(os.path.join(folder_path, '**/report*.xml'))

xml_root = ET.parse(xml_reports[0]).getroot()
for op in xml_root.find("ops_list"):
@@ -93,6 +92,8 @@ def merge_xml(input_folder_paths: list, output_folder_paths: str):
summary.set("timestamp", timestamp)
logger.info(f" Processing is finished")

if not os.path.exists(output_folder_paths):
os.mkdir(output_folder_paths)
out_file_path = os.path.join(output_folder_paths, "report.xml")
with open(out_file_path, "w") as xml_file:
xml_file.write(ET.tostring(summary).decode('utf8'))
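Two practical notes on this hunk: Python's glob treats `**` as a recursive wildcard only when recursive=True is passed (otherwise it matches a single path segment, like `*`), and os.mkdir creates only one directory level. A sketch of the more defensive variants, with illustrative folder names:

```python
import glob
import os

folder_path = "artifacts/run1"          # illustrative input folder
output_folder_paths = "merged_report"   # illustrative output folder

# recursive=True makes "**" match the folder itself and any depth below.
xml_reports = glob.glob(os.path.join(folder_path, "**", "report*.xml"),
                        recursive=True)

# makedirs with exist_ok covers nested output paths; os.mkdir raises
# if an intermediate directory is missing.
os.makedirs(output_folder_paths, exist_ok=True)
```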
1 change: 1 addition & 0 deletions model-optimizer/automation/package_BOM.txt
@@ -797,6 +797,7 @@ mo/front/caffe/python_layer_extractor.py
mo/front/caffe/register_custom_ops.py
mo/front/common/__init__.py
mo/front/common/custom_replacement_registry.py
mo/front/common/extractors/__init__.py
mo/front/common/extractors/utils.py
mo/front/common/find_unsupported_ops.py
mo/front/common/layout.py
6 changes: 4 additions & 2 deletions model-optimizer/extensions/front/tf/AutomlEfficientDet.py
@@ -22,6 +22,7 @@

class EfficientDet(FrontReplacementFromConfigFileGeneral):
replacement_id = 'AutomlEfficientDet'
run_not_recursively = True

def run_before(self):
from extensions.front.ExpandDimsToUnsqueeze import ExpandDimsToUnsqueeze
@@ -57,10 +58,11 @@ def transform_graph(self, graph: Graph, replacement_descriptions: dict):
# which includes padding and resizing from the model
preprocessing_input_node_id = replacement_descriptions['preprocessing_input_node']
assert preprocessing_input_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
'node should provide scaled image output and is specified' \
'should be a last node before image normalization and is specified' \
' in the json file.'.format(preprocessing_input_node_id)
preprocessing_input_node = Node(graph, preprocessing_input_node_id)
preprocessing_input_node.in_port(0).get_connection().set_source(parameter_node.out_port(0))
consumer_node = preprocessing_input_node.out_port(0).get_connection().get_destination().node
consumer_node.in_port(0).get_connection().set_source(parameter_node.out_port(0))

preprocessing_output_node_id = replacement_descriptions['preprocessing_output_node']
assert preprocessing_output_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \
@@ -2,7 +2,7 @@
{
"id": "AutomlEfficientDet",
"custom_attributes": {
"preprocessing_input_node": "convert_image",
"preprocessing_input_node": "strided_slice_1",
"preprocessing_output_node": "truediv",
"aspect_ratios": [1.0, 1.0, 1.4, 0.7, 0.7, 1.4],
"variance": [1.0, 1.0, 1.0, 1.0],
Empty file.