Merge remote-tracking branch 'upstream/master' into CVS-53713
ilya-lavrenov committed May 25, 2021
2 parents 0d28855 + 7d429e2 commit a9bf863
Showing 14 changed files with 55 additions and 33 deletions.
6 changes: 6 additions & 0 deletions inference-engine/src/inference_engine/file_utils.cpp
@@ -99,6 +99,12 @@ long long FileUtils::fileSize(const char* charfilepath) {
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
std::wstring widefilename = FileUtils::multiByteCharToWString(charfilepath);
const wchar_t* fileName = widefilename.c_str();
#elif defined(__ANDROID__) || defined(ANDROID)
std::string fileName = charfilepath;
std::string::size_type pos = fileName.find('!');
if (pos != std::string::npos) {
fileName = fileName.substr(0, pos);
}
#else
const char* fileName = charfilepath;
#endif
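
The new `__ANDROID__` branch above handles model paths that point inside an archive (for example an APK), conventionally written as `<archive>!<entry>`: everything after the first `!` is dropped so the size query runs against the archive file that actually exists on the filesystem. A minimal sketch of that logic, in Python for illustration only (the example path is hypothetical):

```python
def strip_android_archive_suffix(charfilepath: str) -> str:
    """Mirror of the new fileSize() branch: cut the path at the first '!'."""
    pos = charfilepath.find('!')
    return charfilepath[:pos] if pos != -1 else charfilepath

# Hypothetical APK-style path; only the archive itself exists on disk.
print(strip_android_archive_suffix("/data/app/base.apk!/assets/model.xml"))  # /data/app/base.apk
print(strip_android_archive_suffix("/sdcard/model.xml"))                      # unchanged
```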
7 changes: 4 additions & 3 deletions inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
@@ -524,9 +524,10 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndZeroPoints(MKLDNNGraph &graph) {
IE_THROW() << "weightsBlob has not allocated buffer";

ptrdiff_t G = convNode->getGroupNum();
ptrdiff_t OC = weightsConstant->outDims[0][0] / G;
ptrdiff_t IC = weightsConstant->outDims[0][1];
ptrdiff_t KD = weightsConstant->outDims[0].ndims() == 5 ? weightsConstant->outDims[0][2] : 1;
const int groupOffset = convNode->getAlgorithm() == ConvolutionGrouped ? 1 : 0;
ptrdiff_t OC = weightsConstant->outDims[0][0 + groupOffset];
ptrdiff_t IC = weightsConstant->outDims[0][1 + groupOffset];
ptrdiff_t KD = weightsConstant->outDims[0].ndims() == (5 + groupOffset) ? weightsConstant->outDims[0][weightsConstant->outDims[0].ndims() - 3] : 1;
ptrdiff_t KH = weightsConstant->outDims[0][weightsConstant->outDims[0].ndims() - 2];
ptrdiff_t KW = weightsConstant->outDims[0][weightsConstant->outDims[0].ndims() - 1];
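
The rewritten indexing accounts for grouped convolutions: when the node's algorithm is `ConvolutionGrouped`, the weights shape carries an extra leading group dimension (for example `[G, OC, IC, KH, KW]` instead of `[OC, IC, KH, KW]`), so `OC`, `IC`, and the kernel-depth check all shift by one position. A small sketch of the offset logic, in Python with hypothetical shapes:

```python
def weight_kernel_dims(dims, is_grouped):
    """Read OC/IC/KD/KH/KW from a weights shape, skipping a leading group dim if present."""
    group_offset = 1 if is_grouped else 0
    oc = dims[0 + group_offset]
    ic = dims[1 + group_offset]
    # A depth dimension only exists for 3D convolutions (one extra trailing axis).
    kd = dims[-3] if len(dims) == 5 + group_offset else 1
    kh = dims[-2]
    kw = dims[-1]
    return oc, ic, kd, kh, kw

print(weight_kernel_dims([2, 8, 4, 3, 3], is_grouped=True))    # (8, 4, 1, 3, 3) -- grouped 2D
print(weight_kernel_dims([16, 4, 3, 3], is_grouped=False))     # (16, 4, 1, 3, 3) -- plain 2D
print(weight_kernel_dims([16, 4, 2, 3, 3], is_grouped=False))  # (16, 4, 2, 3, 3) -- plain 3D
```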

@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "execution_graph_tests/keep_assing.hpp"
#include "execution_graph_tests/keep_assign.hpp"
#include "common_test_utils/test_constants.hpp"

using namespace ExecutionGraphTests;
@@ -2,7 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#include "execution_graph_tests/keep_assing.hpp"
#include "execution_graph_tests/keep_assign.hpp"
#include "functional_test_utils/skip_tests_config.hpp"

#include <ngraph/ngraph.hpp>
@@ -53,7 +53,7 @@ JitConstants MVNKernelRef::GetJitConstants(const mvn_params& params, DispatchDat

std::string MVNKernelRef::GetKernelName(const mvn_params& params) const {
if (params.mvnMode == MVNMode::ACROSS_CHANNELS)
return kernelName + "_accross_channels";
return kernelName + "_across_channels";
else
return kernelName + "_within_channels";
}
@@ -6,7 +6,7 @@
#include "include/data_types.cl"


KERNEL (mvn_gpu_ref_accross_channels)(
KERNEL (mvn_gpu_ref_across_channels)(
const __global INPUT0_TYPE* input,
__global OUTPUT_TYPE* restrict output
#if HAS_FUSED_OPS_DECLS
@@ -3429,7 +3429,7 @@ struct mvn_test_params {
tensor elwise_size;
data_types input_type;
format input_format;
bool accross_channels;
bool across_channels;
bool normalize_variance;
data_types default_type;
format default_format;
38 changes: 19 additions & 19 deletions inference-engine/thirdparty/clDNN/tests/test_cases/mvn_gpu_test.cpp
@@ -22,7 +22,7 @@ using namespace cldnn;
class mvn_gpu_test : public ::testing::TestWithParam<cldnn::format> {};

template <typename T>
void mvn_compute_mean_accross_channels(cldnn::memory& output, bool normalize_variance) {
void mvn_compute_mean_across_channels(cldnn::memory& output, bool normalize_variance) {
auto output_size = output.get_layout().size;

uint32_t batch_size = output_size.batch[0];
@@ -108,7 +108,7 @@ void mvn_compute_mean_within_channels(cldnn::memory& output, bool normalize_vari
}

TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx) {
// mvn accross channels fp32 test with normalize_variance set to false
// mvn across channels fp32 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;

@@ -131,11 +131,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx) {
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, false);
mvn_compute_mean_across_channels<float>(output, false);
}

TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx) {
// mvn accross channels fp32 test with normalize_variance set to false
// mvn across channels fp32 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;

@@ -158,11 +158,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx) {
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, false);
mvn_compute_mean_across_channels<float>(output, false);
}

TEST(mvn_gpu_test, mvn_test_across_channels_bfyx_outside_sqrt_fp16) {
// mvn accross channels fp16 test with normalize_variance set to false
// mvn across channels fp16 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;

@@ -185,11 +185,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_bfyx_outside_sqrt_fp16) {
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, false);
mvn_compute_mean_across_channels<FLOAT16>(output, false);
}

TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_fp16) {
// mvn accross channels fp16 test with normalize_variance set to false
// mvn across channels fp16 test with normalize_variance set to false
using namespace cldnn;
using namespace tests;

@@ -212,11 +212,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_fp16) {
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, false);
mvn_compute_mean_across_channels<FLOAT16>(output, false);
}

TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance) {
// mvn accross channels fp32 test with normalize_variance set to true
// mvn across channels fp32 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;

@@ -239,11 +239,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, true);
mvn_compute_mean_across_channels<float>(output, true);
}

TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance) {
// mvn accross channels fp32 test with normalize_variance set to true
// mvn across channels fp32 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;

@@ -266,11 +266,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance)
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<float>(output, true);
mvn_compute_mean_across_channels<float>(output, true);
}

TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance_fp16) {
// mvn accross channels fp16 test with normalize_variance set to true
// mvn across channels fp16 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;

@@ -293,11 +293,11 @@ TEST(mvn_gpu_test, mvn_test_across_channels_outside_sqrt_bfyx_normalize_variance
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, true);
mvn_compute_mean_across_channels<FLOAT16>(output, true);
}

TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_fp16) {
// mvn accross channels fp16 test with normalize_variance set to true
// mvn across channels fp16 test with normalize_variance set to true
using namespace cldnn;
using namespace tests;

@@ -320,7 +320,7 @@ TEST(mvn_gpu_test, mvn_test_across_channels_inside_sqrt_bfyx_normalize_variance_
EXPECT_EQ(outputs.begin()->first, "mvn");

auto output = outputs.begin()->second.get_memory();
mvn_compute_mean_accross_channels<FLOAT16>(output, true);
mvn_compute_mean_across_channels<FLOAT16>(output, true);
}

TEST(mvn_gpu_test, mvn_test_within_channels_outside_sqrt_bfyx) {
@@ -586,13 +586,13 @@ struct mvn_random_test : ::testing::TestWithParam<mvn_basic_test_params> {
void check_result(memory& output, bool across_channels, bool normalize_variance) {
if (output.get_layout().data_type == data_types::f32) {
if (across_channels) {
mvn_compute_mean_accross_channels<float>(output, normalize_variance);
mvn_compute_mean_across_channels<float>(output, normalize_variance);
} else {
mvn_compute_mean_within_channels<float>(output, normalize_variance);
}
} else if (output.get_layout().data_type == data_types::f16) {
if (across_channels) {
mvn_compute_mean_accross_channels<FLOAT16>(output, normalize_variance);
mvn_compute_mean_across_channels<FLOAT16>(output, normalize_variance);
} else {
mvn_compute_mean_within_channels<FLOAT16>(output, normalize_variance);
}
8 changes: 7 additions & 1 deletion model-optimizer/extensions/front/AttributedPadToPad.py
@@ -1,7 +1,9 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from extensions.ops.ConvertLike import ConvertLike
from mo.front.common.replacement import FrontReplacementPattern
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, rename_nodes
from mo.ops.const import Const
from mo.ops.pad import Pad
@@ -26,7 +28,11 @@ def find_and_replace_pattern(self, graph: Graph):
new_pad.in_port(1).connect(Const(graph, {'value': attr_pad.pads[:, 0]}).create_node().out_port(0))
new_pad.in_port(2).connect(Const(graph, {'value': attr_pad.pads[:, 1]}).create_node().out_port(0))
if attr_pad.soft_get('mode') == 'constant':
new_pad.in_port(3).connect(Const(graph, {'value': attr_pad.fill_value}).create_node().out_port(0))
# create Constant node of proper data type (equal to the data type of the Pad first input)
convert_pad_value = create_op_with_const_inputs(graph, ConvertLike, {0: attr_pad.fill_value},
{'name': original_name + '/pad_value_convert'})
convert_pad_value.in_port(1).connect(new_pad.in_port(0).get_source())
new_pad.in_port(3).connect(convert_pad_value.out_port(0))

attr_pad.out_port(0).get_connection().set_source(new_pad.out_port(0))
graph.remove_node(attr_pad.id)
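
As the new comment above notes, the pad value constant now goes through a `ConvertLike` node so that its element type matches the Pad operation's first (data) input, instead of whatever type `fill_value` happened to be stored with. A rough numpy illustration of the cast this performs (shapes and values are made up):

```python
import numpy as np

data = np.zeros((2, 2), dtype=np.float16)   # data input of the Pad
fill_value = np.float32(0.75)               # fill_value as stored on the attributed Pad

# ConvertLike casts the fill value to the data input's type before it feeds Pad.
converted_fill = fill_value.astype(data.dtype)
padded = np.pad(data, pad_width=1, mode='constant', constant_values=converted_fill)
print(converted_fill.dtype, padded.dtype)   # float16 float16
```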
9 changes: 6 additions & 3 deletions model-optimizer/extensions/front/tf/pad_tf_to_pad.py
@@ -1,13 +1,13 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from extensions.ops.ConvertLike import ConvertLike
from extensions.ops.split import Split
from extensions.ops.transpose import Transpose
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementPattern
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, rename_node
from mo.ops.const import Const
from mo.ops.pad import Pad
from mo.ops.squeeze import Squeeze

@@ -35,8 +35,11 @@ def find_and_replace_pattern(self, graph: Graph):
if not tfpad.in_port(2).disconnected():
tfpad.in_port(2).get_connection().set_destination(new_pad.in_port(3))
else:
new_pad.in_port(3).connect(Const(graph, {'value': 0.0, 'name': new_pad.name + '/value'}
).create_node().out_port(0))
# create Constant node of proper data type (equal to the data type of the Pad first input)
convert_pad_value = create_op_with_const_inputs(graph, ConvertLike, {0: 0.0},
{'name': original_name + '/pad_value_convert'})
convert_pad_value.in_port(1).connect(new_pad.in_port(0).get_source())
new_pad.in_port(3).connect(convert_pad_value.out_port(0))

# convert TF representation of the pads as [N, 2] to MO representation: [N] and [N]
transposed_pads = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0])})
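
Right above, the existing code converts the TF pads tensor from an `[N, 2]` matrix into the two `[N]` vectors (`pads_begin`, `pads_end`) that the MO Pad operation expects, via Transpose (and a subsequent Split). The same reshaping, sketched with numpy on made-up values:

```python
import numpy as np

tf_pads = np.array([[1, 2], [3, 4], [5, 6]])           # TF layout: one (begin, end) pair per axis
pads_begin, pads_end = np.transpose(tf_pads, (1, 0))   # what Transpose([1, 0]) + Split achieve
print(pads_begin, pads_end)                             # [1 3 5] [2 4 6]
```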
@@ -18,6 +18,7 @@

# new Pad layer and inputs
'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad', 'mode': 'constant'},
'convert_like': {'type': 'ConvertLike', 'kind': 'op', 'op': 'ConvertLike'},
**const('pad_begin', int64_array([1, 3, 5])),
**const('pad_end', int64_array([2, 4, 6])),
**const('pad_fill', np.array(0.75)),
@@ -36,7 +37,9 @@ def test_mode_constant(self):
[('placeholder', 'pad', {'in': 0, 'out': 0}),
('pad_begin', 'pad', {'in': 1, 'out': 0}),
('pad_end', 'pad', {'in': 2, 'out': 0}),
('pad_fill', 'pad', {'in': 3, 'out': 0}),
('pad_fill', 'convert_like', {'in': 0, 'out': 0}),
('placeholder', 'convert_like', {'in': 1, 'out': 0}),
('convert_like', 'pad', {'in': 3, 'out': 0}),
('pad', 'result')
],
{}, nodes_with_edges_only=True)
@@ -27,6 +27,7 @@
**const('squeeze_1_axis', int64_array([0])),
'squeeze_2': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'},
**const('squeeze_2_axis', int64_array([0])),
'convert_like': {'type': 'ConvertLike', 'kind': 'op', 'op': 'ConvertLike'},

**const('pad_fill', np.array(0.0)),
}
@@ -73,7 +74,9 @@ def test_2_inputs(self):
{}, nodes_with_edges_only=True)
graph.get_op_nodes(op='TFPad')[0].add_input_port(2)

graph_ref = build_graph(nodes_attributes, common_edges + [('pad_fill', 'pad', {'in': 3, 'out': 0})],
graph_ref = build_graph(nodes_attributes, common_edges + [('pad_fill', 'convert_like', {'in': 0, 'out': 0}),
('placeholder', 'convert_like', {'in': 1, 'out': 0}),
('convert_like', 'pad', {'in': 3, 'out': 0})],
{}, nodes_with_edges_only=True)
self._run_test(graph, graph_ref)

