From 9d2acbc8a5f3267de474822ef8dbedc9cdd3b3cf Mon Sep 17 00:00:00 2001 From: Jozef Daniecki Date: Wed, 21 Jul 2021 07:13:40 +0200 Subject: [PATCH] Select operation revision (#6483) * Add visitor test. * Add Serialization SLT. * Add Select-1 to summarize.py. * Remove select from evaluates map. * Remove few Select cases from manifest. * Fix style. * Refactor CoordinateTransform usage for NUMPY. * Refactor CoordinateTransform usage for PDPD. * Migrate backend tests to template_plugin. * Revert "Fix style." This reverts commit 8298c90104a59344d51c83f00d96eb45bfd0ef94. * Add more template plugin tests. * Fixes for PDPD broadcasting. * Align Select type prop tests with PDPP broadcasting to new implementation. * Remove ngraph:: from types in tests. --- .../tests/functional/op_reference/select.cpp | 139 ++++++++++++++++++ .../serialization/single_layer/select.cpp | 41 ++++++ .../layer_tests_summary/utils/constants.py | 1 + .../runtime/reference/autobroadcast_binop.hpp | 87 +++++++---- ngraph/core/src/op/select.cpp | 55 ++++--- ngraph/test/CMakeLists.txt | 2 +- ngraph/test/backend/select.in.cpp | 95 ------------ ngraph/test/runtime/ie/unit_test.manifest | 10 -- .../runtime/interpreter/evaluates_map.cpp | 19 --- ngraph/test/type_prop/select.cpp | 4 +- ngraph/test/visitors/op/select.cpp | 33 +++++ 11 files changed, 311 insertions(+), 175 deletions(-) create mode 100644 docs/template_plugin/tests/functional/op_reference/select.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/select.cpp delete mode 100644 ngraph/test/backend/select.in.cpp create mode 100644 ngraph/test/visitors/op/select.cpp diff --git a/docs/template_plugin/tests/functional/op_reference/select.cpp b/docs/template_plugin/tests/functional/op_reference/select.cpp new file mode 100644 index 00000000000000..c4dec3f8172e83 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/select.cpp @@ -0,0 +1,139 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace ngraph; +using namespace InferenceEngine; + +struct SelectParams { + template + SelectParams(const element::Type& data_type, const op::AutoBroadcastSpec& broadcast, const PartialShape& select_input_pshape, + const std::vector& select_input, const PartialShape& if_input_pshape, const std::vector& if_input, + const PartialShape& else_input_pshape, const std::vector& else_input, const std::vector& expected_output) + : data_type(data_type), + broadcast(broadcast), + select_input_pshape(select_input_pshape), + select_input(CreateBlob(element::boolean, select_input)), + if_input_pshape(if_input_pshape), + if_input(CreateBlob(data_type, if_input)), + else_input_pshape(else_input_pshape), + else_input(CreateBlob(data_type, else_input)), + expected_output(CreateBlob(data_type, expected_output)) {} + + element::Type data_type; + op::AutoBroadcastSpec broadcast; + PartialShape select_input_pshape; + InferenceEngine::Blob::Ptr select_input; + PartialShape if_input_pshape; + InferenceEngine::Blob::Ptr if_input; + PartialShape else_input_pshape; + InferenceEngine::Blob::Ptr else_input; + InferenceEngine::Blob::Ptr expected_output; +}; + +class ReferenceSelectLayerTest : public testing::TestWithParam, public CommonReferenceTest { +public: + void SetUp() override { + auto params = GetParam(); + function = CreateFunction(params.data_type, params.broadcast, params.select_input_pshape, 
params.if_input_pshape, params.else_input_pshape); + inputData = {params.select_input, params.if_input, params.else_input}; + refOutData = {params.expected_output}; + } + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "data_type=" << param.data_type << "_"; + result << "broadcast=" << param.broadcast.m_type << "_"; + result << "select_shape=" << param.select_input_pshape << "_"; + result << "if_shape=" << param.if_input_pshape << "_"; + result << "else_shape=" << param.else_input_pshape; + return result.str(); + } + +private: + static std::shared_ptr CreateFunction(const element::Type& data_type, const op::AutoBroadcastSpec& broadcast, + const PartialShape& select_pshape, const PartialShape& if_pshape, const PartialShape& else_pshape) { + auto A = std::make_shared(element::boolean, select_pshape); + auto B = std::make_shared(data_type, if_pshape); + auto C = std::make_shared(data_type, else_pshape); + return std::make_shared(std::make_shared(A, B, C, broadcast), ParameterVector {A, B, C}); + } +}; + +TEST_P(ReferenceSelectLayerTest, CompareWithHardcodedRefs) { + Exec(); +} + +INSTANTIATE_TEST_SUITE_P(smoke_Select_With_Hardcoded_Refs, ReferenceSelectLayerTest, + ::testing::Values( + // fp32, no brodcasting + SelectParams(element::f32, // if/else/output data type + op::AutoBroadcastType::NONE, // broadcasting type + PartialShape {2, 2, 2}, // select shape + std::vector {0, 1, 1, 0, 0, 1, 0, 1}, // select data + PartialShape {2, 2, 2}, // if shape + std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data + PartialShape {2, 2, 2}, // else shape + std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data + std::vector {11, 2, 3, 14, 15, 6, 17, 8}), // expected output data + // i32, no brodcasting + SelectParams(element::i32, // if/else/output data type + op::AutoBroadcastType::NONE, // broadcasting type + PartialShape {2, 2, 2}, // select shape + std::vector {0, 1, 1, 0, 0, 1, 0, 1}, // select data + PartialShape {2, 2, 2}, // if shape + std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data + PartialShape {2, 2, 2}, // else shape + std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data + std::vector {11, 2, 3, 14, 15, 6, 17, 8}), // expected output data + // fp32, numpy brodcasting + SelectParams(element::f32, // if/else/output data type + op::AutoBroadcastType::NUMPY, // broadcasting type + PartialShape {4}, // select shape + std::vector {0, 1, 1, 0}, // select data + PartialShape {4}, // if shape + std::vector {1, 2, 3, 4}, // if data + PartialShape {2, 4}, // else shape + std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data + std::vector {11, 2, 3, 14, 15, 2, 3, 18}), // expected output data + // i32, numpy brodcasting + SelectParams(element::i32, // if/else/output data type + op::AutoBroadcastType::NUMPY, // broadcasting type + PartialShape {4}, // select shape + std::vector {0, 1, 1, 0}, // select data + PartialShape {4}, // if shape + std::vector {1, 2, 3, 4}, // if data + PartialShape {2, 4}, // else shape + std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data + std::vector {11, 2, 3, 14, 15, 2, 3, 18}), // expected output data + // fp32, pdpd brodcasting + SelectParams(element::f32, // if/else/output data type + {op::AutoBroadcastType::PDPD, -1}, // broadcasting type + PartialShape {2, 4}, // select shape + std::vector {0, 0, 0, 0, 0, 1, 1, 1}, // select data + PartialShape {2, 4}, // if shape + std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data + PartialShape {4}, // else shape + std::vector 
{11, 12, 13, 14}, // else data + std::vector {11, 12, 13, 14, 11, 6, 7, 8}), // expected output data + // i32, pdpd brodcasting + SelectParams(element::i32, // if/else/output data type + {op::AutoBroadcastType::PDPD, -1}, // broadcasting type + PartialShape {2, 4}, // select shape + std::vector {0, 0, 0, 0, 0, 1, 1, 1}, // select data + PartialShape {2, 4}, // if shape + std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data + PartialShape {4}, // else shape + std::vector {11, 12, 13, 14}, // else data + std::vector {11, 12, 13, 14, 11, 6, 7, 8})), // expected output data + ReferenceSelectLayerTest::getTestCaseName); diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/select.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/select.cpp new file mode 100644 index 00000000000000..ad4712a0b0b3ad --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/select.cpp @@ -0,0 +1,41 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/select.hpp" + +#include +using namespace LayerTestsDefinitions; + +const std::vector inputPrecision = { + InferenceEngine::Precision::I8, InferenceEngine::Precision::I16, + InferenceEngine::Precision::I32, InferenceEngine::Precision::FP16, + InferenceEngine::Precision::FP32}; + +const std::vector>> noneShapes = { + {{2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}, {2, 3, 4, 5, 6}}}; + +const auto noneCases = ::testing::Combine( + ::testing::ValuesIn(noneShapes), ::testing::ValuesIn(inputPrecision), + ::testing::Values(ngraph::op::AutoBroadcastSpec::NONE), + ::testing::Values(CommonTestUtils::DEVICE_CPU)); + +const std::vector>> numpyShapes = { + {{5, 1, 2, 1}, {8, 1, 9, 1, 1}, {5, 1, 2, 1}}}; + +const auto numpyCases = ::testing::Combine( + ::testing::ValuesIn(numpyShapes), ::testing::ValuesIn(inputPrecision), + ::testing::Values(ngraph::op::AutoBroadcastSpec::NUMPY), + ::testing::Values(CommonTestUtils::DEVICE_CPU)); + +TEST_P(SelectLayerTest, Serialize) { + Serialize(); +} + +INSTANTIATE_TEST_SUITE_P(smoke_Serialization_SelectLayerTest_none, + SelectLayerTest, noneCases, + SelectLayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Serialization_SelectLayerTest_numpy, + SelectLayerTest, numpyCases, + SelectLayerTest::getTestCaseName); diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py index d22707a1eb4f7f..fe6cc530fb4e5b 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py @@ -93,6 +93,7 @@ 'Round-5', 'SpaceToDepth-1', 'ScatterNDUpdate-4', + 'Select-1', 'ShapeOf-1', 'ShapeOf-3', 'ShuffleChannels-1', diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp index 46604bf3865a71..f34fbbc5254548 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/autobroadcast_binop.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include "ngraph/coordinate_transform.hpp" @@ -439,23 +440,38 @@ namespace ngraph arg1_padded_shape[i]})); } - 
NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform arg0_transform(arg0_squeezed_shape); - CoordinateTransform arg1_transform(arg1_squeezed_shape); - CoordinateTransform arg2_transform(arg2_squeezed_shape); - CoordinateTransform output_transform(output_shape); + CoordinateTransformBasic arg0_transform(arg0_squeezed_shape); + CoordinateTransformBasic arg1_transform(arg1_squeezed_shape); + CoordinateTransformBasic arg2_transform(arg2_squeezed_shape); + CoordinateTransformBasic output_transform(output_shape); + + const auto arg0_strides = row_major_strides(arg0_squeezed_shape); + const auto arg1_strides = row_major_strides(arg1_squeezed_shape); + const auto arg2_strides = row_major_strides(arg2_squeezed_shape); + const auto output_strides = row_major_strides(output_shape); for (const Coordinate& output_coord : output_transform) { - Coordinate arg0_coord = reduce(output_coord, arg0_squeezed_axes, false); - Coordinate arg1_coord = reduce(output_coord, arg1_squeezed_axes, false); - Coordinate arg2_coord = reduce(output_coord, arg2_squeezed_axes, false); - out[output_transform.index(output_coord)] = - elementwise_functor(arg0[arg0_transform.index(arg0_coord)], - arg1[arg1_transform.index(arg1_coord)], - arg2[arg2_transform.index(arg2_coord)]); + const Coordinate arg0_coord = + reduce(output_coord, arg0_squeezed_axes, false); + const Coordinate arg1_coord = + reduce(output_coord, arg1_squeezed_axes, false); + const Coordinate arg2_coord = + reduce(output_coord, arg2_squeezed_axes, false); + + const size_t arg0_idx = std::inner_product( + arg0_coord.begin(), arg0_coord.end(), arg0_strides.begin(), 0); + const size_t arg1_idx = std::inner_product( + arg1_coord.begin(), arg1_coord.end(), arg1_strides.begin(), 0); + const size_t arg2_idx = std::inner_product( + arg2_coord.begin(), arg2_coord.end(), arg2_strides.begin(), 0); + const size_t output_idx = std::inner_product(output_coord.begin(), + output_coord.end(), + output_strides.begin(), + 0); + out[output_idx] = + elementwise_functor(arg0[arg0_idx], arg1[arg1_idx], arg2[arg2_idx]); } - NGRAPH_SUPPRESS_DEPRECATED_END } break; case op::AutoBroadcastType::PDPD: @@ -475,7 +491,9 @@ namespace ngraph arg0_padded_shape.pop_back(); } - for (int64_t i = 0; i < axis; ++i) + for (int64_t i = 0; + (i < axis) && (arg0_padded_shape.size() < arg1_shape.size()); + ++i) { arg0_padded_shape.insert(arg0_padded_shape.begin(), 1); } @@ -489,8 +507,9 @@ namespace ngraph { arg2_padded_shape.pop_back(); } - - for (int64_t i = 0; i < axis; ++i) + for (int64_t i = 0; + (i < axis) && (arg2_padded_shape.size() < arg1_shape.size()); + ++i) { arg2_padded_shape.insert(arg2_padded_shape.begin(), 1); } @@ -525,22 +544,34 @@ namespace ngraph } } - NGRAPH_SUPPRESS_DEPRECATED_START - CoordinateTransform arg0_transform(arg0_squeezed_shape); - CoordinateTransform arg1_transform(arg1_shape); - CoordinateTransform arg2_transform(arg2_squeezed_shape); - CoordinateTransform output_transform(arg1_shape); + CoordinateTransformBasic arg0_transform(arg0_squeezed_shape); + CoordinateTransformBasic arg1_transform(arg1_shape); + CoordinateTransformBasic arg2_transform(arg2_squeezed_shape); + CoordinateTransformBasic output_transform(arg1_shape); + + const auto arg0_strides = row_major_strides(arg0_squeezed_shape); + const auto arg2_strides = row_major_strides(arg2_squeezed_shape); + const auto output_strides = row_major_strides(arg1_shape); for (const Coordinate& output_coord : output_transform) { - Coordinate arg0_coord = reduce(output_coord, arg0_squeezed_axes, false); - Coordinate arg2_coord 
= reduce(output_coord, arg2_squeezed_axes, false); - out[output_transform.index(output_coord)] = - elementwise_functor(arg0[arg0_transform.index(arg0_coord)], - arg1[arg1_transform.index(output_coord)], - arg2[arg2_transform.index(arg2_coord)]); + const Coordinate arg0_coord = + reduce(output_coord, arg0_squeezed_axes, false); + const Coordinate arg2_coord = + reduce(output_coord, arg2_squeezed_axes, false); + + const size_t arg0_idx = std::inner_product( + arg0_coord.begin(), arg0_coord.end(), arg0_strides.begin(), 0); + const size_t arg1_idx = std::inner_product( + output_coord.begin(), output_coord.end(), output_strides.begin(), 0); + const size_t arg2_idx = std::inner_product( + arg2_coord.begin(), arg2_coord.end(), arg2_strides.begin(), 0); + const size_t output_idx = std::inner_product( + output_coord.begin(), output_coord.end(), output_strides.begin(), 0); + + out[output_idx] = + elementwise_functor(arg0[arg0_idx], arg1[arg1_idx], arg2[arg2_idx]); } - NGRAPH_SUPPRESS_DEPRECATED_END } } } diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index 657845b4cc95b5..2295b99be7cd9a 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -45,28 +45,43 @@ void op::v1::Select::validate_and_infer_types() element::Type::merge(result_et, get_input_element_type(1), get_input_element_type(2)), "Argument 1 and 2 element types must match."); - PartialShape result_shape = get_input_partial_shape(2); - for (int i = 1; i >= 0; i--) + PartialShape result_shape; + if (get_auto_broadcast().m_type == op::AutoBroadcastType::PDPD) { - if (get_auto_broadcast().m_type == op::AutoBroadcastType::NONE) - { - NODE_VALIDATION_CHECK( - this, - PartialShape::merge_into(result_shape, get_input_partial_shape(i)), - "Argument shapes are inconsistent."); - } - else if (get_auto_broadcast().m_type == op::AutoBroadcastType::NUMPY || - get_auto_broadcast().m_type == op::AutoBroadcastType::PDPD) - { - NODE_VALIDATION_CHECK(this, - PartialShape::broadcast_merge_into(result_shape, - get_input_partial_shape(i), - get_auto_broadcast()), - "Argument shapes are inconsistent."); - } - else + result_shape = get_input_partial_shape(1); // 'then' tensor + NODE_VALIDATION_CHECK(this, + PartialShape::broadcast_merge_into( + result_shape, get_input_partial_shape(2), get_auto_broadcast()), + "'Else' tensor shape is not broadcastable."); + NODE_VALIDATION_CHECK(this, + PartialShape::broadcast_merge_into( + result_shape, get_input_partial_shape(0), get_auto_broadcast()), + "'Cond' tensor shape is not broadcastable."); + } + else + { + result_shape = get_input_partial_shape(2); + for (int i = 1; i >= 0; i--) { - NODE_VALIDATION_CHECK(this, false, "Unsupported auto broadcast specification"); + if (get_auto_broadcast().m_type == op::AutoBroadcastType::NONE) + { + NODE_VALIDATION_CHECK( + this, + PartialShape::merge_into(result_shape, get_input_partial_shape(i)), + "Argument shapes are inconsistent."); + } + else if (get_auto_broadcast().m_type == op::AutoBroadcastType::NUMPY) + { + NODE_VALIDATION_CHECK(this, + PartialShape::broadcast_merge_into(result_shape, + get_input_partial_shape(i), + get_auto_broadcast()), + "Argument shapes are inconsistent."); + } + else + { + NODE_VALIDATION_CHECK(this, false, "Unsupported auto broadcast specification"); + } } } set_output_type(0, result_et, result_shape); diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index e27bcc369652b5..c8a895bda3771a 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -300,6 +300,7 @@ 
set(SRC visitors/op/rnn_cell.cpp visitors/op/roi_pooling.cpp visitors/op/round.cpp + visitors/op/select.cpp visitors/op/space_to_depth.cpp visitors/op/selu.cpp visitors/op/shuffle_channels.cpp @@ -487,7 +488,6 @@ set(MULTI_TEST_SRC backend/round.in.cpp backend/scatter_nd_update.in.cpp backend/space_to_depth.in.cpp - backend/select.in.cpp backend/selu.in.cpp backend/shape_of.in.cpp backend/shuffle_channels.in.cpp diff --git a/ngraph/test/backend/select.in.cpp b/ngraph/test/backend/select.in.cpp deleted file mode 100644 index 9da4363e110dfb..00000000000000 --- a/ngraph/test/backend/select.in.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/test_control.hpp" -#include "util/test_tools.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -NGRAPH_TEST(${BACKEND_NAME}, select) -{ - Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, vector{0, 1, 1, 0, 0, 1, 0, 1}); - auto b = backend->create_tensor(element::f32, shape); - copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8}); - auto c = backend->create_tensor(element::f32, shape); - copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f32, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b, c}); - EXPECT_TRUE(test::all_close_f((vector{11, 2, 3, 14, 15, 6, 17, 8}), - read_vector(result), - MIN_FLOAT_TOLERANCE_BITS)); -} - -NGRAPH_TEST(${BACKEND_NAME}, select_v1) -{ - auto A = make_shared(element::boolean, Shape{4}); - auto B = make_shared(element::f32, Shape{4}); - auto C = make_shared(element::f32, Shape{2, 4}); - auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, Shape{4}); - copy_data(a, vector{0, 1, 1, 0}); - auto b = backend->create_tensor(element::f32, Shape{4}); - copy_data(b, vector{1, 2, 3, 4}); - auto c = backend->create_tensor(element::f32, Shape{2, 4}); - copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f32, Shape{2, 4}); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b, c}); - EXPECT_TRUE( - test::all_close_f((vector{11, 2, 3, 14, 15, 2, 3, 18}), read_vector(result))); -} - -NGRAPH_TEST(${BACKEND_NAME}, select_double) -{ - Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::f64, shape); - auto C = make_shared(element::f64, shape); - auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); - copy_data(a, vector{0, 1, 1, 
0, 0, 1, 0, 1}); - auto b = backend->create_tensor(element::f64, shape); - copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8}); - auto c = backend->create_tensor(element::f64, shape); - copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f64, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b, c}); - EXPECT_TRUE(test::all_close_f((vector{11, 2, 3, 14, 15, 6, 17, 8}), - read_vector(result))); -} diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest index bc65f4700a527a..dbd2f28aea5108 100644 --- a/ngraph/test/runtime/ie/unit_test.manifest +++ b/ngraph/test/runtime/ie/unit_test.manifest @@ -141,9 +141,6 @@ onnx_model_argmin_select_last_index # Constant has zero dimension that is not allowable onnx_dyn_shapes_transpose -# Select layer with name 'y' has 'Mask' input's rank more than broadcasted 'Then' and 'Else' inputs' ranks -onnx_model_where - # y Incorrect input precision. Only FP32 is supported! onnx_model_erf_int32 @@ -449,7 +446,6 @@ sum_trivial_in_double sum_stable_acc_double sum_stable_simple_double softmax_axis_3d_double -select_double quantize_clamp_int32 max_3d_to_scalar_double argmin_trivial_in_double @@ -485,7 +481,6 @@ sigmoid_bprop_n1c1h4 IE_CPU.onnx_roi_align_f32 # [NOT_IMPLEMENTED] Input image format BOOL is not supported yet... -select not logical_xor logical_or @@ -519,9 +514,6 @@ all_2x2x3_eliminate_dims_0_1_2 all_dynamic_axis all_change_axis -# Positive input shape should be the same as negative input shape -select_v1 - # Cannot cast ngraph node Reverse to CNNLayer! reverse_1d_0 reverse_2d_0 @@ -855,8 +847,6 @@ backwards_power backwards_relu backwards_replace_slice backwards_reshape -backwards_select -backwards_select_nested backwards_sigmoid backwards_sign backwards_sin diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 8e29acd20fbfa6..80341055763f99 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -66,7 +66,6 @@ #include #include #include -#include #include #include #include @@ -1607,24 +1606,6 @@ namespace return true; } - template - bool evaluate(const shared_ptr& op, - const HostTensorVector& outputs, - const HostTensorVector& inputs) - { - using T = typename element_type_traits::value_type; - - runtime::reference::select(inputs[0]->get_data_ptr(), - inputs[1]->get_data_ptr(), - inputs[2]->get_data_ptr(), - outputs[0]->get_data_ptr(), - op->get_input_shape(0), - op->get_input_shape(1), - op->get_input_shape(2), - op->get_auto_broadcast()); - return true; - } - template bool evaluate(const shared_ptr& op, const HostTensorVector& outputs, diff --git a/ngraph/test/type_prop/select.cpp b/ngraph/test/type_prop/select.cpp index 64313c2be1a000..1eea5a5b94c974 100644 --- a/ngraph/test/type_prop/select.cpp +++ b/ngraph/test/type_prop/select.cpp @@ -298,13 +298,13 @@ INSTANTIATE_TEST_SUITE_P( SelectParams({{4}, {4}, {2, 4}, {2, 4}}, {element::dynamic, element::dynamic, element::i8, element::i8}, op::AutoBroadcastType::NUMPY), - SelectParams({{2}, {2}, {2, 4}, {2, 4}}, + SelectParams({{2}, {2, 4}, {2}, {2, 4}}, {element::boolean, element::f32, element::dynamic, element::f32}, {op::AutoBroadcastType::PDPD, 0}), // TODO: Whats the right behavior here? 
         // SelectParams({{2}, {2, 4}, {2}, {2, 4}}, {element::boolean, element::f32,
         // element::dynamic, element::f32}, {op::AutoBroadcastType::PDPD, 0}),
-        SelectParams({{4}, {4}, {2, 4}, {2, 4}},
+        SelectParams({{4}, {2, 4}, {4}, {2, 4}},
                      {element::boolean, element::f32, element::dynamic, element::f32},
                      {op::AutoBroadcastType::PDPD, 1})),
     PrintToDummyParamName());
diff --git a/ngraph/test/visitors/op/select.cpp b/ngraph/test/visitors/op/select.cpp
new file mode 100644
index 00000000000000..4d00ffa5c2e737
--- /dev/null
+++ b/ngraph/test/visitors/op/select.cpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "util/visitor.hpp"
+
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
+TEST(attributes, select)
+{
+    NodeBuilder::get_ops().register_factory<opset1::Select>();
+    auto in_cond = std::make_shared<op::Parameter>(element::boolean, Shape{3, 2});
+    auto in_then = std::make_shared<op::Parameter>(element::f32, Shape{3, 2});
+    auto in_else = std::make_shared<op::Parameter>(element::f32, Shape{3, 2});
+
+    auto auto_broadcast = op::AutoBroadcastType::NUMPY;
+
+    auto select = std::make_shared<opset1::Select>(in_cond, in_then, in_else, auto_broadcast);
+    NodeBuilder builder(select);
+
+    const auto expected_attr_count = 1;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+
+    auto g_select = as_type_ptr<opset1::Select>(builder.create());
+    EXPECT_EQ(g_select->get_autob(), select->get_autob());
+}
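
Note on the autobroadcast_binop.hpp refactor above: the deprecated `CoordinateTransform::index()` calls are replaced by `row_major_strides()` plus `std::inner_product` over each coordinate. Below is a minimal standalone sketch of that indexing scheme, not the OpenVINO code itself; the shape, coordinate, and local `row_major_strides` helper are illustrative stand-ins for the library's own helpers.

```cpp
#include <cstddef>
#include <numeric>
#include <vector>

// Row-major strides for a shape, e.g. {2, 3, 4} -> {12, 4, 1}.
static std::vector<size_t> row_major_strides(const std::vector<size_t>& shape)
{
    std::vector<size_t> strides(shape.size(), 1);
    for (size_t i = shape.size(); i > 1; --i)
        strides[i - 2] = strides[i - 1] * shape[i - 1];
    return strides;
}

int main()
{
    const std::vector<size_t> shape{2, 3, 4};
    const auto strides = row_major_strides(shape); // {12, 4, 1}
    const std::vector<size_t> coord{1, 2, 3};

    // Flat offset = sum(coord[i] * strides[i]) = 1*12 + 2*4 + 3*1 = 23,
    // the value a row-major CoordinateTransform::index(coord) would produce.
    const size_t idx =
        std::inner_product(coord.begin(), coord.end(), strides.begin(), size_t{0});
    return idx == 23 ? 0 : 1;
}
```

The strides are computed once per input shape outside the coordinate loop, so each broadcast iteration only pays for one dot product per argument instead of a full `CoordinateTransform` lookup.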