From 167073fc1f029f210853b915fad91c534da871a9 Mon Sep 17 00:00:00 2001
From: Mikhail Treskin
Date: Thu, 10 Sep 2020 18:14:03 +0300
Subject: [PATCH] Some more fixes

---
 .../single_layer_tests/group_convolution.cpp  |   2 +-
 .../include/single_layer_tests/cum_sum.hpp    |  20 +-
 ngraph/test/backend/not.in.cpp                |   4 +-
 ngraph/test/runtime/CMakeLists.txt            |   4 +
 ngraph/test/runtime/ie/ie_executable.cpp      |   6 +-
 ngraph/test/runtime/op/group_conv.cpp         | 335 ++++++++++++++++++
 ngraph/test/runtime/op/group_conv.hpp         | 142 ++++++++
 ngraph/test/runtime/opset0_tbl.hpp            |   2 -
 ngraph/test/runtime/pass/opset0_downgrade.cpp |  50 ---
 ngraph/test/runtime/pass/opset1_upgrade.cpp   |  55 ---
 ngraph/test/type_prop/binary_elementwise.cpp  |  44 ---
 ngraph/test/type_prop/select.cpp              |  42 ---
 12 files changed, 498 insertions(+), 208 deletions(-)
 create mode 100644 ngraph/test/runtime/op/group_conv.cpp
 create mode 100644 ngraph/test/runtime/op/group_conv.hpp

diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp
index e1a7d620f3c9bd..752b8d6584e1d7 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/group_convolution.cpp
@@ -49,7 +49,7 @@ INSTANTIATE_TEST_CASE_P(GroupConvolution2D_ExplicitPadding, GroupConvolutionLaye
                         ::testing::Combine(
                                 groupConv2DParams_ExplicitPadding,
                                 ::testing::ValuesIn(netPrecisions),
-                                ::testing::Values(std::vector<size_t>({1, 16, 10, 10})),
+                                ::testing::Values(std::vector<size_t>({1, 16, 30, 30})),
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU)),
                         GroupConvolutionLayerTest::getTestCaseName);
 
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp
index 3f82b1f1d2e8e7..2f170cab9d402b 100644
--- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp
@@ -12,20 +12,20 @@
 
 namespace LayerTestsDefinitions {
 
-    typedef std::tuple<
-            InferenceEngine::SizeVector, // Input shapes
-            InferenceEngine::Precision,  // Input precision
-            int64_t,                     // Axis
-            bool,                        // Exclusive
-            bool,                        // Reverse
-            std::string> cumSumParams;   // Device name
+typedef std::tuple<
+        InferenceEngine::SizeVector, // Input shapes
+        InferenceEngine::Precision,  // Input precision
+        int64_t,                     // Axis
+        bool,                        // Exclusive
+        bool,                        // Reverse
+        std::string> cumSumParams;   // Device name
 
 class CumSumLayerTest : public testing::WithParamInterface<cumSumParams>, virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<cumSumParams> obj);
 
-    protected:
-        void SetUp() override;
-    };
+protected:
+    void SetUp() override;
+};
 
 }  // namespace LayerTestsDefinitions
 
diff --git a/ngraph/test/backend/not.in.cpp b/ngraph/test/backend/not.in.cpp
index 7816176d03876f..c59654b048275b 100644
--- a/ngraph/test/backend/not.in.cpp
+++ b/ngraph/test/backend/not.in.cpp
@@ -49,7 +49,7 @@ NGRAPH_TEST(${BACKEND_NAME}, not)
 {
     Shape shape{2, 2};
     auto A = make_shared<op::Parameter>(element::boolean, shape);
-    auto f = make_shared<Function>(make_shared<op::Not>(A), ParameterVector{A});
+    auto f = make_shared<Function>(make_shared<op::v1::LogicalNot>(A), ParameterVector{A});
 
     std::vector<char> a{1, 0, 1, 0};
 
@@ -63,7 +63,7 @@ NGRAPH_TEST(${BACKEND_NAME}, not_i32)
 {
     Shape shape{2, 2};
     auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto f = make_shared<Function>(make_shared<op::Not>(A), ParameterVector{A});
+    auto f = make_shared<Function>(make_shared<op::v1::LogicalNot>(A), ParameterVector{A});
 
     std::vector<int32_t> a{1, 0, 2, 0};
 
diff --git a/ngraph/test/runtime/CMakeLists.txt b/ngraph/test/runtime/CMakeLists.txt
index cd59a03daece59..315913453df114 100644
--- a/ngraph/test/runtime/CMakeLists.txt
+++ b/ngraph/test/runtime/CMakeLists.txt
@@ -29,6 +29,8 @@ set (SRC
     op/avg_pool.hpp
     op/convolution.cpp
     op/convolution.hpp
+    op/group_conv.cpp
+    op/group_conv.hpp
     pass/dyn_elimination.cpp
     pass/dyn_elimination.hpp
     pass/fused_op_decomposition.cpp
@@ -45,6 +47,8 @@ set (SRC
     pass/opset0_downgrade.hpp
     pass/opset1_downgrade.cpp
     pass/opset1_downgrade.hpp
+    pass/opset1_upgrade.cpp
+    pass/opset1_upgrade.hpp
     )
 
 add_library(ngraph_backend SHARED ${SRC})
diff --git a/ngraph/test/runtime/ie/ie_executable.cpp b/ngraph/test/runtime/ie/ie_executable.cpp
index b99278fd8296b2..ec3c0e01afaf79 100644
--- a/ngraph/test/runtime/ie/ie_executable.cpp
+++ b/ngraph/test/runtime/ie/ie_executable.cpp
@@ -20,7 +20,7 @@
 #include "ngraph/pass/manager.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/type/element_type.hpp"
-//#include "pass/opset1_upgrade.hpp"
+#include "pass/opset1_upgrade.hpp"
 
 using namespace std;
 using namespace ngraph;
@@ -93,7 +93,9 @@ runtime::ie::IE_Executable::IE_Executable(shared_ptr<Function> func, string devi
     : m_device{device}
 {
     static std::set<NodeTypeInfo> ie_ops = get_ie_ops();
-
+    pass::Manager passes;
+    passes.register_pass<pass::Opset1Upgrade>();
+    passes.run_passes(func);
     for (const auto& node : func->get_ops())
     {
         if (ie_ops.find(node->get_type_info()) == ie_ops.end())
diff --git a/ngraph/test/runtime/op/group_conv.cpp b/ngraph/test/runtime/op/group_conv.cpp
new file mode 100644
index 00000000000000..cd14a8c8470a84
--- /dev/null
+++ b/ngraph/test/runtime/op/group_conv.cpp
@@ -0,0 +1,335 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include <numeric>
+
+#include "convolution.hpp"
+#include "group_conv.hpp"
+#include "ngraph/attribute_visitor.hpp"
+#include "ngraph/builder/reshape.hpp"
+#include "ngraph/builder/split.hpp"
+#include "ngraph/op/concat.hpp"
+#include "ngraph/op/convolution.hpp"
+#include "ngraph/op/reshape.hpp"
+#include "ngraph/op/slice.hpp"
+#include "ngraph/validation_util.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+//------------------------------------------------------------------------------
+//                        v0::GroupConvolution
+//------------------------------------------------------------------------------
+
+constexpr NodeTypeInfo op::v0::GroupConvolution::type_info;
+
+op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
+                                           const Output<Node>& filters,
+                                           const Strides& window_movement_strides,
+                                           const Strides& window_dilation_strides,
+                                           const CoordinateDiff& padding_below,
+                                           const CoordinateDiff& padding_above,
+                                           const Strides& data_dilation_strides,
+                                           const size_t groups,
+                                           const PadType& pad_type)
+    : FusedOp({data_batch, filters})
+    , m_window_movement_strides(window_movement_strides)
+    , m_window_dilation_strides(window_dilation_strides)
+    , m_padding_below(padding_below)
+    , m_padding_above(padding_above)
+    , m_data_dilation_strides(data_dilation_strides)
+    , m_groups(groups)
+    , m_pad_type(pad_type)
+    , m_groups_in_filters(false)
+{
+    constructor_validate_and_infer_types();
+}
+
+op::v0::GroupConvolution::GroupConvolution(const Output<Node>& data_batch,
+                                           const Output<Node>& filters,
+                                           const Strides& window_movement_strides,
+                                           const Strides& window_dilation_strides,
+                                           const CoordinateDiff& padding_below,
+                                           const CoordinateDiff& padding_above,
+                                           const Strides& data_dilation_strides,
+                                           const PadType& pad_type)
+    : FusedOp({data_batch, filters})
+    , m_window_movement_strides(window_movement_strides)
+    , m_window_dilation_strides(window_dilation_strides)
+    , m_padding_below(padding_below)
+    , m_padding_above(padding_above)
+    , m_data_dilation_strides(data_dilation_strides)
+    , m_groups(0)
+    , m_pad_type(pad_type)
+    , m_groups_in_filters(true)
+{
+    constructor_validate_and_infer_types();
+}
+
+void op::v0::GroupConvolution::pre_validate_and_infer_types()
+{
+    auto data_shape = get_input_partial_shape(0);
+    auto filters_shape = get_input_partial_shape(1);
+
+    if (data_shape.is_static() && filters_shape.is_static())
+    {
+        // Update groups
+        if (m_groups_in_filters)
+        {
+            m_groups = get_input_partial_shape(1)[0].get_length();
+        }
+
+        // Data channels
+        NODE_VALIDATION_CHECK(this,
+                              data_shape.to_shape()[1] % get_groups() == 0,
+                              "Data channels not a multiple of group size");
+        // Output channels
+        NODE_VALIDATION_CHECK(this,
+                              filters_shape.to_shape()[0] % get_groups() == 0,
+                              "# Filters not a multiple of group size");
+
+        // Input Filters
+        NODE_VALIDATION_CHECK(this,
+                              (filters_shape.to_shape()[m_groups_in_filters ? 2 : 1] *
+                               get_groups()) == data_shape.to_shape()[1],
+                              "Incorrect number of channels per filter");
+    }
+    else
+    {
+        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
+    }
+}
+
+void op::v0::GroupConvolution::post_validate_and_infer_types()
+{
+    auto data_shape = get_input_partial_shape(0);
+    auto filters_shape = get_input_partial_shape(1);
+    if (data_shape.is_static() && filters_shape.is_static())
+    {
+        if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
+        {
+            m_padding_below.clear();
+            m_padding_above.clear();
+            auto filter_shape = filters_shape.to_shape();
+            filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
+            infer_auto_padding(data_shape.to_shape(),
+                               filter_shape,
+                               m_window_movement_strides,
+                               m_window_dilation_strides,
+                               m_pad_type,
+                               m_padding_above,
+                               m_padding_below);
+        }
+    }
+}
+
+Shape op::v0::GroupConvolution::get_weights_dimensions() const
+{
+    auto data_shape = get_input_shape(0);
+    auto weights_shape = get_input_shape(1);
+    // check if weights already includes groups
+    if (m_groups_in_filters)
+    {
+        return weights_shape;
+    }
+    // reshape weights into 5d tensors that includes groups
+    const size_t OC = 0;
+    const size_t OC_IN_OUTPUT = 1;
+    const size_t IC = 1;
+    Shape weights_shape_groups{weights_shape};
+    // adjust output and channel given a number of groups
+
+    weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups();
+    weights_shape_groups.at(IC) = data_shape.at(IC) / get_groups();
+    // push_front the number of groups
+    weights_shape_groups.insert(weights_shape_groups.begin(), get_groups());
+    return weights_shape_groups;
+}
+
+shared_ptr<Node> op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const
+{
+    check_new_args_count(this, new_args);
+
+    if (m_groups_in_filters)
+    {
+        return make_shared<op::v0::GroupConvolution>(new_args.at(0),
+                                                     new_args.at(1),
+                                                     get_window_movement_strides(),
+                                                     get_window_dilation_strides(),
+                                                     get_padding_below(),
+                                                     get_padding_above(),
+                                                     get_data_dilation_strides(),
+                                                     get_pad_type());
+    }
+    else
+    {
+        return make_shared<op::v0::GroupConvolution>(new_args.at(0),
+                                                     new_args.at(1),
+                                                     get_window_movement_strides(),
+                                                     get_window_dilation_strides(),
+                                                     get_padding_below(),
+                                                     get_padding_above(),
+                                                     get_data_dilation_strides(),
+                                                     get_groups(),
+                                                     get_pad_type());
+    }
+}
+
+OutputVector op::v0::GroupConvolution::decompose_op() const
+{
+    auto data = input_value(0);
+    auto filters = input_value(1);
+    auto filters_shape = get_input_shape(1);
+    // Split one convolution op to N ops where N is the number of groups
+    // and concat results after computation.
+    NodeVector convolution_nodes;
+
+    // slice data
+    auto sliced_data = builder::split(data, get_groups(), 1);
+    // slice filters
+    auto sliced_filters = builder::split(filters, get_groups(), 0);
+    for (std::size_t group{0}; group < get_groups(); ++group)
+    {
+        auto sliced_filter = sliced_filters[group];
+        if (m_groups_in_filters)
+        {
+            // Remove group dimension after slicing
+            sliced_filter = make_shared<op::Reshape>(
+                sliced_filters[group],
+                get_default_order(sliced_filters[group].get_shape().size()),
+                Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape)));
+        }
+        convolution_nodes.push_back(
+            std::make_shared<op::v0::Convolution>(sliced_data[group],
+                                                  sliced_filter,
+                                                  m_window_movement_strides,
+                                                  m_window_dilation_strides,
+                                                  m_padding_below,
+                                                  m_padding_above,
+                                                  m_data_dilation_strides,
+                                                  m_pad_type));
+    }
+    std::size_t concatenation_axis = 1;
+    return {std::make_shared<op::Concat>(convolution_nodes, concatenation_axis)};
+}
+
+//------------------------------------------------------------------------------
+//                        v0::GroupConvolutionBackpropData
+//------------------------------------------------------------------------------
+
+constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info;
+
+op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData(
+    const Output<Node>& data_batch,
+    const Output<Node>& filters,
+    const Output<Node>& output_delta,
+    const Strides& window_movement_strides,
+    const Strides& window_dilation_strides,
+    const CoordinateDiff& padding_below,
+    const CoordinateDiff& padding_above,
+    const size_t groups)
+    : FusedOp({data_batch, filters, output_delta})
+    , m_window_movement_strides(window_movement_strides)
+    , m_window_dilation_strides(window_dilation_strides)
+    , m_padding_below(padding_below)
+    , m_padding_above(padding_above)
+    , m_groups(groups)
+{
+    constructor_validate_and_infer_types();
+}
+
+void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types()
+{
+    element::Type data_element_type = get_input_element_type(2);
+    element::Type filters_elem_type = get_input_element_type(1);
+
+    NODE_VALIDATION_CHECK(this,
+                          data_element_type.is_dynamic() || data_element_type.is_real(),
+                          "Output delta element type must be f16, bf16, f32, f64 or dynamic (got ",
+                          data_element_type,
+                          ").");
+    NODE_VALIDATION_CHECK(this,
+                          filters_elem_type.is_dynamic() || filters_elem_type.is_real(),
+                          "Filters element type must be f16, bf16, f32, f64 or dynamic (got ",
+                          filters_elem_type,
+                          ").");
+
+    PartialShape data_pshape = get_input_partial_shape(0);
+    PartialShape filters_pshape = get_input_partial_shape(1);
+    PartialShape delta_pshape = get_input_partial_shape(2);
+
+    if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic())
+    {
+        set_output_type(0, data_element_type, PartialShape::dynamic());
+    }
+}
+
+shared_ptr<Node>
+    op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const
+{
+    if (new_args.size() != 3)
+    {
+        throw ngraph_error("Incorrect number of new arguments");
+    }
+
+    return make_shared<op::v0::GroupConvolutionBackpropData>(new_args.at(0),
+                                                             new_args.at(1),
+                                                             new_args.at(2),
+                                                             get_window_movement_strides(),
+                                                             get_window_dilation_strides(),
+                                                             get_padding_below(),
+                                                             get_padding_above(),
+                                                             get_groups());
+}
+
+OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const
+{
+    auto filters = input_value(1);
+    auto output_delta = input_value(2);
+    auto data_shape = get_input_shape(0);
+
+    NodeVector sliced_inputs;
+
+    auto groups = get_groups();
+    // slice data shape
+    data_shape[1] /= groups;
+    // slice delta
+    auto sliced_delta = builder::split(output_delta, groups, 1);
+    // slice filters
+    auto sliced_filters = builder::split(filters, groups, 0);
+
+    auto num_spatials = get_window_movement_strides().size();
+
+    for (size_t i = 0; i < groups; ++i)
+    {
+        auto sliced_conv = std::make_shared<op::v0::ConvolutionBackpropData>(
+            data_shape,
+            sliced_filters[i],
+            sliced_delta[i],
+            get_window_movement_strides(),
+            get_window_dilation_strides(),
+            get_padding_below(),
+            get_padding_above(),
+            Strides(num_spatials, 1)); // default data dilation strides
+
+        sliced_inputs.push_back(sliced_conv);
+    }
+
+    size_t concatenation_axis = 1;
+    return {std::make_shared<op::Concat>(sliced_inputs, concatenation_axis)};
+}
diff --git a/ngraph/test/runtime/op/group_conv.hpp b/ngraph/test/runtime/op/group_conv.hpp
new file mode 100644
index 00000000000000..bc6cb336a12eb7
--- /dev/null
+++ b/ngraph/test/runtime/op/group_conv.hpp
@@ -0,0 +1,142 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "backend_visibility.hpp"
+#include "ngraph/op/convolution.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+    namespace op
+    {
+        namespace v0
+        {
+            /// \brief Group Convolution
+            class BACKEND_API GroupConvolution : public ngraph::op::util::FusedOp
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"GroupConvolution", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                GroupConvolution() = default;
+                GroupConvolution(const Output<Node>& data_batch,
+                                 const Output<Node>& filters,
+                                 const Strides& window_movement_strides,
+                                 const Strides& window_dilation_strides,
+                                 const CoordinateDiff& padding_below,
+                                 const CoordinateDiff& padding_above,
+                                 const Strides& data_dilation_strides,
+                                 const size_t groups,
+                                 const PadType& pad_type = PadType::EXPLICIT);
+
+                // Constructor which accepts groups included in the filters shape.
+                GroupConvolution(const Output<Node>& data_batch,
+                                 const Output<Node>& filters,
+                                 const Strides& window_movement_strides,
+                                 const Strides& window_dilation_strides,
+                                 const CoordinateDiff& padding_below,
+                                 const CoordinateDiff& padding_above,
+                                 const Strides& data_dilation_strides,
+                                 const PadType& pad_type = PadType::EXPLICIT);
+                Shape get_weights_dimensions() const;
+                const Strides& get_window_movement_strides() const
+                {
+                    return m_window_movement_strides;
+                }
+                const Strides& get_window_dilation_strides() const
+                {
+                    return m_window_dilation_strides;
+                }
+                const CoordinateDiff& get_padding_below() const { return m_padding_below; }
+                const CoordinateDiff& get_padding_above() const { return m_padding_above; }
+                const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
+                Output<Node> get_filters() { return input_value(1); }
+                Output<Node> get_data_batch() { return input_value(0); }
+                size_t get_groups() const { return m_groups; };
+                const PadType& get_pad_type() const { return m_pad_type; }
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                virtual OutputVector decompose_op() const override;
+
+                virtual void pre_validate_and_infer_types() override;
+                virtual void post_validate_and_infer_types() override;
+
+                bool has_groups_in_filters() const { return m_groups_in_filters; }
+
+            protected:
+                Strides m_window_movement_strides;
+                Strides m_window_dilation_strides;
+                CoordinateDiff m_padding_below;
+                CoordinateDiff m_padding_above;
+                Strides m_data_dilation_strides;
+                size_t m_groups;
+                PadType m_pad_type{PadType::NOTSET};
+
+            private:
+                bool m_groups_in_filters;
+            };
+
+            /// \brief Group Convolution data batch backprop
+            class BACKEND_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                GroupConvolutionBackpropData() = default;
+                GroupConvolutionBackpropData(const Output<Node>& data_batch,
+                                             const Output<Node>& filters,
+                                             const Output<Node>& output_delta,
+                                             const Strides& window_movement_strides,
+                                             const Strides& window_dilation_strides,
+                                             const CoordinateDiff& padding_below,
+                                             const CoordinateDiff& padding_above,
+                                             const size_t groups);
+
+                const Strides& get_window_movement_strides() const
+                {
+                    return m_window_movement_strides;
+                }
+                const Strides& get_window_dilation_strides() const
+                {
+                    return m_window_dilation_strides;
+                }
+                const CoordinateDiff& get_padding_below() const { return m_padding_below; }
+                const CoordinateDiff& get_padding_above() const { return m_padding_above; }
+                size_t get_groups() const { return m_groups; };
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                virtual OutputVector decompose_op() const override;
+
+                virtual void pre_validate_and_infer_types() override;
+
+            protected:
+                Strides m_window_movement_strides;
+                Strides m_window_dilation_strides;
+                CoordinateDiff m_padding_below;
+                CoordinateDiff m_padding_above;
+                size_t m_groups;
+            };
+        }
+    } // namespace op
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp
index a0eac8c3e6599f..1b9f5946978240 100644
--- a/ngraph/test/runtime/opset0_tbl.hpp
+++ b/ngraph/test/runtime/opset0_tbl.hpp
@@ -85,13 +85,11 @@ NGRAPH_OP(Gather, ngraph::op)
 NGRAPH_OP(GatherND, ngraph::op)
 NGRAPH_OP(Gelu, ngraph::op)
 NGRAPH_OP(Greater, ngraph::op)
-NGRAPH_OP(GreaterEq, ngraph::op)
 NGRAPH_OP(GroupConvolution, ngraph::op::v0)
 NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0)
 NGRAPH_OP(HardSigmoid, ngraph::op)
 NGRAPH_OP(Interpolate, ngraph::op::v0)
 NGRAPH_OP(Less, ngraph::op)
-NGRAPH_OP(LessEq, ngraph::op)
 NGRAPH_OP(Log, ngraph::op)
 NGRAPH_OP(LRN, ngraph::op)
 NGRAPH_OP(LSTMSequence, ngraph::op::v0)
diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp
index 7ecc21b58c6883..0d668c87253cfc 100644
--- a/ngraph/test/runtime/pass/opset0_downgrade.cpp
+++ b/ngraph/test/runtime/pass/opset0_downgrade.cpp
@@ -96,29 +96,6 @@ namespace
     // Default is that we did nothing
     shared_ptr<Node> op_cast(shared_ptr<Node> node) { return nullptr; }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::AvgPool> node)
-    {
-        auto const input_arg = node->input_value(0);
-        const auto ceil_mode = static_cast<bool>(node->get_rounding_type());
-        const auto include_padding_in_avg_computation = !node->get_exclude_pad();
-        const auto pad_type = node->get_auto_pad();
-        const auto padding_below = node->get_pads_begin();
-        const auto padding_above = node->get_pads_end();
-        const auto window_movement_strides = node->get_strides();
-        const auto window_shape = node->get_kernel();
-
-        auto replacement_node = make_shared<op::v0::AvgPool>(input_arg,
-                                                             window_shape,
-                                                             window_movement_strides,
-                                                             padding_below,
-                                                             padding_above,
-                                                             include_padding_in_avg_computation,
-                                                             pad_type,
-                                                             ceil_mode);
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::Broadcast> node)
     {
         auto arg = node->input_value(0);
@@ -262,33 +239,6 @@ namespace
         return op_cast_binary_elementwise_node<op::v1::NotEqual, op::v0::NotEqual>(node);
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::OneHot> node)
-    {
-        const auto indices = node->input_value(0);
-        const auto depth = node->input_value(1).get_node();
-        auto on_value = node->input_value(2);
-        auto off_value = node->input_value(3);
-        const auto axis = node->get_axis();
-
-        NGRAPH_CHECK(op::is_constant(depth), "depth input must be constant", *node);
-        const auto output_pshape = node->get_output_partial_shape(0);
-        NGRAPH_CHECK(output_pshape.is_static(), "output shape must be static", *node);
-        const auto output_shape = output_pshape.to_shape();
-
-        auto one_hot = std::make_shared<op::Convert>(
-            std::make_shared<op::OneHot>(indices, output_shape, axis),
-            on_value.get_element_type());
-
-        auto broadcasted_values = builder::numpy_broadcast_outputs({one_hot, on_value, off_value});
-        on_value = broadcasted_values[1];
-        off_value = broadcasted_values[2];
-
-        auto replacement_node = one_hot * (on_value - off_value) + off_value;
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceMax> node)
     {
         auto replacement_node = op_cast_reduction_node<op::v1::ReduceMax, op::Max>(node);
diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp
index 8b20cfb9624e89..08ca76a6be9f7e 100644
--- a/ngraph/test/runtime/pass/opset1_upgrade.cpp
+++ b/ngraph/test/runtime/pass/opset1_upgrade.cpp
@@ -49,10 +49,6 @@ namespace
     // Default is that we did nothing
     shared_ptr<Node> op_cast(shared_ptr<Node> node) { return nullptr; }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Add> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Add, op::v1::Add>(node);
-    }
 
     shared_ptr<Node> op_cast(shared_ptr<op::AvgPool> node)
     {
@@ -144,11 +140,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Equal> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Equal, op::v1::Equal>(node);
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Gather> node)
     {
         int64_t axis = node->get_axis();
@@ -160,15 +151,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Greater> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Greater, op::v1::Greater>(node);
-    }
-
-    shared_ptr<Node> op_cast(shared_ptr<op::GreaterEq> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::GreaterEq, op::v1::GreaterEqual>(node);
-    }
 
     shared_ptr<Node> op_cast(shared_ptr<op::v0::GroupConvolution> node)
     {
@@ -267,15 +249,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Less> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Less, op::v1::Less>(node);
-    }
-
-    shared_ptr<Node> op_cast(shared_ptr<op::LessEq> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::LessEq, op::v1::LessEqual>(node);
-    }
 
     shared_ptr<Node> op_cast(shared_ptr<op::Max> node)
     {
@@ -286,10 +259,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Maximum> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Maximum, op::v1::Maximum>(node);
-    }
 
     shared_ptr<Node> op_cast(shared_ptr<op::Min> node)
     {
@@ -300,16 +269,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Minimum> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Minimum, op::v1::Minimum>(node);
-    }
-
-    shared_ptr<Node> op_cast(shared_ptr<op::Multiply> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Multiply, op::v1::Multiply>(node);
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Not> node)
     {
         auto replacement_node = make_shared<op::v1::LogicalNot>(node->input_value(0));
         replace_node(node, replacement_node);
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::NotEqual> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::NotEqual, op::v1::NotEqual>(node);
-    }
 
     shared_ptr<Node> op_cast(shared_ptr<op::OneHot> node)
     {
@@ -348,11 +303,6 @@ namespace
         return op_cast_binary_elementwise_node<op::v0::Or, op::v1::LogicalOr>(node);
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Power> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Product> node)
     {
         bool keep_dims = false;
@@ -457,11 +407,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Subtract> node)
-    {
-        return op_cast_binary_elementwise_node<op::v0::Subtract, op::v1::Subtract>(node);
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Sum> node)
     {
         bool keep_dims = false;
diff --git a/ngraph/test/type_prop/binary_elementwise.cpp b/ngraph/test/type_prop/binary_elementwise.cpp
index 1b27002c2602a6..26cf1aebfa4580 100644
--- a/ngraph/test/type_prop/binary_elementwise.cpp
+++ b/ngraph/test/type_prop/binary_elementwise.cpp
@@ -285,50 +285,6 @@ TEST(type_prop, binary_elementwise_arithmetic_both_dynamic)
     ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_dynamic());
 }
 
-TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_static)
-{
-    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto b = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
-    auto add = make_shared<op::Add>(a, b);
-
-    ASSERT_TRUE(add->get_output_partial_shape(0).is_static());
-    ASSERT_EQ(add->get_shape(), (Shape{1, 2, 3}));
-}
-
-TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_dynamic)
-{
-    auto a = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
-    auto b = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto add = make_shared<op::Add>(a, b);
-
-    ASSERT_TRUE(add->get_output_partial_shape(0).is_static());
-    ASSERT_EQ(add->get_shape(), (Shape{1, 2, 3}));
-}
-
-TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_dynamic)
-{
-    auto a = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 3});
-    auto b = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto add = make_shared<op::Add>(a, b);
-
-    ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static());
-    ASSERT_TRUE(add->get_output_partial_shape(0).is_dynamic());
-    ASSERT_TRUE(
-        add->get_output_partial_shape(0).same_scheme(PartialShape{1, Dimension::dynamic(), 3}));
-}
-
-TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_rank_static_dynamic)
-{
-    auto a = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto b = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 3});
-    auto add = make_shared<op::Add>(a, b);
-
-    ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static());
-    ASSERT_TRUE(add->get_output_partial_shape(0).is_dynamic());
-    ASSERT_TRUE(
-        add->get_output_partial_shape(0).same_scheme(PartialShape{1, Dimension::dynamic(), 3}));
-}
-
 TEST(type_prop,
      binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_static)
 {
diff --git a/ngraph/test/type_prop/select.cpp b/ngraph/test/type_prop/select.cpp
index e70cff09043ce0..488098d64ba201 100644
--- a/ngraph/test/type_prop/select.cpp
+++ b/ngraph/test/type_prop/select.cpp
@@ -211,48 +211,6 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_arg2_et_dynamic)
     ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic());
 }
 
-TEST(type_prop, select_partial_arg0_rank_dynamic_static_arg1_arg2_rank_dynamic_ok)
-{
-    auto param0 =
-        make_shared<op::Parameter>(element::boolean, PartialShape{2, Dimension::dynamic(), 3});
-    auto param1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto param2 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-
-    auto sel = make_shared<op::Select>(param0, param1, param2);
-
-    ASSERT_EQ(sel->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(
-        sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3}));
-}
-
-TEST(type_prop, select_partial_arg1_rank_dynamic_static_arg0_arg2_rank_dynamic_ok)
-{
-    auto param0 = make_shared<op::Parameter>(element::boolean, PartialShape::dynamic());
-    auto param1 =
-        make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
-    auto param2 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-
-    auto sel = make_shared<op::Select>(param0, param1, param2);
-
-    ASSERT_EQ(sel->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(
-        sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3}));
-}
-
-TEST(type_prop, select_partial_arg2_rank_dynamic_static_arg0_arg1_rank_dynamic_ok)
-{
-    auto param0 = make_shared<op::Parameter>(element::boolean, PartialShape::dynamic());
-    auto param1 = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto param2 =
-        make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
-
-    auto sel = make_shared<op::Select>(param0, param1, param2);
-
-    ASSERT_EQ(sel->get_output_element_type(0), element::f32);
-    ASSERT_TRUE(
-        sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3}));
-}
-
 TEST(type_prop, select_partial_all_rank_static_dynamic_ok)
 {
     auto param0 = make_shared<op::Parameter>(
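
For reference, a minimal sketch of how the relocated op and pass compose (the helper
name and concrete shapes below are illustrative, not taken from the patch; the
includes assume the ngraph/test/runtime include paths added to the build above):

    #include "ngraph/ngraph.hpp"
    #include "op/group_conv.hpp"       // test-runtime home of op::v0::GroupConvolution
    #include "pass/opset1_upgrade.hpp" // pass relocated into the test runtime

    using namespace ngraph;

    // Build a graph around the deprecated v0 GroupConvolution, then run
    // Opset1Upgrade the same way ie_executable.cpp now does, so that only
    // opset1 ops reach the IE plugin.
    std::shared_ptr<Function> make_upgraded_group_conv()
    {
        auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 16, 30, 30});
        // 4 groups: 32 output channels, 16 / 4 = 4 input channels per filter
        auto filters = std::make_shared<op::Parameter>(element::f32, Shape{32, 4, 3, 3});
        auto conv = std::make_shared<op::v0::GroupConvolution>(data,
                                                               filters,
                                                               Strides{1, 1},        // window movement
                                                               Strides{1, 1},        // window dilation
                                                               CoordinateDiff{0, 0}, // padding below
                                                               CoordinateDiff{0, 0}, // padding above
                                                               Strides{1, 1},        // data dilation
                                                               4);                   // groups
        auto func = std::make_shared<Function>(conv, ParameterVector{data, filters});

        pass::Manager passes;
        passes.register_pass<pass::Opset1Upgrade>(); // same registration as in ie_executable.cpp
        passes.run_passes(func);
        return func;
    }

Running the pass decomposes or upgrades the v0 op, which is why the deleted v0
op_cast overloads and type_prop tests above are no longer needed in core ngraph.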