diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt
index 52278facade11e..3a3d622852d435 100644
--- a/inference-engine/src/inference_engine/CMakeLists.txt
+++ b/inference-engine/src/inference_engine/CMakeLists.txt
@@ -9,7 +9,6 @@ file (GLOB LIBRARY_SRC
       ${CMAKE_CURRENT_SOURCE_DIR}/cpp/*.cpp
       ${CMAKE_CURRENT_SOURCE_DIR}/threading/*.cpp
       ${CMAKE_CURRENT_SOURCE_DIR}/cpp/*.cpp
-      ${CMAKE_CURRENT_SOURCE_DIR}/shape_infer/ie_built_in_holder.cpp
      )
 
 # TODO: WA for OneHot pass usage in reshape
diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
index 02f8d2fc3fd8e8..774eed0dd2d35c 100644
--- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
+++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
@@ -33,7 +33,6 @@
 #include "exec_graph_info.hpp"
 #include "ie_itt.hpp"
 #include "generic_ie.hpp"
-#include "shape_infer/ie_built_in_holder.hpp"
 
 using namespace std;
 using namespace InferenceEngine;
@@ -114,9 +113,6 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
         network.setInputInfo(info);
     };
 
-    // Add shape infer method for old operations which are not included to opset1, opset2 and opset3
-    ::ngraph::op::GenericIE::addExtension(_ngraph_function, std::make_shared<ShapeInfer::BuiltInShapeInferHolder>());
-
     reshape();
     for (const auto& layer : _ngraph_function->get_parameters()) {
         std::string outName = layer->get_friendly_name();
diff --git a/inference-engine/src/inference_engine/generic_ie.cpp b/inference-engine/src/inference_engine/generic_ie.cpp
index 840491493f0708..b5155013fafb14 100644
--- a/inference-engine/src/inference_engine/generic_ie.cpp
+++ b/inference-engine/src/inference_engine/generic_ie.cpp
@@ -15,7 +15,6 @@
 
 #include "blob_factory.hpp"
 
-#include "shape_infer/ie_ishape_infer_extension.hpp"
 #include "ngraph/util.hpp"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/validation_util.hpp"
@@ -85,63 +84,6 @@ void ngraph::op::GenericIE::validate_and_infer_types() {
         }
         return get_output_element_type(index);
     };
-    // Try to find extension with shape inference implementation and apply it
-    for (const auto& ext : extensions) {
-        IE_SUPPRESS_DEPRECATED_START
-        InferenceEngine::IShapeInferImpl::Ptr impl;
-        InferenceEngine::StatusCode ret = ext->getShapeInferImpl(impl, type.c_str(), nullptr);
-        if (ret != InferenceEngine::StatusCode::OK || !impl) continue;
-
-        std::vector<InferenceEngine::Blob::CPtr> inputs;
-        std::map<std::string, std::string> parameters;
-        std::map<std::string, InferenceEngine::Blob::Ptr> blobs;
-        std::vector<InferenceEngine::SizeVector> outShapes;
-
-        for (uint64_t i = 0; i < get_input_size(); i++) {
-            PartialShape this_input_shape = get_input_partial_shape(i);
-
-            if (!this_input_shape.is_static()) {
-                // Set dynamic output shapes if input shapes are not defined
-                for (size_t output_index = 0; output_index < outputs.size(); output_index++) {
-                    set_output_type(output_index, get_precision(output_index), PartialShape::dynamic());
-                }
-                return;
-            }
-
-            Shape this_ishape = get_input_shape(i);
-            InferenceEngine::SizeVector dims = this_ishape;
-            InferenceEngine::Blob::Ptr input = make_blob_with_precision(InferenceEngine::TensorDesc(
-                InferenceEngine::details::convertPrecision(get_input_element_type(i)), dims,
-                InferenceEngine::TensorDesc::getLayoutByDims(dims)));
-            inputs.emplace_back(input);
-        }
-
-        for (const auto& attr : params) {
-            if (attr.second.is<std::string>()) {
-                parameters[attr.first] = attr.second.as<std::string>();
-            } else if (attr.second.is<InferenceEngine::Blob::CPtr>()) {
-                auto cBlob = attr.second.as<InferenceEngine::Blob::CPtr>();
-                auto wBlob = std::const_pointer_cast<InferenceEngine::Blob>(cBlob);
-                blobs[attr.first] = wBlob;
-            } else if (attr.second.is<InferenceEngine::Blob::Ptr>()) {
-                auto wBlob = attr.second.as<InferenceEngine::Blob::Ptr>();
-                blobs[attr.first] = wBlob;
-            } else {
-                THROW_IE_EXCEPTION << "Generic node for layer " << get_friendly_name() << " with type " << type
-                                   << " has incorrect parameter " << attr.first << "!";
-            }
-        }
-
-        ret = impl->inferShapes(inputs, parameters, blobs, outShapes, nullptr);
-        IE_SUPPRESS_DEPRECATED_END
-
-        if (ret != InferenceEngine::StatusCode::OK || outShapes.size() != outputs.size()) continue;
-
-        for (size_t output_index = 0; output_index < outputs.size(); output_index++) {
-            set_output_type(output_index, get_precision(output_index), Shape(outShapes[output_index]));
-        }
-        return;
-    }
 
     // Extensions are not loaded when we create nGraph function
     // First call: create node
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.cpp b/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.cpp
deleted file mode 100644
index c39ed50ef1ad1c..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2017-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <algorithm>
-#include <memory>
-#include <string>
-
-#include "shape_infer/ie_built_in_holder.hpp"
-#include "shape_infer/ie_proposal_shape_infer.hpp"
-#include "shape_infer/ie_rnn_cell_shape_infer.hpp"
-#include "shape_infer/ie_simpler_nms_shape_infer.hpp"
-#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
-#include "shape_infer/ie_unique_shape_infer.hpp"
-#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
-    static ImplsHolder::Ptr localHolder;
-    if (localHolder == nullptr) {
-        localHolder = std::make_shared<ImplsHolder>();
-    }
-    return localHolder;
-}
-
-void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
-    GetImplsHolder()->list[name] = impl;
-}
-
-StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
-    auto& factories = GetImplsHolder()->list;
-    types = new char*[factories.size()];
-    size = 0;
-    for (auto it = factories.begin(); it != factories.end(); it++, size++) {
-        types[size] = new char[it->first.size() + 1];
-        std::copy(it->first.begin(), it->first.end(), types[size]);
-        types[size][it->first.size()] = '\0';
-    }
-    return OK;
-}
-
-StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
-                                                      ResponseDesc* resp) noexcept {
-    auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
-    if (impls.find(type) != impls.end()) {
-        impl = impls[type];
-        return OK;
-    }
-    impl.reset();
-    return NOT_FOUND;
-}
-
-template <class Impl>
-class ImplRegisterBase {
-public:
-    explicit ImplRegisterBase(const std::string& type) {
-        BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
-    }
-};
-
-#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) \
-    static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
-
-REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
-REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
-REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
-REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
-REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
-REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.hpp b/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.hpp
deleted file mode 100644
index 5ac7edc5943f37..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <ie_common.h>
-#include "caseless.hpp"
-
-#include "shape_infer/ie_ishape_infer_extension.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Holder of shape infer implementations for build-in IE layers, that plugins support out-of-the-box
- */
-class BuiltInShapeInferHolder : public IShapeInferExtension {
-    struct ImplsHolder {
-        using Ptr = std::shared_ptr<ImplsHolder>;
-        InferenceEngine::details::caseless_map<std::string, IShapeInferImpl::Ptr> list;
-    };
-
-public:
-    StatusCode getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept override;
-
-    StatusCode getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type, ResponseDesc* resp) noexcept override;
-
-    void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override {}
-
-    void Release() noexcept override {
-        delete this;
-    }
-
-    void Unload() noexcept override {}
-
-    static void AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl);
-
-private:
-    static ImplsHolder::Ptr GetImplsHolder();
-};
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_built_in_impl.hpp b/inference-engine/src/inference_engine/shape_infer/ie_built_in_impl.hpp
deleted file mode 100644
index ecf8a62a850064..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_built_in_impl.hpp
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include <algorithm>
-#include <cctype>
-#include <sstream>
-#include <description_buffer.hpp>
-#include <ie_blob.h>
-
-namespace InferenceEngine {
-
-inline std::string GetParamAsString(const char* param, const std::map<std::string, std::string>& params) {
-    auto it = params.find(param);
-    if (it == params.end()) {
-        THROW_IE_EXCEPTION << "No such parameter name '" << param << "'";
-    }
-    return (*it).second;
-}
-
-inline int GetParamAsInt(const char* param, const std::map<std::string, std::string>& params) {
-    std::string val = GetParamAsString(param, params);
-    try {
-        return std::stoi(val);
-    } catch (...) {
-        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
Value " - << val << " cannot be casted to int."; - } -} - -inline bool GetParamAsBool(const char* param, const std::map & params) { - std::string val = GetParamAsString(param, params); - std::string loweredCaseValue; - std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) { - return static_cast(std::tolower(value)); - }); - - bool result = false; - - if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) { - // attempting parse using non alpha bool - return (GetParamAsInt(param, params) != 0); - } - - return result; -} - -std::string GetParamAsString(const char* param, const char* def, - const std::map & params) { - auto it = params.find(param); - if (it == params.end() || it->second.empty()) { - return def; - } - return (*it).second; -} - -int GetParamAsInt(const char* param, int def, - const std::map & params) { - std::string val = GetParamAsString(param, std::to_string(def).c_str(), params); - try { - return std::stoi(val); - } catch (...) { - THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value " - << val << " cannot be casted to int."; - } -} - -bool GetParamAsBool(const char* param, bool def, - const std::map & params) { - std::string val = GetParamAsString(param, std::to_string(def).c_str(), params); - std::string loweredCaseValue; - std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) { - return static_cast(std::tolower(value)); - }); - - bool result = false; - - if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) { - // attempting parse using non alpha bool - return (GetParamAsInt(param, def, params) != 0); - } - - return result; -} - -inline unsigned int GetParamAsUInt(const char* param, const std::map & params) { - std::string val = GetParamAsString(param, params); - std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer" + - ". Value " + val + " cannot be casted to unsigned int."; - try { - int value = std::stoi(val); - if (value < 0) { - THROW_IE_EXCEPTION << message; - } - return static_cast(value); - } catch (...) { - THROW_IE_EXCEPTION << message; - } -} - -namespace ShapeInfer { - -/** - * @brief Base class for all built-in shape infer implementations. Contains common logic with validators and errors - * handling - */ -class BuiltInShapeInferImpl : public IShapeInferImpl { -public: - explicit BuiltInShapeInferImpl(const std::string& type): _type(type) { } - - virtual void inferShapesImpl(const std::vector& inBlobs, - const std::map& params, - const std::map& blobs, std::vector& outShapes) = 0; - - StatusCode inferShapes(const std::vector& inBlobs, const std::map& params, - const std::map& blobs, std::vector& outShapes, - ResponseDesc* resp) noexcept override { - inShapes.clear(); - for (const auto& blob : inBlobs) { - inShapes.push_back(blob->getTensorDesc().getDims()); - } - outShapes.clear(); - try { - inferShapesImpl(inBlobs, params, blobs, outShapes); - return OK; - } catch (const std::exception& ex) { - return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what(); - } catch (...) 
-            return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
-        }
-    }
-
-protected:
-    std::string _type;
-    std::vector<SizeVector> inShapes;
-};
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_ishape_infer_extension.hpp b/inference-engine/src/inference_engine/shape_infer/ie_ishape_infer_extension.hpp
deleted file mode 100644
index 911746e20ecc31..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_ishape_infer_extension.hpp
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-
-#include "details/ie_irelease.hpp"
-#include "ie_version.hpp"
-#include "ie_common.h"
-#include "ie_blob.h"
-
-namespace InferenceEngine {
-
-/**
- * @class IShapeInferImpl
- * @brief This class provides interface for the implementation with the custom execution code
- */
-class IShapeInferImpl {
-public:
-    /**
-     * @brief A shared pointer to a IShapeInferImpl object
-     */
-    using Ptr = std::shared_ptr<IShapeInferImpl>;
-
-    virtual ~IShapeInferImpl() = default;
-
-    /**
-     * @brief check that reshape can be applied, that parameters and shapes are valid
-     */
-    virtual StatusCode inferShapes(const std::vector<Blob::CPtr>& /*inBlobs*/,
-                                   const std::map<std::string, std::string>& /*params*/,
-                                   const std::map<std::string, Blob::Ptr>& /*blobs*/,
-                                   std::vector<SizeVector>& /*outShapes*/, ResponseDesc* /*resp*/) noexcept {
-        return NOT_IMPLEMENTED;
-    }  // For backward-compatibility
-};
-
-/**
- * @class IShapeInferExtension
- * @brief This class is the reader extension interface to provide implementation for shape propagation
- */
-class IShapeInferExtension : public InferenceEngine::details::IRelease {
-public:
-    /**
-     * @brief Gets extension version information and stores in versionInfo
-     * @param versionInfo Pointer to version info, will be set by plugin
-     */
-    virtual void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept = 0;
-
-    /**
-     * @brief Cleans resources up
-     */
-    virtual void Unload() noexcept = 0;
-
-    /**
-     * The method will be removed in 2021.1 release.
-     * @brief Fills passed array with types of layers which shape infer implementations are included in the extension
-     *
-     * @param types Array to store the layer types
-     * @param size Size of the layer types array
-     * @param resp Response descriptor
-     * @return Status code
-     */
-    virtual StatusCode getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept = 0;
-
-    /**
-     * @brief Gets shape propagation implementation for the given string-type of CNNLayer
-     *
-     * @param impl the vector with implementations which is ordered by priority
-     * @param type A type of CNNLayer
-     * @param resp response descriptor
-     * @return status code
-     */
-    virtual StatusCode getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type, ResponseDesc* resp) noexcept = 0;
-};
-
-/**
- * This API will be removed in 2021.1 release.
- * @brief A shared pointer to a IShapeInferExtension interface
- */
-using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
-
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_proposal_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_proposal_shape_infer.hpp
deleted file mode 100644
index 67860ed8c2006b..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_proposal_shape_infer.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Implementation of Shape inference for Proposal layer
- */
-class ProposalShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ProposalShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
-        auto num_outputs = GetParamAsUInt("num_outputs", params);
-
-        if (num_outputs > 2)
-            THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
-
-        outShapes.push_back({inShapes[0][0] * post_nms_topn, 5});
-        if (num_outputs == 2)
-            outShapes.push_back({inShapes[0][0] * post_nms_topn});
-    }
-};
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_rnn_cell_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_rnn_cell_shape_infer.hpp
deleted file mode 100644
index 7584cc31266247..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_rnn_cell_shape_infer.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Implementation of Shape inference for DetectionOutput layer
- */
-template <int S>
-class RNNBaseCellShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit RNNBaseCellShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        auto state_dims = inShapes[1];
-        for (int i = 0; i < S; i++)
-            outShapes.push_back(state_dims);
-    }
-};
-
-using RNNCellShapeProp = RNNBaseCellShapeProp<1>;
-using GRUCellShapeProp = RNNBaseCellShapeProp<1>;
-using LSTMCellShapeProp = RNNBaseCellShapeProp<2>;
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_simpler_nms_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_simpler_nms_shape_infer.hpp
deleted file mode 100644
index 882f479a0a76c0..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_simpler_nms_shape_infer.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Implementation of Shape inference for SimplerNMS layer
- */
-class SimplerNMSShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SimplerNMSShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
-        outShapes.push_back({post_nms_topn, 5});
-    }
-};
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_sparse_to_dense_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_sparse_to_dense_shape_infer.hpp
deleted file mode 100644
index ca853c7f66e9a5..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_sparse_to_dense_shape_infer.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "ie_built_in_impl.hpp"
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
-* @brief Implementation of Shape inference for SparseToDense layer
-*/
-class SparseToDenseShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SparseToDenseShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
-                         const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs,
-                         std::vector<SizeVector>& outShapes) override {
-        SizeVector shapes;
-        if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
-            auto* buffer = inBlobs[1]->cbuffer().as<int*>();
-            if (buffer != nullptr) {
-                shapes.assign(buffer, buffer + inBlobs[1]->size());
-            } else {
-                THROW_IE_EXCEPTION << "Second input must have allocated data";
-            }
-        } else {
-            THROW_IE_EXCEPTION << "Second input must have I32 precision";
-        }
-
-        outShapes = { shapes };
-    }
-};
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_unique_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_unique_shape_infer.hpp
deleted file mode 100644
index b95e0ff3feb3fb..00000000000000
--- a/inference-engine/src/inference_engine/shape_infer/ie_unique_shape_infer.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "shape_infer/ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Implementation of Shape inference for Unique layer
- */
-class UniqueShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit UniqueShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        bool return_inverse = GetParamAsBool("return_inverse", params);
-        bool return_counts = GetParamAsBool("return_counts", params);
-
-        // compute a number of outputs
-        size_t num_outputs = 1;
-        if (return_counts) {
-            num_outputs++;
-        }
-        if (return_inverse) {
-            num_outputs++;
-        }
-
-        // reshape available outputs
-        outShapes.resize(num_outputs);
-        for (size_t i = 0; i < num_outputs; i++) {
-            outShapes[i].resize(1);
-            outShapes[i][0] = inShapes[0][0];
-        }
-    }
-};
-
-} // namespace ShapeInfer
-} // namespace InferenceEngine
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
index c3e405a7beb922..5191f022b994f1 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -706,13 +706,17 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
     auto opsetIt = opsets.find(params.version);
 
     // Try to create operation from loaded opsets
-    static const std::unordered_set<std::string> experimental_detectrons = {"ExperimentalDetectronDetectionOutput",
-                                                                            "ExperimentalDetectronGenerateProposalsSingleImage",
-                                                                            "ExperimentalDetectronPriorGridGenerator",
-                                                                            "ExperimentalDetectronROIFeatureExtractor",
-                                                                            "ExperimentalDetectronTopKROIs"};
-
-    if (experimental_detectrons.count(params.type)) {
+    static const std::unordered_set<std::string> experimental_ops_added_to_opset = {
+        "ExperimentalDetectronDetectionOutput",
+        "ExperimentalDetectronGenerateProposalsSingleImage",
+        "ExperimentalDetectronPriorGridGenerator",
+        "ExperimentalDetectronROIFeatureExtractor",
+        "ExperimentalDetectronTopKROIs",
+        "GRUCell",
+        "RNNCell",
+        "Proposal"};
+
+    if (experimental_ops_added_to_opset.count(params.type) && (params.version == "experimental" || params.version == "extension")) {
         opsetIt = opsets.find("opset6");
     }
diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reader/proposal_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reader/proposal_tests.cpp
index f0320c5135923a..958cdd46e1b9e8 100644
--- a/inference-engine/tests/functional/inference_engine/ngraph_reader/proposal_tests.cpp
+++ b/inference-engine/tests/functional/inference_engine/ngraph_reader/proposal_tests.cpp
@@ -4,6 +4,7 @@
 #include <set>
 #include <string>
 
+#include "ngraph/opsets/opset6.hpp"
 #include "ngraph_reader_tests.hpp"
 TEST_F(NGraphReaderTests, ReadProposalNetwork) {
     std::string model_v10 = R"V0G0N(
@@ -308,6 +309,8 @@ TEST_F(NGraphReaderTests, ReadProposalNetwork_2) {
 }
 
 TEST_F(NGraphReaderTests, ReadExtensionProposalNetwork) {
+    // the Proposal with 2 inputs was initially marked as an "extension" operation but was later added to the opset;
+    // the test checks that the IR reader properly instantiates the "extension" Proposal as the "opset6" Proposal
     std::string model_v10 = R"V0G0N(
@@ -334,9 +337,9 @@ TEST_F(NGraphReaderTests, ReadExtensionProposalNetwork) {
-
+
-
+
                     3
@@ -391,15 +394,15 @@ TEST_F(NGraphReaderTests, ReadExtensionProposalNetwork) {
     Core ie;
     Blob::Ptr weights;
-    weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {24}, Layout::C));
+    weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {12}, Layout::C));
     weights->allocate();
     CommonTestUtils::fill_data(weights->buffer().as<float*>(), weights->size() / sizeof(float));
 
     auto func = ie.ReadNetwork(model_v10, weights).getFunction();
 
     for (auto op : func->get_ordered_ops()) {
-        if (op->get_friendly_name() == "proposal" && op->get_type_info() == ngraph::op::GenericIE::type_info) {
+        if (op->get_friendly_name() == "proposal" && op->get_type_info() == ngraph::opset6::Proposal::type_info) {
             return;
         }
     }
-    FAIL() << "Custom proposal layer is not a Generic operation!";
-}
+    FAIL() << "Custom proposal layer is not an opset6 operation.";
+}
\ No newline at end of file
diff --git a/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp b/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp
index 27246aabffa262..3ec3d1ada7743c 100644
--- a/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp
+++ b/inference-engine/tests/functional/inference_engine/ngraph_reshape_tests.cpp
@@ -472,66 +472,111 @@ TEST_F(NGraphReshapeTests, TestInterpParameters) {
 }
 
 TEST_F(NGraphReshapeTests, ReshapeWithDefaultGenericOps) {
+    // the RNNCell was initially marked as an "experimental" operation but was later added to the opset;
+    // the test checks that the IR reader properly instantiates the "experimental" RNNCell as the "opset6" RNNCell
     std::string model = R"V0G0N(
-
+
                     1
-                    256
+                    16
+
+
+
+
+
+
+
+                    1
+                    128
+
+
+
+
+
+
+
+                    128
+                    16
+
+
+
+
+
+
+
+                    128
+                    128
+
+
+
+
+
+
+
+                    128
-
-
+
+
                     1
-                    256
+                    16
                     1
-                    256
+                    128
+
+
+                    128
+                    16
+
+
+                    128
+                    128
+
+
+                    128
-
+
                     1
-                    256
+                    128
-
-
-
-
-
+
                     1
-                    256
+                    128
-
-
-
+
+
+
+
+
+
)V0G0N";
 
     InferenceEngine::Core ie;
     Blob::Ptr weights;
-    weights = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {1576960}, Layout::C));
-    weights->allocate();
-    fill_data(weights->buffer(), weights->size() / sizeof(float));
 
     auto network = ie.ReadNetwork(model, weights);
 
     InferenceEngine::ICNNNetwork::InputShapes newShapes;
-    newShapes["in1"] = {2, 256};
+    newShapes["in1"] = {2, 16};
+    newShapes["in2"] = {2, 128};
 
     ASSERT_NO_THROW(network.reshape(newShapes));
 }