From 78c94305b726f1809a8e25c56b98637100167cb7 Mon Sep 17 00:00:00 2001
From: Gorokhov Dmitriy
Date: Thu, 25 Feb 2021 12:09:06 +0300
Subject: [PATCH] Reimplemented add_config using ngraph node and
 TensorDescCreators (#11)

* Reimplemented add_config using ngraph node and TensorDescCreators

* Detailed propagation of unsupported-operation errors

* Fixed review comments
---
 .../src/mkldnn_plugin/mkldnn_node.cpp         | 29 ++++++--
 .../src/mkldnn_plugin/nodes/base.hpp          | 70 ++++++++++++++++++-
 .../src/mkldnn_plugin/nodes/gather.cpp        | 69 ++++++++++--------
 .../nodes/mkldnn_generic_node.cpp             |  2 +-
 .../nodes/mkldnn_reference_node.cpp           | 16 +++--
 .../nodes/mkldnn_reference_node.h             |  3 +-
 .../src/mkldnn_plugin/utils/general_utils.h   | 17 +++++
 7 files changed, 160 insertions(+), 46 deletions(-)

diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
index a726243a53c35d..7faf3da248136b 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
@@ -51,6 +51,7 @@
 #include "utils/rt_info/memory_formats_attribute.hpp"
 
 #include
+#include "utils/general_utils.h"
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -1266,14 +1267,17 @@ InferenceEngine::Precision MKLDNNNode::getRuntimePrecision() const {
 MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng,
                                              const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
     MKLDNNNode *newNode = nullptr;
-
+    std::string errorMessage;
     try {
         std::unique_ptr<MKLDNNNode> ol(createNodeIfRegistered(MKLDNNPlugin, Generic, op, eng, w_cache));
         if (ol != nullptr && ol->created(extMgr))
             newNode = ol.release();
     } catch (const InferenceEngine::Exception& ex) {
-        if (ex.getStatus() != NOT_IMPLEMENTED)
+        if (ex.getStatus() != NOT_IMPLEMENTED) {
             throw;
+        } else {
+            errorMessage += getExceptionDescWithoutStatus(ex);
+        }
     }
 
     if (newNode == nullptr) {
@@ -1282,19 +1286,25 @@ MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>
             if (ol != nullptr && ol->created(extMgr))
                 newNode = ol.release();
         } catch (const InferenceEngine::Exception& ex) {
-            if (ex.getStatus() != NOT_IMPLEMENTED)
+            if (ex.getStatus() != NOT_IMPLEMENTED) {
                 throw;
+            } else {
+                errorMessage += getExceptionDescWithoutStatus(ex);
+            }
         }
     }
 
     if (newNode == nullptr) {
         try {
-            std::unique_ptr<MKLDNNNode> ol(new MKLDNNReferenceNode(op, eng, w_cache));
+            std::unique_ptr<MKLDNNNode> ol(new MKLDNNReferenceNode(op, eng, w_cache, errorMessage));
             if (ol != nullptr && ol->created(extMgr))
                 newNode = ol.release();
         } catch (const InferenceEngine::Exception& ex) {
-            if (ex.getStatus() != NOT_IMPLEMENTED)
+            if (ex.getStatus() != NOT_IMPLEMENTED) {
                 throw;
+            } else {
+                errorMessage += getExceptionDescWithoutStatus(ex);
+            }
         }
     }
 
@@ -1306,8 +1316,13 @@ MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>
     //    ti->setExtManager(extMgr);
     //
     //  WA-end
 
-    if (!newNode)
-        IE_THROW() << "Unsupported primitive of type: " << op->get_type_name() << " name: " << op->get_friendly_name();
+    if (!newNode) {
+        std::string errorDetails;
+        if (!errorMessage.empty()) {
+            errorDetails = "\nDetails:\n" + errorMessage;
+        }
+        IE_THROW() << "Unsupported operation of type: " << op->get_type_name() << " name: " << op->get_friendly_name() << errorDetails;
+    }
 
     return newNode;
 }
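Note: with this change the factory accumulates the NOT_IMPLEMENTED reason from each of the three
construction attempts (Generic node, type-registered node, Reference fallback) and reports them all
only when every attempt fails. For a node rejected by its specialized implementation, the thrown
text would look roughly like this (a sketch of the format the code above produces, not captured
output):

    Unsupported operation of type: Gather name: gather_1
    Details:
    Only opset1 Gather operation is supported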
diff --git a/inference-engine/src/mkldnn_plugin/nodes/base.hpp b/inference-engine/src/mkldnn_plugin/nodes/base.hpp
index 2b847ef1ae4007..4a0903cf6f6057 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/base.hpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/base.hpp
@@ -6,6 +6,8 @@
 
 #include
 #include "nodes/list.hpp"
+#include "common/tensor_desc_creator.h"
+#include "ngraph/descriptor/tensor.hpp"
 
 #include
 #include
@@ -54,6 +56,71 @@ class ExtLayerBase: public ILayerExecImpl {
     }
 
 protected:
+    class DataConfigurator {
+    public:
+        DataConfigurator(MKLDNNPlugin::TensorDescCreatorTypes tensorDescType, Precision prc = Precision::UNSPECIFIED,
+                         bool constant = false, int inplace = -1) :
+                tensorDescCreator(getTensorDescCreator(tensorDescType)), prc(prc), constant(constant), inplace(inplace) {}
+
+        DataConfigurator(const MKLDNNPlugin::TensorDescCreator::CreatorConstPtr& tensorDescCreator, Precision prc = Precision::UNSPECIFIED,
+                         bool constant = false, int inplace = -1) :
+                tensorDescCreator(tensorDescCreator), prc(prc), constant(constant), inplace(inplace) {}
+
+        const MKLDNNPlugin::TensorDescCreator::CreatorConstPtr tensorDescCreator;
+        const bool constant = false;
+        const int inplace = -1;
+        const Precision prc = Precision::UNSPECIFIED; // By default the precision of the ngraph node is used
+
+    private:
+        static MKLDNNPlugin::TensorDescCreator::CreatorConstPtr getTensorDescCreator(MKLDNNPlugin::TensorDescCreatorTypes tensorDescType) {
+            auto& creators = MKLDNNPlugin::TensorDescCreator::getCommonCreators();
+            if (creators.find(tensorDescType) == creators.end()) {
+                IE_THROW() << "Cannot find tensor descriptor creator";
+            }
+            return creators.at(tensorDescType);
+        }
+    };
+
+    void addConfig(const std::shared_ptr<ngraph::Node>& op,
+                   const std::vector<DataConfigurator>& inDataConfigurators,
+                   const std::vector<DataConfigurator>& outDataConfigurators,
+                   bool dynBatchSupport = false) {
+        LayerConfig config;
+
+        if (inDataConfigurators.size() != op->get_input_size())
+            IE_THROW() << "Cannot add config for operation " << op->get_friendly_name() << ". Incorrect number of inputs: " <<
+                       "expected: " << op->get_input_size() << ", provided: " << inDataConfigurators.size();
+        if (outDataConfigurators.size() != op->get_output_size())
+            IE_THROW() << "Cannot add config for operation " << op->get_friendly_name() << ". Incorrect number of outputs: " <<
+                       "expected: " << op->get_output_size() << ", provided: " << outDataConfigurators.size();
+
+        auto fill_port = [] (const DataConfigurator& dataConfigurator, const ngraph::descriptor::Tensor& tensor, std::vector<DataConfig>& port) -> bool {
+            // To simplify the initialization logic of particular nodes, we simply skip adding the config
+            // when the target shape is not supported by the tensorDescCreator. This suits the majority of
+            // scenarios, since almost all nodes add an `ncsp` tensorDescCreator, which supports any shape rank.
+            if (tensor.get_shape().size() < dataConfigurator.tensorDescCreator->getMinimalRank())
+                return false;
+
+            auto precision = dataConfigurator.prc != Precision::UNSPECIFIED ? dataConfigurator.prc
+                                                                            : details::convertPrecision(tensor.get_element_type());
+
+            DataConfig dataConfig;
+            dataConfig.inPlace = dataConfigurator.inplace;
+            dataConfig.constant = dataConfigurator.constant;
+            dataConfig.desc = dataConfigurator.tensorDescCreator->createDesc(precision, tensor.get_shape());
+
+            port.push_back(dataConfig);
+
+            return true;
+        };
+
+        for (size_t i = 0; i < inDataConfigurators.size(); i++)
+            if (!fill_port(inDataConfigurators[i], op->get_input_tensor(i), config.inConfs))
+                return;
+
+        for (size_t i = 0; i < outDataConfigurators.size(); i++)
+            if (!fill_port(outDataConfigurators[i], op->get_output_tensor(i), config.outConfs))
+                return;
+
+        config.dynBatchSupport = dynBatchSupport;
+        confs.push_back(config);
+    }
+
     std::string errorMsg;
     std::vector<LayerConfig> confs;
 };
@@ -68,7 +135,8 @@ class ImplFactory : public ILayerImplFactory {
         try {
             impls.push_back(ILayerImpl::Ptr(new IMPL(ngraphOp)));
         } catch (const InferenceEngine::Exception& ex) {
-            return ex.getStatus();
+            strncpy(resp->msg, ex.what(), sizeof(resp->msg) - 1);
+            return ex.getStatus() != OK ? ex.getStatus() : GENERAL_ERROR;
         }
         return OK;
     }
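Note: for illustration, here is a minimal sketch of how a node implementation is expected to call
the new helper. The shapes come from the ngraph node itself; the node, precisions, and comments
below are illustrative only (the real call made by gather.cpp follows in the next diff):

    // Declare a single supported configuration for a hypothetical node with
    // two inputs and one output, all in the plain "ncsp" layout.
    addConfig(op,
              {{TensorDescCreatorTypes::ncsp, Precision::FP32},   // input 0: data
               {TensorDescCreatorTypes::ncsp, Precision::I32}},   // input 1: indices
              {{TensorDescCreatorTypes::ncsp, Precision::FP32}},  // output 0
              false /* dynBatchSupport */);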
diff --git a/inference-engine/src/mkldnn_plugin/nodes/gather.cpp b/inference-engine/src/mkldnn_plugin/nodes/gather.cpp
index 06a3d8815ae3a8..543ab5a0e5fc1a 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/gather.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/gather.cpp
@@ -22,18 +22,41 @@ namespace InferenceEngine {
 namespace Extensions {
 namespace Cpu {
 
+using MKLDNNPlugin::TensorDescCreatorTypes;
+
 class GatherImpl: public ExtLayerBase {
 public:
+    static bool isSupportedOperation(const ngraph::Node& op, std::string& errorMessage) noexcept {
+        try {
+            auto gatherOp = ngraph::as_type<const ngraph::opset1::Gather>(&op);
+            if (!gatherOp) {
+                errorMessage = "Only opset1 Gather operation is supported";
+                return false;
+            }
+
+            auto axesOp = gatherOp->get_input_node_shared_ptr(GATHER_AXIS);
+            if (!ngraph::as_type_ptr<const ngraph::opset1::Constant>(axesOp)) {
+                errorMessage = "Only Constant operation on 'axis' input is supported";
+                return false;
+            }
+        } catch (...) {
+            return false;
+        }
+
+        return true;
+    }
+
     explicit GatherImpl(const std::shared_ptr<ngraph::Node>& op) {
         try {
             errorPrefix_ = std::string("Layer Gather with name '") + op->get_friendly_name() + "' ";
 
-            auto gatherOp = ngraph::as_type_ptr<ngraph::opset1::Gather>(op);
-            if (!gatherOp)
-                IE_THROW() << "CPU Gather node doesn't support ngraph operation "
-                           << gatherOp->get_type_name() << " with name " << gatherOp->get_friendly_name();
+            std::string errorMessage;
+            if (!isSupportedOperation(*op, errorMessage)) {
+                IE_THROW(NotImplemented) << errorMessage;
+            }
 
-            if (gatherOp->get_input_size() != 3 || gatherOp->get_output_size() == 0)
+            auto gatherOp = ngraph::as_type_ptr<ngraph::opset1::Gather>(op);
+            if (gatherOp->get_input_size() != 3 || gatherOp->get_output_size() != 1)
                 IE_THROW() << errorPrefix_ << "has incorrect number of input/output edges!";
 
             Precision inIdxPrecision = details::convertPrecision(gatherOp->get_input_element_type(GATHER_INDEXES));
@@ -44,16 +67,12 @@ class GatherImpl: public ExtLayerBase {
             if (dictionary_dims.size() == 0)
                 IE_THROW() << errorPrefix_ << "has incorrect input parameters dimension!";
 
-            auto axesOp = gatherOp->get_input_node_shared_ptr(GATHER_AXIS);
-            if (!ngraph::as_type_ptr<ngraph::opset1::Constant>(axesOp))
-                IE_THROW() << errorPrefix_ << "supports only Constant op on 'axis' input.";
-
             axis = static_cast<int>(gatherOp->get_axis());
             if (axis < 0)
                 axis += dictionary_dims.size();
             // Dictionary must be at least rank axis + 1
-            IE_ASSERT(-static_cast<int>(dictionary_dims.size()) <= axis && axis < static_cast<int>(dictionary_dims.size()))
-                << errorPrefix_ << "has incorrect input parameters dimensions and axis number!";
+            if (!(-static_cast<int>(dictionary_dims.size()) <= axis && axis < static_cast<int>(dictionary_dims.size())))
+                IE_THROW() << errorPrefix_ << "has incorrect input parameters dimensions and axis number!";
 
             // Find number of dictionaries, index range and data length
             for (int i = 0; i < axis; i++)
@@ -65,24 +84,12 @@ class GatherImpl: public ExtLayerBase {
             if (dataLength == 0)
                 IE_THROW() << errorPrefix_ << "had incorrect input parameters dimension!";
 
-            LayerConfig config;
-            DataConfig dataConfigIdx, dataConfigDct, dataConfigAxis;
             Precision dataPrecision = details::convertPrecision(gatherOp->get_input_element_type(GATHER_DICTIONARY));
-            dataConfigDct.desc = TensorDesc(dataPrecision, dictionary_dims, TensorDesc::getLayoutByDims(dictionary_dims));
-            config.inConfs.push_back(dataConfigDct);
-            const SizeVector& indexes_dims = gatherOp->get_input_shape(GATHER_INDEXES);
-            dataConfigIdx.desc = TensorDesc(inIdxPrecision, indexes_dims, TensorDesc::getLayoutByDims(indexes_dims));
-            config.inConfs.push_back(dataConfigIdx);
-            const SizeVector& axis_dims = gatherOp->get_input_shape(GATHER_AXIS);
-            dataConfigAxis.desc = TensorDesc(Precision::I32, axis_dims, TensorDesc::getLayoutByDims(axis_dims));
-            config.inConfs.push_back(dataConfigAxis);
-
-            DataConfig dataConfigOut;
-            const SizeVector& out_dims = gatherOp->get_output_shape(0);
-            dataConfigOut.desc = TensorDesc(dataPrecision, out_dims, TensorDesc::getLayoutByDims(out_dims));
-            config.outConfs.push_back(dataConfigOut);
-            config.dynBatchSupport = false;
-            confs.push_back(config);
+
+            addConfig(op, {{TensorDescCreatorTypes::ncsp, dataPrecision},
+                           {TensorDescCreatorTypes::ncsp, inIdxPrecision},
+                           {TensorDescCreatorTypes::ncsp, Precision::I32}},
+                          {{TensorDescCreatorTypes::ncsp, dataPrecision}});
         } catch (InferenceEngine::Exception &ex) {
             errorMsg = ex.what();
             throw;
@@ -158,9 +165,9 @@ class GatherImpl: public ExtLayerBase {
     size_t numDictionaries = 1;
     size_t indexRange = 0;
     size_t dataLength = 1;
-    const size_t GATHER_DICTIONARY = 0;
-    const size_t GATHER_INDEXES = 1;
-    const size_t GATHER_AXIS = 2;
+    static const size_t GATHER_DICTIONARY = 0;
+    static const size_t GATHER_INDEXES = 1;
+    static const size_t GATHER_AXIS = 2;
 
     std::string errorPrefix_;
 };
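Note: the isSupportedOperation()/IE_THROW(NotImplemented) split above is the pattern this patch
establishes: a static noexcept check decides whether the node can handle the operation at all and
fills a human-readable reason, and the constructor throws NotImplemented with that reason so that
NodesFactory can try the next implementation and aggregate the message. A minimal sketch for a
hypothetical node (SomeOp and SomeOpImpl are illustrative names, not part of this patch):

    static bool isSupportedOperation(const ngraph::Node& op, std::string& errorMessage) noexcept {
        try {
            // Reject anything that is not the expected opset1 operation.
            if (!ngraph::as_type<const ngraph::opset1::SomeOp>(&op)) {
                errorMessage = "Only opset1 SomeOp operation is supported";
                return false;
            }
        } catch (...) {
            return false;
        }
        return true;
    }

    explicit SomeOpImpl(const std::shared_ptr<ngraph::Node>& op) {
        std::string errorMessage;
        if (!isSupportedOperation(*op, errorMessage))
            IE_THROW(NotImplemented) << errorMessage;  // lets the factory fall back
        // ... regular validation and addConfig(...) follow
    }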
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp
index 40a4435c5927dc..778dc64c709cad 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp
@@ -77,7 +77,7 @@ bool MKLDNNGenericNode::created(const MKLDNNExtensionManager::Ptr &extMgr) {
         extFactory = extMgr->CreateExtensionFactory(ngraphOp);
 
     if (!extFactory)
-        IE_THROW(NotImplemented) << "Descriptor for generic primitive doesn't exist";
+        IE_THROW(NotImplemented);
 
     std::vector impls_no_exec;
     InferenceEngine::ResponseDesc resp;
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp
index cfe0a3c55b45d2..3dd7db9c663d4d 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp
@@ -12,8 +12,9 @@ using namespace MKLDNNPlugin;
 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
 
-MKLDNNReferenceNode::MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) :
-        MKLDNNNode(op, eng, cache), ngraphOp(op) {
+MKLDNNReferenceNode::MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache,
+                                         const std::string& errorMessage) :
+        MKLDNNNode(op, eng, cache), ngraphOp(op), additionalErrorMessage(errorMessage) {
     setType(Reference);
 }
 
@@ -67,12 +68,17 @@ void MKLDNNReferenceNode::execute(mkldnn::stream strm) {
     }
 
     if (!ngraphOp->evaluate(outputs, inputs)) {
-        IE_THROW(NotImplemented)
-            << "Cannot find reference implementation for node " << ngraphOp->get_type_name() << " with name '" << ngraphOp->get_friendly_name() << "'.";
+        std::string errorDetails = "Unsupported operation of type: " + std::string(ngraphOp->get_type_name()) +
+                                   " name: " + std::string(ngraphOp->get_friendly_name());
+        errorDetails += "\nDetails:\n";
+        if (!additionalErrorMessage.empty()) {
+            errorDetails += additionalErrorMessage + "\n";
+        }
+        errorDetails += "Cannot fall back on the ngraph reference implementation (ngraph::Node::evaluate() is not implemented)";
+        IE_THROW(NotImplemented) << errorDetails;
     }
 }
 
 bool MKLDNNReferenceNode::created() const {
     return getType() == Reference;
 }
-
-REG_MKLDNN_PRIM_FOR(MKLDNNReferenceNode, Reference);
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h
index 71c5ed08db8695..ed78ffe14bd6a3 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h
@@ -12,7 +12,7 @@ namespace MKLDNNPlugin {
 
 class MKLDNNReferenceNode : public MKLDNNNode {
 public:
-    MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
+    MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache, const std::string& errorMessage);
     ~MKLDNNReferenceNode() override = default;
 
     void getSupportedDescriptors() override;
@@ -23,6 +23,7 @@ class MKLDNNReferenceNode : public MKLDNNNode {
 
 private:
     const std::shared_ptr<ngraph::Node> ngraphOp;
+    const std::string additionalErrorMessage;
 };
 
 }  // namespace MKLDNNPlugin
diff --git a/inference-engine/src/mkldnn_plugin/utils/general_utils.h b/inference-engine/src/mkldnn_plugin/utils/general_utils.h
index d73a6737f302d1..5d295492bc4798 100644
--- a/inference-engine/src/mkldnn_plugin/utils/general_utils.h
+++ b/inference-engine/src/mkldnn_plugin/utils/general_utils.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include
+#include
 
 namespace MKLDNNPlugin {
@@ -39,5 +40,21 @@ constexpr inline bool implication(bool cause, bool cond) {
     return !cause || !!cond;
 }
 
+inline std::string getExceptionDescWithoutStatus(const InferenceEngine::details::InferenceEngineException& ex) {
+    std::string desc = ex.what();
+    if (ex.getStatus() != 0) {
+        size_t pos = desc.find("]");
+        if (pos != std::string::npos) {
+            if (desc.size() == pos + 1) {
+                desc.erase(0, pos + 1);
+            } else {
+                desc.erase(0, pos + 2);
+            }
+        }
+    }
+
+    return desc;
+}
+
 }  // namespace MKLDNNPlugin
\ No newline at end of file
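Note: getExceptionDescWithoutStatus() assumes the exception text starts with a bracketed status
prefix, which it strips so that the factory aggregates only the human-readable part. A sketch of
the expected behavior (the what() layout shown is an assumption inferred from the find("]") logic):

    // what():  "[NOT_IMPLEMENTED] Only opset1 Gather operation is supported"
    // returns: "Only opset1 Gather operation is supported"
    std::string desc = getExceptionDescWithoutStatus(ex);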