Skip to content

Commit

Permalink
Reimplemented add_config using ngraph node and TensorDescCreators (#11)
Browse files Browse the repository at this point in the history
* Reimplemented add_config using ngraph node and TensorDescCreators

* Detailed unsupported errors propagation

* Fixed review comments
  • Loading branch information
dmitry-gorokhov committed May 5, 2021
1 parent 8eec060 commit 78c9430
Show file tree
Hide file tree
Showing 7 changed files with 160 additions and 46 deletions.
29 changes: 22 additions & 7 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
#include "utils/rt_info/memory_formats_attribute.hpp"

#include <ie_ngraph_utils.hpp>
#include "utils/general_utils.h"

using namespace mkldnn;
using namespace MKLDNNPlugin;
Expand Down Expand Up @@ -1266,14 +1267,17 @@ InferenceEngine::Precision MKLDNNNode::getRuntimePrecision() const {
MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng,
const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
MKLDNNNode *newNode = nullptr;

std::string errorMessage;
try {
std::unique_ptr<MKLDNNNode> ol(createNodeIfRegistered(MKLDNNPlugin, Generic, op, eng, w_cache));
if (ol != nullptr && ol->created(extMgr))
newNode = ol.release();
} catch (const InferenceEngine::Exception& ex) {
if (ex.getStatus() != NOT_IMPLEMENTED)
if (ex.getStatus() != NOT_IMPLEMENTED) {
throw;
} else {
errorMessage += getExceptionDescWithoutStatus(ex);
}
}

if (newNode == nullptr) {
Expand All @@ -1282,19 +1286,25 @@ MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>
if (ol != nullptr && ol->created(extMgr))
newNode = ol.release();
} catch (const InferenceEngine::Exception& ex) {
if (ex.getStatus() != NOT_IMPLEMENTED)
if (ex.getStatus() != NOT_IMPLEMENTED) {
throw;
} else {
errorMessage += getExceptionDescWithoutStatus(ex);
}
}
}

if (newNode == nullptr) {
try {
std::unique_ptr<MKLDNNNode> ol(new MKLDNNReferenceNode(op, eng, w_cache));
std::unique_ptr<MKLDNNNode> ol(new MKLDNNReferenceNode(op, eng, w_cache, errorMessage));
if (ol != nullptr && ol->created(extMgr))
newNode = ol.release();
} catch (const InferenceEngine::Exception& ex) {
if (ex.getStatus() != NOT_IMPLEMENTED)
if (ex.getStatus() != NOT_IMPLEMENTED) {
throw;
} else {
errorMessage += getExceptionDescWithoutStatus(ex);
}
}
}

Expand All @@ -1306,8 +1316,13 @@ MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>
// ti->setExtManager(extMgr);
// // WA-end

if (!newNode)
IE_THROW() << "Unsupported primitive of type: " << op->get_type_name() << " name: " << op->get_friendly_name();
if (!newNode) {
std::string errorDetails;
if (!errorMessage.empty()) {
errorDetails = "\nDetails: \n" + errorMessage;
}
IE_THROW() << "Unsupported operation of type: " << op->get_type_name() << " name: " << op->get_friendly_name() << errorDetails;
}

return newNode;
}
70 changes: 69 additions & 1 deletion inference-engine/src/mkldnn_plugin/nodes/base.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@

#include <ie_iextension.h>
#include "nodes/list.hpp"
#include "common/tensor_desc_creator.h"
#include "ngraph/descriptor/tensor.hpp"
#include <ie_ngraph_utils.hpp>

#include <string>
Expand Down Expand Up @@ -54,6 +56,71 @@ class ExtLayerBase: public ILayerExecImpl {
}

protected:
// Describes how one node port (input or output) must be configured:
// which tensor descriptor creator (layout) to use, the precision, whether
// the port data is constant, and whether it may reuse another port's memory.
class DataConfigurator {
public:
    // Resolves a creator from the common registry by type.
    // Throws if no creator is registered for the requested type.
    DataConfigurator(MKLDNNPlugin::TensorDescCreatorTypes tensorDescType, Precision prc = Precision::UNSPECIFIED, bool constant = false, int inplace = -1) :
            tensorDescCreator(getTensorDescCreator(tensorDescType)), prc(prc), constant(constant), inplace(inplace) {}

    // Accepts an already-resolved creator instance.
    DataConfigurator(const MKLDNNPlugin::TensorDescCreator::CreatorConstPtr& tensorDescCreator, Precision prc = Precision::UNSPECIFIED,
                     bool constant = false, int inplace = -1) : tensorDescCreator(tensorDescCreator), prc(prc), constant(constant), inplace(inplace) {}

    const MKLDNNPlugin::TensorDescCreator::CreatorConstPtr tensorDescCreator;
    const bool constant = false;
    const int inplace = -1;                       // -1 means no in-place memory reuse
    const Precision prc = Precision::UNSPECIFIED; // By default ngraph node precision is used

private:
    static MKLDNNPlugin::TensorDescCreator::CreatorConstPtr getTensorDescCreator(MKLDNNPlugin::TensorDescCreatorTypes tensorDescType) {
        // Single lookup instead of find() followed by at(): avoids traversing
        // the container twice for the same key.
        const auto& creators = MKLDNNPlugin::TensorDescCreator::getCommonCreators();
        const auto it = creators.find(tensorDescType);
        if (it == creators.end()) {
            IE_THROW() << "Cannot find tensor descriptor creator";
        }
        return it->second;
    }
};

// Builds one LayerConfig for the given ngraph node from per-port
// configurators and appends it to 'confs'. Throws if the configurator
// counts do not match the node's port counts. If any port shape is not
// supported by its tensorDescCreator, the whole config is silently dropped
// (no entry is appended) to keep node-specific init logic simple — almost
// all nodes register an `ncsp` creator, which supports any shape rank.
void addConfig(const std::shared_ptr<ngraph::Node>& op,
               const std::vector<DataConfigurator>& inDataConfigurators,
               const std::vector<DataConfigurator>& outDataConfigurators,
               bool dynBatchSupport = false) {
    if (inDataConfigurators.size() != op->get_input_size()) {
        IE_THROW() << "Cannot add config for operation " << op->get_friendly_name() << ". Incorrect number of inputs: " <<
            "expected: " << op->get_input_size() << ", provided: " << inDataConfigurators.size();
    }
    if (outDataConfigurators.size() != op->get_output_size()) {
        IE_THROW() << "Cannot add config for operation " << op->get_friendly_name() << ". Incorrect number of outputs: " <<
            "expected: " << op->get_output_size() << ", provided: " << outDataConfigurators.size();
    }

    LayerConfig config;

    // Fills one port entry; returns false when the tensor rank is below the
    // minimum the creator can describe, which aborts the whole config.
    auto appendPortConfig = [](const DataConfigurator& configurator, const ngraph::descriptor::Tensor& tensor, std::vector<DataConfig>& port) {
        if (tensor.get_shape().size() < configurator.tensorDescCreator->getMinimalRank()) {
            return false;
        }

        DataConfig dataConfig;
        dataConfig.inPlace = configurator.inplace;
        dataConfig.constant = configurator.constant;
        // UNSPECIFIED means "inherit the precision from the ngraph tensor".
        const auto precision = configurator.prc == Precision::UNSPECIFIED
                ? details::convertPrecision(tensor.get_element_type())
                : configurator.prc;
        dataConfig.desc = configurator.tensorDescCreator->createDesc(precision, tensor.get_shape());
        port.push_back(dataConfig);
        return true;
    };

    for (size_t i = 0; i < inDataConfigurators.size(); ++i) {
        if (!appendPortConfig(inDataConfigurators[i], op->get_input_tensor(i), config.inConfs)) {
            return;
        }
    }
    for (size_t i = 0; i < outDataConfigurators.size(); ++i) {
        if (!appendPortConfig(outDataConfigurators[i], op->get_output_tensor(i), config.outConfs)) {
            return;
        }
    }

    config.dynBatchSupport = dynBatchSupport;
    confs.push_back(config);
}

std::string errorMsg;
std::vector<LayerConfig> confs;
};
Expand All @@ -68,7 +135,8 @@ class ImplFactory : public ILayerImplFactory {
try {
impls.push_back(ILayerImpl::Ptr(new IMPL(ngraphOp)));
} catch (const InferenceEngine::Exception& ex) {
return ex.getStatus();
strncpy(resp->msg, ex.what(), sizeof(resp->msg) - 1);
return ex.getStatus() != OK ? ex.getStatus() : GENERAL_ERROR;
}
return OK;
}
Expand Down
69 changes: 38 additions & 31 deletions inference-engine/src/mkldnn_plugin/nodes/gather.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,18 +22,41 @@ namespace InferenceEngine {
namespace Extensions {
namespace Cpu {

using MKLDNNPlugin::TensorDescCreatorTypes;

class GatherImpl: public ExtLayerBase {
public:
// Checks whether this CPU implementation can handle the given ngraph node.
// On rejection, fills 'errorMessage' with a human-readable reason.
// noexcept by contract: any exception from the inspection is treated as
// "not supported".
static bool isSupportedOperation(const ngraph::Node& op, std::string& errorMessage) noexcept {
    try {
        const auto* gather = ngraph::as_type<const ngraph::op::v1::Gather>(&op);
        if (gather == nullptr) {
            errorMessage = "Only opset1 Gather operation is supported";
            return false;
        }

        const auto axisNode = gather->get_input_node_shared_ptr(GATHER_AXIS);
        if (ngraph::as_type_ptr<const ngraph::op::Constant>(axisNode) == nullptr) {
            errorMessage = "Only Constant operation on 'axis' input is supported";
            return false;
        }

        return true;
    } catch (...) {
        return false;
    }
}

explicit GatherImpl(const std::shared_ptr<ngraph::Node>& op) {
try {
errorPrefix_ = std::string("Layer Gather with name '") + op->get_friendly_name() + "' ";

auto gatherOp = ngraph::as_type_ptr<ngraph::op::v1::Gather>(op);
if (!gatherOp)
IE_THROW() << "CPU Gather node doesn't support ngraph operation "
<< gatherOp->get_type_name() << " with name " << gatherOp->get_friendly_name();
std::string errorMessage;
if (!isSupportedOperation(*op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}

if (gatherOp->get_input_size() != 3 || gatherOp->get_output_size() == 0)
auto gatherOp = ngraph::as_type_ptr<ngraph::op::v1::Gather>(op);
if (gatherOp->get_input_size() != 3 || gatherOp->get_output_size() != 1)
IE_THROW() << errorPrefix_ << "has incorrect number of input/output edges!";

Precision inIdxPrecision = details::convertPrecision(gatherOp->get_input_element_type(GATHER_INDEXES));
Expand All @@ -44,16 +67,12 @@ class GatherImpl: public ExtLayerBase {
if (dictionary_dims.size() == 0)
IE_THROW() << errorPrefix_ << "has incorrect input parameters dimension!";

auto axesOp = gatherOp->get_input_node_shared_ptr(GATHER_AXIS);
if (!ngraph::as_type_ptr<ngraph::op::Constant>(axesOp))
IE_THROW() << errorPrefix_ << "supports only Constant op on 'axis' input.";

axis = static_cast<int>(gatherOp->get_axis());
if (axis < 0)
axis += dictionary_dims.size();
// Dictionary must be at least rank axis + 1
IE_ASSERT(-static_cast<int>(dictionary_dims.size()) <= axis && axis < static_cast<int>(dictionary_dims.size()))
<< errorPrefix_ << "has incorrect input parameters dimensions and axis number!";
if (!(-static_cast<int>(dictionary_dims.size()) <= axis && axis < static_cast<int>(dictionary_dims.size())))
IE_THROW() << errorPrefix_ << "has incorrect input parameters dimensions and axis number!";

// Find number of dictionaries, index range and data length
for (int i = 0; i < axis; i++)
Expand All @@ -65,24 +84,12 @@ class GatherImpl: public ExtLayerBase {
if (dataLength == 0)
IE_THROW() << errorPrefix_ << "had incorrect input parameters dimension!";

LayerConfig config;
DataConfig dataConfigIdx, dataConfigDct, dataConfigAxis;
Precision dataPrecision = details::convertPrecision(gatherOp->get_input_element_type(GATHER_DICTIONARY));
dataConfigDct.desc = TensorDesc(dataPrecision, dictionary_dims, TensorDesc::getLayoutByDims(dictionary_dims));
config.inConfs.push_back(dataConfigDct);
const SizeVector& indexes_dims = gatherOp->get_input_shape(GATHER_INDEXES);
dataConfigIdx.desc = TensorDesc(inIdxPrecision, indexes_dims, TensorDesc::getLayoutByDims(indexes_dims));
config.inConfs.push_back(dataConfigIdx);
const SizeVector& axis_dims = gatherOp->get_input_shape(GATHER_AXIS);
dataConfigAxis.desc = TensorDesc(Precision::I32, axis_dims, TensorDesc::getLayoutByDims(axis_dims));
config.inConfs.push_back(dataConfigAxis);

DataConfig dataConfigOut;
const SizeVector& out_dims = gatherOp->get_output_shape(0);
dataConfigOut.desc = TensorDesc(dataPrecision, out_dims, TensorDesc::getLayoutByDims(out_dims));
config.outConfs.push_back(dataConfigOut);
config.dynBatchSupport = false;
confs.push_back(config);

addConfig(op, {{TensorDescCreatorTypes::ncsp, dataPrecision},
{TensorDescCreatorTypes::ncsp, inIdxPrecision},
{TensorDescCreatorTypes::ncsp, Precision::I32}},
{{TensorDescCreatorTypes::ncsp, dataPrecision}});
} catch (InferenceEngine::Exception &ex) {
errorMsg = ex.what();
throw;
Expand Down Expand Up @@ -158,9 +165,9 @@ class GatherImpl: public ExtLayerBase {
size_t numDictionaries = 1;
size_t indexRange = 0;
size_t dataLength = 1;
const size_t GATHER_DICTIONARY = 0;
const size_t GATHER_INDEXES = 1;
const size_t GATHER_AXIS = 2;
static const size_t GATHER_DICTIONARY = 0;
static const size_t GATHER_INDEXES = 1;
static const size_t GATHER_AXIS = 2;

std::string errorPrefix_;
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ bool MKLDNNGenericNode::created(const MKLDNNExtensionManager::Ptr &extMgr) {
extFactory = extMgr->CreateExtensionFactory(ngraphOp);

if (!extFactory)
IE_THROW(NotImplemented) << "Descriptor for generic primitive doesn't exist";
IE_THROW(NotImplemented);

std::vector<InferenceEngine::ILayerImpl::Ptr> impls_no_exec;
InferenceEngine::ResponseDesc resp;
Expand Down
16 changes: 11 additions & 5 deletions inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,9 @@ using namespace MKLDNNPlugin;
using namespace InferenceEngine;
using namespace InferenceEngine::details;

MKLDNNReferenceNode::MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) :
MKLDNNNode(op, eng, cache), ngraphOp(op) {
// Fallback node that runs an operation through its ngraph reference
// implementation. 'errorMessage' carries the accumulated reasons why no
// optimized implementation was created for this op; it is stored so that
// execute() can append it to the error when the reference evaluation also
// fails (see MKLDNNReferenceNode::execute).
MKLDNNReferenceNode::MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache,
                                         const std::string& errorMessage) :
        MKLDNNNode(op, eng, cache), ngraphOp(op), additionalErrorMessage(errorMessage) {
    setType(Reference);
}

Expand Down Expand Up @@ -67,12 +68,17 @@ void MKLDNNReferenceNode::execute(mkldnn::stream strm) {
}

if (!ngraphOp->evaluate(outputs, inputs)) {
IE_THROW(NotImplemented)
<< "Cannot find reference implementation for node " << ngraphOp->get_type_name() << " with name '" << ngraphOp->get_friendly_name() << "'.";
std::string errorDetails = "Unsupported operation of type: " + std::string(ngraphOp->get_type_name()) +
" name: " + std::string(ngraphOp->get_friendly_name());
errorDetails += "\nDetails: \n";
if (!additionalErrorMessage.empty()) {
errorDetails += additionalErrorMessage + "\n";
}
errorDetails += "Cannot fallback on ngraph reference implementation (Ngraph::Node::evaluate() is not implemented)";
IE_THROW(NotImplemented) << errorDetails;
}
}

// A reference node reports itself as successfully created exactly when its
// type tag was set to Reference by the constructor.
bool MKLDNNReferenceNode::created() const {
    const auto nodeType = getType();
    return nodeType == Reference;
}
REG_MKLDNN_PRIM_FOR(MKLDNNReferenceNode, Reference);
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ namespace MKLDNNPlugin {

class MKLDNNReferenceNode : public MKLDNNNode {
public:
MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
MKLDNNReferenceNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache, const std::string& errorMessage);
~MKLDNNReferenceNode() override = default;

void getSupportedDescriptors() override;
Expand All @@ -23,6 +23,7 @@ class MKLDNNReferenceNode : public MKLDNNNode {

private:
const std::shared_ptr<ngraph::Node> ngraphOp;
const std::string additionalErrorMessage;
};

} // namespace MKLDNNPlugin
Expand Down
17 changes: 17 additions & 0 deletions inference-engine/src/mkldnn_plugin/utils/general_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#pragma once

#include <cassert>
#include <inference_engine.hpp>

namespace MKLDNNPlugin {

Expand Down Expand Up @@ -39,5 +40,21 @@ constexpr inline bool implication(bool cause, bool cond) {
return !cause || !!cond;
}

// Returns the exception description with the "[STATUS_CODE] " prefix removed,
// so that bare messages can be aggregated into user-facing error reports.
// The prefix is stripped only when the exception carries a non-OK status
// (status 0 means no prefix was prepended).
inline std::string getExceptionDescWithoutStatus(const InferenceEngine::details::InferenceEngineException& ex) {
    std::string desc = ex.what();
    if (ex.getStatus() != 0) {
        // Single-char find(']') instead of find("]") — same result, cheaper.
        const size_t pos = desc.find(']');
        if (pos != std::string::npos) {
            // Erase up to and including the bracket; additionally swallow the
            // separator that follows it only when it really is a space, so a
            // message like "]x" does not lose its first character.
            size_t eraseLen = pos + 1;
            if (eraseLen < desc.size() && desc[eraseLen] == ' ') {
                ++eraseLen;
            }
            desc.erase(0, eraseLen);
        }
    }

    return desc;
}


} // namespace MKLDNNPlugin

0 comments on commit 78c9430

Please sign in to comment.