diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
index f5eb1178dc1c59..4e0103b25843db 100644
--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
@@ -32,7 +32,7 @@ set(LAYERS
         ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_input_node.cpp
 #        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_lrn_node.cpp
 #        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_memory_node.cpp
-#        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.cpp
+        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_transpose_node.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pooling_node.cpp
         ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_quantize_node.cpp
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
index a4c9a474fbd722..3d461a616e533b 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
@@ -170,7 +170,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type> type_to_name_tbl = {
 //        { "ROIPooling", ROIPooling },
 //        { "BatchNormalization", BatchNormalization },
 //        { "Flatten", Flatten },
-//        { "Pad", Pad },
+        { "Pad", Pad },
         { "Transpose", Transpose },
 //        { "Copy", Copy },
 //        { "LSTMCell", RNNCell },
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp
index b4e812838f5d4b..d26c2dcf915feb 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp
@@ -3,7 +3,6 @@
 //
 
 #include "mkldnn_pad_node.h"
-#include <legacy/ie_layers.h>
 #include <string>
 #include <cmath>
 #include <mkldnn_types.h>
@@ -13,54 +12,95 @@
 #include "common/cpu_memcpy.h"
 #include "utils/bfloat16.hpp"
 #include <mkldnn_selective_build.h>
+#include <ngraph/opsets/opset1.hpp>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
 using namespace InferenceEngine;
 
-MKLDNNPadNode::MKLDNNPadNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
-        : MKLDNNNode(layer, eng, cache) {}
+bool MKLDNNPadNode::isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept {
+    try {
+        const auto pad = std::dynamic_pointer_cast<const ngraph::opset1::Pad>(op);
+        if (!pad) {
+            errorMessage = "Only opset1 Pad operation is supported";
+            return false;
+        }
+        if (std::dynamic_pointer_cast<ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PADS_BEGIN_ID)) == nullptr ||
+            std::dynamic_pointer_cast<ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PADS_END_ID)) == nullptr ||
+            (pad->get_pad_mode() == ngraph::op::PadMode::CONSTANT && pad->get_input_size() == 4 &&
+             std::dynamic_pointer_cast<ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID)) == nullptr)) {
+            errorMessage = "Only Constant operation on 'pads_begin', 'pads_end', 'pad_value' inputs is supported";
+            return false;
+        }
+        const auto pad_mode = pad->get_pad_mode();
+        if (pad_mode != ngraph::op::PadMode::CONSTANT && pad_mode != ngraph::op::PadMode::EDGE && pad_mode != ngraph::op::PadMode::REFLECT &&
+            pad_mode != ngraph::op::PadMode::SYMMETRIC) {
+            errorMessage = "Has unsupported pad_mode: " + ngraph::as_string(pad_mode);
+            return false;
+        }
+    } catch (...) {
+        return false;
+    }
+    return true;
+}
 
-void MKLDNNPadNode::getSupportedDescriptors() {
-    auto* padLayer = dynamic_cast<PadLayer*>(getCnnLayer().get());
-    if (padLayer == nullptr)
-        IE_THROW() << "Cannot convert Pad layer.";
+MKLDNNPadNode::MKLDNNPadNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
+        : MKLDNNNode(op, eng, cache) {
+    std::string errorMessage;
+    if (isSupportedOperation(op, errorMessage)) {
+        errorPrefix = "Pad node with name '" + op->get_friendly_name() + "'";
+        const auto pad = std::dynamic_pointer_cast<const ngraph::opset1::Pad>(op);
+
+        const auto pb = pad->get_pads_begin();
+        const auto pe = pad->get_pads_end();
+        for (size_t i = 0; i < pb.size(); i++)
+            padsBegin.push_back(static_cast<unsigned int>(pb[i]));
+        for (size_t i = 0; i < pe.size(); i++)
+            padsEnd.push_back(static_cast<unsigned int>(pe[i]));
+
+        const auto pad_mode = pad->get_pad_mode();
+        isPadValueSpecified = pad->get_input_size() == 4;
+        if (pad_mode == ngraph::op::PadMode::CONSTANT) {
+            padMode = CONSTANT;
+            if (isPadValueSpecified) {
+                if (!ngraph::is_scalar(pad->get_input_shape(PAD_VALUE_ID)))
+                    IE_THROW() << errorPrefix << " has non-scalar 'pad_value' input";
+                padValue = std::dynamic_pointer_cast<ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID))->cast_vector<float>()[0];
+            }
+        } else if (pad_mode == ngraph::op::PadMode::EDGE) {
+            padMode = EDGE;
+        } else if (pad_mode == ngraph::op::PadMode::REFLECT) {
+            padMode = REFLECT;
+        } else if (pad_mode == ngraph::op::PadMode::SYMMETRIC) {
+            padMode = SYMMETRIC;
+        }
+    } else {
+        IE_THROW(NotImplemented) << errorMessage;
+    }
+}
 
-    padsBegin = padLayer->GetParamAsUInts("pads_begin");
-    padsEnd = padLayer->GetParamAsUInts("pads_end");
+void MKLDNNPadNode::getSupportedDescriptors() {
+    if (getParentEdges().size() != 3 && getParentEdges().size() != 4)
+        IE_THROW() << errorPrefix << " has incorrect number of input edges";
+    if (getChildEdges().empty())
+        IE_THROW() << errorPrefix << " has incorrect number of output edges";
 
-    SizeVector srcDims = padLayer->insData[0].lock()->getTensorDesc().getDims();
-    SizeVector dstDims = padLayer->outData[0]->getTensorDesc().getDims();
+    const SizeVector srcDims = getParentEdgeAt(DATA_ID)->getDims().ToSizeVector();
+    const SizeVector dstDims = getChildEdgeAt(DATA_ID)->getDims().ToSizeVector();
     if (srcDims.size() != dstDims.size() || padsBegin.size() != srcDims.size() || padsEnd.size() != srcDims.size())
-        IE_THROW() << padLayer->name << " Incorrect number of input/output dimensions!";
-
-    std::string pad_mode = padLayer->GetParamAsString("pad_mode");
-    if (pad_mode == "constant") {
-        padMode = CONSTANT;
-        padValue = padLayer->GetParamAsFloat("pad_value", 0.f);
-    } else if (pad_mode == "edge") {
-        padMode = EDGE;
-    } else if (pad_mode == "reflect") {
-        padMode = REFLECT;
+        IE_THROW() << errorPrefix << " has incorrect number of input/output dimensions!";
+
+    if (padMode == REFLECT) {
         for (size_t i = 0; i < srcDims.size(); i++) {
             if ((srcDims[i] - 1) < padsBegin[i] || (srcDims[i] - 1) < padsEnd[i])
-                IE_THROW() << padLayer->name << " Incorrect padsBegin or padsEnd for 'reflect' pad mode";
+                IE_THROW() << errorPrefix << " has incorrect padsBegin or padsEnd for 'reflect' pad mode";
         }
-    } else if (pad_mode == "symmetric") {
-        padMode = SYMMETRIC;
+    } else if (padMode == SYMMETRIC) {
         for (size_t i = 0; i < srcDims.size(); i++) {
             if (srcDims[i] < padsBegin[i] || srcDims[i] < padsEnd[i])
-                IE_THROW() << padLayer->name << " Incorrect padsBegin or padsEnd for 'symmetric' pad mode";
+                IE_THROW() << errorPrefix << " has incorrect padsBegin or padsEnd for 'symmetric' pad mode";
         }
-    } else {
-        IE_THROW() << padLayer->name
-                   << " Incorrect pad_mode. Only constants|edge|reflect|symmetric modes are supported!";
     }
-
-    if (getParentEdges().size() != 1)
-        IE_THROW() << "Incorrect number of input edges for layer " << getName();
-    if (getChildEdges().empty())
-        IE_THROW() << "Incorrect number of output edges for layer " << getName();
 }
 
 void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
@@ -70,26 +110,26 @@ void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
     std::vector<InferenceEngine::Precision> supportedPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::I32,
                                                                    InferenceEngine::Precision::BF16, InferenceEngine::Precision::I8,
                                                                    InferenceEngine::Precision::U8};
-    InferenceEngine::Precision precision = getCnnLayer()->insData[0].lock()->getPrecision();
+    InferenceEngine::Precision precision = getOriginalInputPrecisions()[DATA_ID];
     if (std::find(supportedPrecisions.begin(), supportedPrecisions.end(), precision) == supportedPrecisions.end())
         precision = precision.is_float() ? InferenceEngine::Precision::FP32 : InferenceEngine::Precision::I32;
     auto dataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision);
 
-    auto srcDims = getParentEdgeAt(0)->getDims();
+    auto srcDims = getParentEdgeAt(DATA_ID)->getDims();
     int numOfDims = srcDims.ToSizeVector().size();
 
     InferenceEngine::LayerConfig config;
     config.dynBatchSupport = false;
-    config.inConfs.resize(1);
+    config.inConfs.resize(isPadValueSpecified ? 4 : 3);
     config.outConfs.resize(1);
-    config.inConfs[0].inPlace = -1;
-    config.inConfs[0].constant = false;
-    config.outConfs[0].inPlace = -1;
-    config.outConfs[0].constant = false;
 
     auto pushSupportedPrimitiveDescriptor = [&](memory::format_tag memoryFormat) {
-        config.inConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(0)->getDims(), dataType, memoryFormat);
-        config.outConfs[0].desc = MKLDNNMemoryDesc(getChildEdgeAt(0)->getDims(), dataType, memoryFormat);
+        config.inConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(DATA_ID)->getDims(), dataType, memoryFormat);
+        config.inConfs[1].desc = MKLDNNMemoryDesc(getParentEdgeAt(PADS_BEGIN_ID)->getDims(), memory::data_type::s32, memory::format_tag::x);
+        config.inConfs[2].desc = MKLDNNMemoryDesc(getParentEdgeAt(PADS_END_ID)->getDims(), memory::data_type::s32, memory::format_tag::x);
+        if (isPadValueSpecified)
+            config.inConfs[3].desc = MKLDNNMemoryDesc(getParentEdgeAt(PAD_VALUE_ID)->getDims(), memory::data_type::f32, memory::format_tag::x);
+        config.outConfs[0].desc = MKLDNNMemoryDesc(getChildEdgeAt(DATA_ID)->getDims(), dataType, memoryFormat);
 
         supportedPrimitiveDescriptors.push_back({config, impl_desc_type::ref, memoryFormat});
     };
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
index f363750ab1420d..8be96b2bea6925 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
@@ -21,6 +21,8 @@ class MKLDNNPadNode : public MKLDNNNode {
     void execute(mkldnn::stream strm) override;
     bool created() const override;
 
+    static bool isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept;
+
 private:
     enum PadMode {
         CONSTANT = 0,
@@ -63,6 +65,14 @@ class MKLDNNPadNode : public MKLDNNNode {
             node->padConstantCommon();
         }
     };
+
+    std::string errorPrefix;
+    static const size_t DATA_ID = 0;
+    static const size_t PADS_BEGIN_ID = 1;
+    static const size_t PADS_END_ID = 2;
+    static const size_t PAD_VALUE_ID = 3;
+
+    bool isPadValueSpecified = false;
 };
 
}  // namespace MKLDNNPlugin
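
Note: the snippet below is an illustrative sketch, not part of the patch. It builds the kind of graph the new `isSupportedOperation` checks accept: an opset1 Pad whose `pads_begin`/`pads_end` inputs are `Constant` nodes and whose optional fourth `pad_value` input is a scalar `Constant` (only relevant in CONSTANT mode). The helper name `makePadFunction` and the concrete shapes are made up for the example.

```cpp
#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

// Hypothetical helper: a graph that satisfies the constraints enforced by
// MKLDNNPadNode::isSupportedOperation (opset1 Pad, Constant pads inputs,
// scalar Constant pad_value for CONSTANT mode).
std::shared_ptr<ngraph::Function> makePadFunction() {
    auto data = std::make_shared<ngraph::opset1::Parameter>(
            ngraph::element::f32, ngraph::Shape{1, 3, 8, 8});
    auto padsBegin = ngraph::opset1::Constant::create(
            ngraph::element::i64, ngraph::Shape{4}, {0, 0, 1, 1});
    auto padsEnd = ngraph::opset1::Constant::create(
            ngraph::element::i64, ngraph::Shape{4}, {0, 0, 2, 2});
    auto padValue = ngraph::opset1::Constant::create(
            ngraph::element::f32, ngraph::Shape{}, {0.0f});  // must be scalar
    auto pad = std::make_shared<ngraph::opset1::Pad>(
            data, padsBegin, padsEnd, padValue, ngraph::op::PadMode::CONSTANT);
    return std::make_shared<ngraph::Function>(ngraph::NodeVector{pad},
                                              ngraph::ParameterVector{data});
}
```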
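The constructor now gates all attribute extraction on `isSupportedOperation()` and throws `NotImplemented` for rejected operations, so callers can probe support before committing to this node. A minimal sketch of that caller-side pattern, assuming a hypothetical `canUseMkldnnPad` helper (not code from this change):

```cpp
#include <memory>
#include <string>
#include <ngraph/node.hpp>
#include "mkldnn_pad_node.h"

// Probe support first; a 'false' result (or a NotImplemented throw from the
// constructor) means another executor should handle this operation.
bool canUseMkldnnPad(const std::shared_ptr<ngraph::Node>& op) {
    std::string errorMessage;
    if (!MKLDNNPlugin::MKLDNNPadNode::isSupportedOperation(op, errorMessage)) {
        // errorMessage names the reason, e.g. non-Constant 'pads_begin'
        // or an unsupported pad_mode.
        return false;
    }
    return true;
}
```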