Skip to content

Commit

Permalink
[CPU] Pad node migration on nGraph (openvinotoolkit#18)
Browse files Browse the repository at this point in the history
  • Loading branch information
Maxim Andronov authored and dmitry-gorokhov committed Mar 25, 2021
1 parent f0b58d7 commit f0dab6a
Show file tree
Hide file tree
Showing 4 changed files with 94 additions and 44 deletions.
2 changes: 1 addition & 1 deletion inference-engine/src/mkldnn_plugin/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ set(LAYERS
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_input_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_lrn_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_memory_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_transpose_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pooling_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_quantize_node.cpp
Expand Down
2 changes: 1 addition & 1 deletion inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
// { "ROIPooling", ROIPooling },
// { "BatchNormalization", BatchNormalization },
// { "Flatten", Flatten },
// { "Pad", Pad },
{ "Pad", Pad },
{ "Transpose", Transpose },
// { "Copy", Copy },
// { "LSTMCell", RNNCell },
Expand Down
124 changes: 82 additions & 42 deletions inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
//

#include "mkldnn_pad_node.h"
#include <legacy/ie_layers.h>
#include <string>
#include <cmath>
#include <mkldnn_types.h>
Expand All @@ -13,54 +12,95 @@
#include "common/cpu_memcpy.h"
#include "utils/bfloat16.hpp"
#include <mkldnn_selective_build.h>
#include <ngraph/opsets/opset1.hpp>

using namespace mkldnn;
using namespace MKLDNNPlugin;
using namespace InferenceEngine;

MKLDNNPadNode::MKLDNNPadNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
: MKLDNNNode(layer, eng, cache) {}
bool MKLDNNPadNode::isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept {
    // Static capability check used by the plugin before constructing the node.
    // Returns true when 'op' is an opset1 Pad this node can execute; otherwise
    // returns false and fills 'errorMessage' with the reason. Must not throw.
    try {
        const auto pad = std::dynamic_pointer_cast<const ngraph::opset1::Pad>(op);
        if (!pad) {
            errorMessage = "Only opset1 Pad operation is supported";
            return false;
        }
        // The paddings (and the optional pad value for CONSTANT mode) must be
        // compile-time constants: the node caches them in its constructor.
        if (std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PADS_BEGIN_ID)) == nullptr ||
            std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PADS_END_ID)) == nullptr ||
            (pad->get_pad_mode() == ngraph::op::PadMode::CONSTANT && pad->get_input_size() == 4 &&
                std::dynamic_pointer_cast<const ngraph::opset1::Constant>(pad->get_input_node_shared_ptr(PAD_VALUE_ID)) == nullptr)) {
            errorMessage = "Only Constant operation on 'pads_begin', 'pads_end', 'pad_value' inputs is supported";
            return false;
        }
        const auto pad_mode = pad->get_pad_mode();
        if (pad_mode != ngraph::op::PadMode::CONSTANT && pad_mode != ngraph::op::PadMode::EDGE && pad_mode != ngraph::op::PadMode::REFLECT &&
                pad_mode != ngraph::op::PadMode::SYMMETRIC) {
            errorMessage = "Has unsupported pad_mode: " + ngraph::as_string(pad_mode);
            return false;
        }
    } catch (...) {
        // Any unexpected failure during inspection means the op is unsupported.
        return false;
    }
    return true;
}

void MKLDNNPadNode::getSupportedDescriptors() {
auto* padLayer = dynamic_cast<PadLayer*>(getCnnLayer().get());
if (padLayer == nullptr)
IE_THROW() << "Cannot convert Pad layer.";
MKLDNNPadNode::MKLDNNPadNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache)
        : MKLDNNNode(op, eng, cache) {
    // Reject anything isSupportedOperation() would not accept up front.
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
        IE_THROW(NotImplemented) << errorMessage;
    }

    errorPrefix = "Pad node with name '" + op->get_friendly_name() + "'";
    const auto pad = std::dynamic_pointer_cast<const ngraph::opset1::Pad>(op);

    // Cache the constant paddings as unsigned values for the execution path.
    for (const auto& dim : pad->get_pads_begin())
        padsBegin.push_back(static_cast<unsigned int>(dim));
    for (const auto& dim : pad->get_pads_end())
        padsEnd.push_back(static_cast<unsigned int>(dim));

    // A fourth input carries the scalar pad value (CONSTANT mode only).
    isPadValueSpecified = pad->get_input_size() == 4;

    switch (pad->get_pad_mode()) {
        case ngraph::op::PadMode::CONSTANT:
            padMode = CONSTANT;
            if (isPadValueSpecified) {
                if (!ngraph::is_scalar(pad->get_input_shape(PAD_VALUE_ID)))
                    IE_THROW() << errorPrefix << " has non scalar 'pad_value' input";
                padValue = std::dynamic_pointer_cast<const ngraph::opset1::Constant>(
                        pad->get_input_node_shared_ptr(PAD_VALUE_ID))->cast_vector<float>()[0];
            }
            break;
        case ngraph::op::PadMode::EDGE:
            padMode = EDGE;
            break;
        case ngraph::op::PadMode::REFLECT:
            padMode = REFLECT;
            break;
        case ngraph::op::PadMode::SYMMETRIC:
            padMode = SYMMETRIC;
            break;
        default:
            // Unreachable: isSupportedOperation() filters other modes.
            break;
    }
}

padsBegin = padLayer->GetParamAsUInts("pads_begin");
padsEnd = padLayer->GetParamAsUInts("pads_end");
void MKLDNNPadNode::getSupportedDescriptors() {
if (getParentEdges().size() != 3 && getParentEdges().size() != 4)
IE_THROW() << errorPrefix << " has incorrect number of input edges";
if (getChildEdges().empty())
IE_THROW() << errorPrefix << "Incorrect number of output edges";

SizeVector srcDims = padLayer->insData[0].lock()->getTensorDesc().getDims();
SizeVector dstDims = padLayer->outData[0]->getTensorDesc().getDims();
const SizeVector srcDims = getParentEdgeAt(DATA_ID)->getDims().ToSizeVector();
const SizeVector dstDims = getChildEdgeAt(DATA_ID)->getDims().ToSizeVector();
if (srcDims.size() != dstDims.size() || padsBegin.size() != srcDims.size() || padsEnd.size() != srcDims.size())
IE_THROW() << padLayer->name << " Incorrect number of input/output dimensions!";

std::string pad_mode = padLayer->GetParamAsString("pad_mode");
if (pad_mode == "constant") {
padMode = CONSTANT;
padValue = padLayer->GetParamAsFloat("pad_value", 0.f);
} else if (pad_mode == "edge") {
padMode = EDGE;
} else if (pad_mode == "reflect") {
padMode = REFLECT;
IE_THROW() << errorPrefix << " has incorrect number of input/output dimensions!";

if (padMode == REFLECT) {
for (size_t i = 0; i < srcDims.size(); i++) {
if ((srcDims[i] - 1) < padsBegin[i] || (srcDims[i] - 1) < padsEnd[i])
IE_THROW() << padLayer->name << " Incorrect padsBegin or padsEnd for 'reflect' pad mode";
IE_THROW() << errorPrefix << " has incorrect padsBegin or padsEnd for 'reflect' pad mode";
}
} else if (pad_mode == "symmetric") {
padMode = SYMMETRIC;
} else if (padMode == SYMMETRIC) {
for (size_t i = 0; i < srcDims.size(); i++) {
if (srcDims[i] < padsBegin[i] || srcDims[i] < padsEnd[i])
IE_THROW() << padLayer->name << " Incorrect padsBegin or padsEnd for 'symmetric' pad mode";
IE_THROW() << errorPrefix << " has incorrect padsBegin or padsEnd for 'symmetric' pad mode";
}
} else {
IE_THROW() << padLayer->name
<< " Incorrect pad_mode. Only constants|edge|reflect|symmetric modes are supported!";
}

if (getParentEdges().size() != 1)
IE_THROW() << "Incorrect number of input edges for layer " << getName();
if (getChildEdges().empty())
IE_THROW() << "Incorrect number of output edges for layer " << getName();
}

void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
Expand All @@ -70,26 +110,26 @@ void MKLDNNPadNode::initSupportedPrimitiveDescriptors() {
std::vector<InferenceEngine::Precision> supportedPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::I32,
InferenceEngine::Precision::BF16, InferenceEngine::Precision::I8,
InferenceEngine::Precision::U8};
InferenceEngine::Precision precision = getCnnLayer()->insData[0].lock()->getPrecision();
InferenceEngine::Precision precision = getOriginalInputPrecisions()[DATA_ID];
if (std::find(supportedPrecisions.begin(), supportedPrecisions.end(), precision) == supportedPrecisions.end())
precision = precision.is_float() ? InferenceEngine::Precision::FP32 : InferenceEngine::Precision::I32;
auto dataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision);

auto srcDims = getParentEdgeAt(0)->getDims();
auto srcDims = getParentEdgeAt(DATA_ID)->getDims();
int numOfDims = srcDims.ToSizeVector().size();

InferenceEngine::LayerConfig config;
config.dynBatchSupport = false;
config.inConfs.resize(1);
config.inConfs.resize(isPadValueSpecified ? 4 : 3);
config.outConfs.resize(1);
config.inConfs[0].inPlace = -1;
config.inConfs[0].constant = false;
config.outConfs[0].inPlace = -1;
config.outConfs[0].constant = false;

auto pushSupportedPrimitiveDescriptor = [&](memory::format_tag memoryFormat) {
config.inConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(0)->getDims(), dataType, memoryFormat);
config.outConfs[0].desc = MKLDNNMemoryDesc(getChildEdgeAt(0)->getDims(), dataType, memoryFormat);
config.inConfs[0].desc = MKLDNNMemoryDesc(getParentEdgeAt(DATA_ID)->getDims(), dataType, memoryFormat);
config.inConfs[1].desc = MKLDNNMemoryDesc(getParentEdgeAt(PADS_BEGIN_ID)->getDims(), memory::data_type::s32, memory::format_tag::x);
config.inConfs[2].desc = MKLDNNMemoryDesc(getParentEdgeAt(PADS_END_ID)->getDims(), memory::data_type::s32, memory::format_tag::x);
if (isPadValueSpecified)
config.inConfs[3].desc = MKLDNNMemoryDesc(getParentEdgeAt(PAD_VALUE_ID)->getDims(), memory::data_type::f32, memory::format_tag::x);
config.outConfs[0].desc = MKLDNNMemoryDesc(getChildEdgeAt(DATA_ID)->getDims(), dataType, memoryFormat);
supportedPrimitiveDescriptors.push_back({config, impl_desc_type::ref, memoryFormat});
};

Expand Down
10 changes: 10 additions & 0 deletions inference-engine/src/mkldnn_plugin/nodes/mkldnn_pad_node.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ class MKLDNNPadNode : public MKLDNNNode {
void execute(mkldnn::stream strm) override;
bool created() const override;

static bool isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept;

private:
enum PadMode {
CONSTANT = 0,
Expand Down Expand Up @@ -62,6 +64,14 @@ class MKLDNNPadNode : public MKLDNNNode {
node->padConstantCommon<T>();
}
};

std::string errorPrefix;
static const size_t DATA_ID = 0;
static const size_t PADS_BEGIN_ID = 1;
static const size_t PADS_END_ID = 2;
static const size_t PAD_VALUE_ID = 3;

bool isPadValueSpecified = false;
};

} // namespace MKLDNNPlugin

0 comments on commit f0dab6a

Please sign in to comment.