[CPU] Interpolate node migration on nGraph (openvinotoolkit#17)
Maxim Andronov authored and dmitry-gorokhov committed May 4, 2021
1 parent b915d5d commit 114ad33
Showing 9 changed files with 427 additions and 408 deletions.
13 changes: 2 additions & 11 deletions inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp

@@ -1487,12 +1487,7 @@ void MKLDNNGraphOptimizer::FuseInterpolateAndSimpleOperation(MKLDNNGraph &graph)
     auto& graphNodes = graph.GetNodes();
 
     auto isSuitableParentNode = [](MKLDNNNodePtr node) {
-        bool isSuitable = (node->getType() == Interpolate);
-        if (isSuitable) {
-            return node->getChildEdges().size() == 1;
-        } else {
-            return false;
-        }
+        return node->getType() == Interpolate && node->getChildEdges().size() == 1;
     };
 
     auto isSutableChildNode = [&](MKLDNNNodePtr parentNode, MKLDNNNodePtr childNode) {

@@ -1544,11 +1539,7 @@ void MKLDNNGraphOptimizer::FuseNormalizeL2AndSimpleOperation(MKLDNNGraph &graph)
     auto& graphNodes = graph.GetNodes();
 
     auto isSutableParentNode = [](MKLDNNNodePtr node) {
-        if (node->getType() == NormalizeL2) {
-            return node->getChildEdges().size() == 1;
-        } else {
-            return false;
-        }
+        return node->getType() == NormalizeL2 && node->getChildEdges().size() == 1;
     };
 
     auto parent = graphNodes.begin();
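
Both hunks collapse an if/else that returns a boolean into a single expression. For orientation, here is a minimal sketch of the pass skeleton such predicates plug into (illustrative only, assuming the plugin's mkldnn_graph.h/mkldnn_node.h types; the child check and merge step are elided):

// Sketch of the fusing-pass skeleton; the real passes also validate the
// child node and then merge it into the parent before dropping it.
void FuseExampleAndSimpleOperation(MKLDNNGraph &graph) {
    auto& graphNodes = graph.GetNodes();

    auto isSuitableParentNode = [](MKLDNNNodePtr node) {
        // Single consumer only: fusing rewires the parent's sole output edge.
        return node->getType() == Interpolate && node->getChildEdges().size() == 1;
    };

    for (auto parent = graphNodes.begin(); parent != graphNodes.end(); parent++) {
        if (!isSuitableParentNode(*parent))
            continue;
        auto childNode = (*parent)->getChildEdgeAt(0)->getChild();
        // ... check child suitability, then fuse childNode into *parent ...
    }
}
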
41 changes: 40 additions & 1 deletion inference-engine/src/mkldnn_plugin/mkldnn_node.cpp

@@ -41,6 +41,7 @@
 #include <nodes/mkldnn_space_to_depth_node.h>
 #include <nodes/mkldnn_strided_slice_node.h>
 #include <nodes/mkldnn_reference_node.h>
+#include <nodes/mkldnn_quantize_node.h>
 #include <mkldnn_types.h>
 #include <dnnl_types.h>
 #include "mkldnn_extension_utils.h"

@@ -191,7 +192,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
{ "ScatterUpdate", ScatterUpdate},
{ "ScatterElementsUpdate", ScatterElementsUpdate},
{ "ScatterNDUpdate", ScatterNDUpdate},
// { "Interpolate", Interpolate},
{ "Interpolate", Interpolate},
// { "ReduceAnd", ReduceAnd},
// { "ReduceL1", ReduceL1},
// { "ReduceL2", ReduceL2},

@@ -229,6 +230,16 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::en
     algorithm = Algorithm::Undefined;
     fusingPort = -1;
 
+    const std::string errorPrefix = "Ngraph operation " + std::string(op->get_type_name()) + " with name " + op->get_friendly_name();
+    for (size_t i = 0; i < op->get_input_size(); i++) {
+        if (op->get_input_partial_shape(i).is_dynamic())
+            THROW_IE_EXCEPTION << errorPrefix << " has dynamic input shape on " << i << " port, but CPU plug-in supports only static shape";
+    }
+    for (size_t i = 0; i < op->get_output_size(); i++) {
+        if (op->get_output_partial_shape(i).is_dynamic())
+            THROW_IE_EXCEPTION << errorPrefix << " has dynamic output shape on " << i << " port, but CPU plug-in supports only static shape";
+    }
+
     for (size_t i = 0; i < op->get_input_size(); i++) {
         inDims.emplace_back(op->get_input_shape(i));
         originalInputPrecisions.emplace_back(details::convertPrecision(op->get_input_element_type(i)));
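
The new loops reject any operation with a non-static input or output shape as soon as the node is constructed. A hypothetical snippet (not part of the diff) showing an op that would now trigger the exception:

#include <ngraph/opsets/opset4.hpp>

// Hypothetical example: a Parameter whose spatial dimension is dynamic.
// Constructing an MKLDNNNode over it now fails with "... has dynamic
// output shape on 0 port, but CPU plug-in supports only static shape".
auto param = std::make_shared<ngraph::opset4::Parameter>(
        ngraph::element::f32,
        ngraph::PartialShape{1, 3, ngraph::Dimension::dynamic(), 224});
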

@@ -1331,3 +1342,31 @@ MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr<ngraph::Node>

     return newNode;
 }
+
+bool MKLDNNNode::canBePerformedAsScaleShift() const {
+    bool inputsIsConst = true;
+    for (size_t i = 1; i < getParentEdges().size(); i++) {
+        if (!getParentEdgeAt(i)->getParent()->isConstant() || getParentEdgeAt(i)->getParent()->getType() != Input) {
+            inputsIsConst = false;
+        }
+    }
+    return one_of(getAlgorithm(), EltwiseAdd, EltwiseMultiply, EltwiseSubtract, EltwiseDivide, EltwisePrelu, EltwiseMulAdd) && inputsIsConst &&
+           MKLDNNExtensionUtils::isPerTensorOrPerChannelBroadcastable(getParentEdgeAt(0)->getDims().ToSizeVector(),
+                                                                      getParentEdgeAt(1)->getDims().ToSizeVector());
+}
+
+bool MKLDNNNode::canFuseSimpleOperation(const MKLDNNNodePtr& node) const {
+    if (node->getType() == Quantize) {
+        auto* quantizeNode = dynamic_cast<MKLDNNQuantizeNode*>(node.get());
+        if (quantizeNode == nullptr)
+            THROW_IE_EXCEPTION << "Cannot get quantize layer " << node->getName();
+        return !quantizeNode->isBinarization();
+    } else if (node->getType() == Eltwise) {
+        return one_of(node->getAlgorithm(), EltwiseRelu, EltwiseGelu, EltwiseElu, EltwiseSigmoid, EltwiseBoundedRelu, EltwiseClamp, EltwiseTanh,
+                      EltwiseSwish, EltwiseHswish, EltwiseMish, EltwiseHsigmoid, EltwiseRoundHalfToEven,
+                      EltwiseRoundHalfAwayFromZero, EltwiseLinear, EltwiseAbs, EltwiseSquare, EltwiseSqrt) ||
+               node->canBePerformedAsScaleShift();
+    }
+
+    return false;
+}
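
Hosting these two checks on MKLDNNNode lets every migrated node share one fusing policy instead of re-implementing it in each graph-optimizer pass. An illustrative override in a concrete node (a sketch; the migrated Interpolate node's actual hook may be named or structured differently):

// Illustrative node-level fusing hook deferring to the shared policy:
// non-binarization Quantize and simple Eltwise/scale-shift ops qualify.
bool MKLDNNInterpolateNode::canFuse(const MKLDNNNodePtr& node) const {
    return canFuseSimpleOperation(node);
}
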
3 changes: 3 additions & 0 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.h

@@ -666,6 +666,9 @@ class MKLDNNNode : public InferenceEngine::details::no_copy {
     }
 
 protected:
+    bool canBePerformedAsScaleShift() const;
+    bool canFuseSimpleOperation(const MKLDNNNodePtr& node) const;
+
     void setType(Type type) {
         this->type = type;
     }

@@ -1775,7 +1775,7 @@ void MKLDNNEltwiseNode::fuseInto(MKLDNNNodePtr& parentNode) {
     // Handling Convolution custom Add node fusing case which is processed via dnnl append_sum() API.
     bool isSpecialConvolutionAddFusing = parentNode->getType() == Convolution && getAlgorithm() == EltwiseAdd &&
             getParentEdgesAtPort(0)[0]->getDims().ToSizeVector() == getParentEdgesAtPort(1)[0]->getDims().ToSizeVector();
-    if (!isSpecialConvolutionAddFusing && one_of(getAlgorithm(), EltwiseAdd, EltwiseSubtract, EltwiseMultiply, EltwiseDivide, EltwiseMulAdd, EltwisePrelu)) {
+    if (!isSpecialConvolutionAddFusing && canBePerformedAsScaleShift()) {
         fillScalesAndShifts();
     }
     MKLDNNNode::fuseInto(parentNode);
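
canBePerformedAsScaleShift() requires the constant input to broadcast either per-tensor or per-channel, so fillScalesAndShifts() only runs when the fused Eltwise really is a scale/shift. A sketch of the semantics MKLDNNExtensionUtils::isPerTensorOrPerChannelBroadcastable presumably enforces (an assumption about its behavior, not the helper's real code):

#include <algorithm>
#include <cstddef>
#include <vector>

// Assumed semantics: the secondary (constant) shape must supply either one
// value for the whole tensor or one value per channel (axis 1).
bool isPerTensorOrPerChannelBroadcastable(const std::vector<size_t> &dataDims,
                                          const std::vector<size_t> &secondDims) {
    // Per-tensor: the constant is effectively a scalar, e.g. [1,1,1,1].
    if (std::all_of(secondDims.begin(), secondDims.end(),
                    [](size_t d) { return d == 1; }))
        return true;
    // Per-channel: matches the channel axis, 1 elsewhere,
    // e.g. data [N,C,H,W] with a constant of shape [1,C,1,1].
    if (secondDims.size() != dataDims.size() || dataDims.size() < 2)
        return false;
    for (size_t i = 0; i < secondDims.size(); i++) {
        if (secondDims[i] != (i == 1 ? dataDims[i] : 1))
            return false;
    }
    return true;
}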