[CPU] Permute node migration to nGraph. (#15)
nshchego authored and mandrono committed May 3, 2021
1 parent ea5094a commit 6cfba2b
Showing 17 changed files with 644 additions and 625 deletions.
2 changes: 1 addition & 1 deletion inference-engine/src/mkldnn_plugin/CMakeLists.txt
@@ -29,7 +29,7 @@ set(LAYERS
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_lrn_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_memory_node.cpp
# ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_permute_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_transpose_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pooling_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_quantize_node.cpp
${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reorder_node.cpp
2 changes: 1 addition & 1 deletion inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
@@ -1182,7 +1182,7 @@ MKLDNNNodePtr MKLDNNGraph::InsertReorder(MKLDNNEdgePtr edge, std::string layerNa
InsertNode(edge, newReorder, true);

// Using the method MKLDNNEdge::getDesc() we can check that input and output tensor descriptors are equal.
// Due to the specificity of MKLDNNGraphOptimizer::MergePermuteAndReorder() that isOptimized flag uses, we shouldn't do these checks.
// Due to the specificity of MKLDNNGraphOptimizer::MergeTransposeAndReorder() that isOptimized flag uses, we shouldn't do these checks.
if (!isOptimized) {
newReorder->getParentEdgeAt(0)->getDesc();
newReorder->getChildEdgeAt(0)->getDesc();
106 changes: 42 additions & 64 deletions inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
@@ -14,7 +14,7 @@
#include "nodes/mkldnn_bin_conv_node.h"
#include "nodes/mkldnn_quantize_node.h"
#include "nodes/mkldnn_mvn_node.h"
#include <nodes/mkldnn_permute_node.h>
#include <nodes/mkldnn_transpose_node.h>
#include "nodes/mkldnn_interpolate_node.h"
#include "nodes/mkldnn_input_node.h"

@@ -171,7 +171,7 @@ void MKLDNNGraphOptimizer::ApplyImplSpecificGraphOptimizations(MKLDNNGraph &grap
graph.RemoveDroppedNodes();
#endif

MergePermuteAndReorder(graph);
MergeTransposeAndReorder(graph);
graph.RemoveDroppedNodes();

graph.RemoveDroppedEdges();
@@ -1677,43 +1677,6 @@ void MKLDNNGraphOptimizer::DropDoubleReorders(MKLDNNGraph &graph) {
}
}

void MKLDNNGraphOptimizer::DropConvertReorder(MKLDNNGraph& graph) {
// for (auto input : graph.GetNodes()) {
// if (input->getType() != Input) {
// continue;
// }
//
// auto inTD = input->getCnnLayer().get()->outData[0]->getTensorDesc();
// for (size_t i = 0; i < input->getChildEdges().size(); i++) {
// auto inputEdge = input->getChildEdgeAt(i);
// auto convert = inputEdge->getChild();
// if (convert->getType() == Convert) {
// for (int j = 0; j < convert->getChildEdges().size(); j++) {
// auto convertEdge = convert->getChildEdgeAt(j);
// auto reorder = convertEdge->getChild();
// if (reorder->getType() == Reorder) {
// MKLDNNReorderNode* rn = dynamic_cast<MKLDNNReorderNode*>(reorder.get());
// auto rnOutput = rn->getOutput();
// if (inTD.getPrecision() == rnOutput.getPrecision() &&
// inTD.getLayout() == rnOutput.getLayout() &&
// inTD.getDims() == rnOutput.getDims()) {
// auto avterReorder = reorder->getChildEdgeAt(0)->getChild();
// auto oldEdgeNum = reorder->getChildEdgeAt(0)->getOutputNum();
// reorder->getChildEdgeAt(0)->drop();
// convertEdge->drop();
//
// MKLDNNEdgePtr newEdge(new MKLDNNEdge(input, avterReorder, i, oldEdgeNum));
// graph.GetEdges().push_back(newEdge);
// input->addEdge(newEdge);
// j--;
// }
// }
// }
// }
// }
// }
}

// TODO [NM]: reuse common/general_utils version
bool MKLDNNGraphOptimizer::IsOneOf(Type type, std::vector<Type> types) {
for (auto tp : types) {
@@ -1978,32 +1941,32 @@ void MKLDNNGraphOptimizer::FuseScaleShiftAndQuantize(MKLDNNGraph &graph) {
}
}

void MKLDNNGraphOptimizer::MergePermuteAndReorder(MKLDNNGraph &graph) {
void MKLDNNGraphOptimizer::MergeTransposeAndReorder(MKLDNNGraph &graph) {
auto& graphNodes = graph.GetNodes();

auto isSutableParentNode = [](MKLDNNNodePtr node) {
return node->getType() == Permute && node->getChildEdges().size() == 1;
return node->getType() == Transpose && node->getChildEdges().size() == 1;
};

auto isSutableChildNode = [](MKLDNNNodePtr node) {
return node->getType() == Reorder && node->getChildEdges().size() == 1;
};

// Method checkAscendingSummaryOrder() checks that after the sequential execution of Permute and Reorder nodes,
// the order of the elements in the memory will not change. In other words, that Permute+Reorder is identical permutation.
// Method checkAscendingSummaryOrder() checks that after the sequential execution of Transpose and Reorder nodes,
// the order of the elements in the memory will not change. In other words, that Transpose+Reorder is identical permutation.
auto checkAscendingSummaryOrder = [](std::shared_ptr<MKLDNNNode> &parentNode, std::shared_ptr<MKLDNNNode> &childNode) -> bool {
auto* permuteNode = dynamic_cast<MKLDNNPermuteNode*>(parentNode.get());
auto* transposeNode = dynamic_cast<MKLDNNTransposeNode*>(parentNode.get());
auto* reorderNode = dynamic_cast<MKLDNNReorderNode*>(childNode.get());
if (!permuteNode || !reorderNode) {
if (!transposeNode || !reorderNode) {
return false;
}

auto& permuteOrder = permuteNode->getOrder();
auto& layoutOrder = permuteNode->getSelectedPrimitiveDescriptor()->getConfig().outConfs[0].desc.getBlockingDesc().getOrder();
auto& transposeOrder = transposeNode->getOrder();
auto& layoutOrder = transposeNode->getSelectedPrimitiveDescriptor()->getConfig().outConfs[0].desc.getBlockingDesc().getOrder();
auto& inOrder = reorderNode->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].desc.getBlockingDesc().getOrder();
auto& outOrder = reorderNode->getSelectedPrimitiveDescriptor()->getConfig().outConfs[0].desc.getBlockingDesc().getOrder();

if (permuteOrder.size() != layoutOrder.size() || layoutOrder.size() != inOrder.size() || inOrder.size() != outOrder.size()) {
if (transposeOrder.size() != layoutOrder.size() || layoutOrder.size() != inOrder.size() || inOrder.size() != outOrder.size()) {
return false;
}

@@ -2013,10 +1976,10 @@ void MKLDNNGraphOptimizer::MergePermuteAndReorder(MKLDNNGraph &graph) {
revLayoutOrder[layoutOrder[i]] = i;
}

// newPermuteOrder - Permute layout-aware permutation
auto newPermuteOrder = SizeVector(permuteOrder.size());
for (int i = 0; i < newPermuteOrder.size(); i++) {
newPermuteOrder[i] = layoutOrder[permuteOrder[revLayoutOrder[i]]];
// newTransposeOrder - Transpose layout-aware permutation
auto newTransposeOrder = SizeVector(transposeOrder.size());
for (int i = 0; i < newTransposeOrder.size(); i++) {
newTransposeOrder[i] = layoutOrder[transposeOrder[revLayoutOrder[i]]];
}

// reorderOrder - Reorder layout-aware permutation
@@ -2030,13 +1993,13 @@ void MKLDNNGraphOptimizer::MergePermuteAndReorder(MKLDNNGraph &graph) {
}
}

// summaryOrder - resulting Permute+Reorder permutation
auto summaryOrder = SizeVector(permuteOrder.size());
// summaryOrder - resulting Transpose+Reorder permutation
auto summaryOrder = SizeVector(transposeOrder.size());
for (int i = 0; i < summaryOrder.size(); i++) {
summaryOrder[i] = reorderOrder[newPermuteOrder[i]];
summaryOrder[i] = reorderOrder[newTransposeOrder[i]];
}

// check that Permute+Reorder is the identical permutation
// check that Transpose+Reorder is the identical permutation
for (int i = 0; i < summaryOrder.size(); i++) {
if (summaryOrder[i] != i) {
return false;
@@ -2046,22 +2009,34 @@ void MKLDNNGraphOptimizer::MergePermuteAndReorder(MKLDNNGraph &graph) {
return true;
};

// Permute and Reorder do opposite permutation to each other.
// Transpose and Reorder do opposite permutation to each other.
// Example:
// chain [physical layout: NCHW, logical layout: NCHW] -> Permute(order=0312) -> [physical layout: NWCH, logical layout: NCHW] ->
// chain [physical layout: NCHW, logical layout: NCHW] -> Transpose(order=0312) -> [physical layout: NWCH, logical layout: NCHW] ->
// Reorder(nchw->nhwc) -> [physical layout: NCHW, logical layout: NHWC] can be replaced with Reorder(nchw->nhwc; isOptimized=true)
// which will just reinterprets layout without physical change of the memory.
// Two cases are possible:
// 1) inPrec = outPrec
// In this case, we replace Permute+Reorder pattern with a new Reorder that does nothing.
// In this case, we replace Transpose+Reorder pattern with a new Reorder that does nothing.
// 2) inPrec != outPrec
// As in the first case, we also replace Permute+Reorder pattern with a new Reorder.
// As in the first case, we also replace Transpose+Reorder pattern with a new Reorder.
// Additionally, we insert another Reorder that performs the conversion from the input precision (inPrec)
// to the output precision (outPrec)
auto mergePermuteAndReorder = [&](std::shared_ptr<MKLDNNNode>& parentNode, std::shared_ptr<MKLDNNNode>& childNode) {
auto parentParentNode = parentNode->getParentEdgeAt(0)->getParent();
auto mergeTransposeAndReorder = [&](std::shared_ptr<MKLDNNNode>& parentNode, std::shared_ptr<MKLDNNNode>& childNode) {
auto parentParentNode = parentNode->getParentEdgesAtPort(0)[0]->getParent();
auto parentParentConstNode = parentNode->getParentEdgesAtPort(1)[0]->getParent();
auto childChildNode = childNode->getChildEdgeAt(0)->getChild();

auto &remEdge = parentParentConstNode->getChildEdgeAt(0);
remEdge->drop();
auto& edges = graph.GetEdges();
for (auto it = edges.begin(); it != edges.end(); it++) {
if ((*it) == remEdge) {
edges.erase(it);
parentParentConstNode->remove();
break;
}
}

graph.DropNode(parentNode);
graph.DropNode(childNode);

@@ -2085,6 +2060,9 @@ void MKLDNNGraphOptimizer::MergePermuteAndReorder(MKLDNNGraph &graph) {
break;
}
}
if (!edge) {
IE_THROW() << "Transpose node '" << parentNode->getName() << "' has invalid edges.";
}

auto reorderNode = graph.InsertReorder(edge, reorderlayerName, reorderInDesc, reorderOutDesc, true);

@@ -2111,7 +2089,7 @@ void MKLDNNGraphOptimizer::MergePermuteAndReorder(MKLDNNGraph &graph) {
}

if (checkAscendingSummaryOrder(parentNode, childNode)) {
mergePermuteAndReorder(parentNode, childNode);
mergeTransposeAndReorder(parentNode, childNode);
}
}
}
}
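For reference, the order arithmetic that checkAscendingSummaryOrder() performs can be replayed with the example from the comment above: Transpose(order=0,3,1,2) on a plain layout followed by Reorder(nchw->nhwc) composes into the identity permutation, which is exactly the condition under which the pair is folded into a single optimized Reorder. The following standalone sketch is illustrative only (it is not part of this commit), and the reorderOrder step, which is collapsed in this diff view, is reconstructed here as an assumption.

#include <cstddef>
#include <iostream>
#include <vector>

using SizeVector = std::vector<size_t>;

int main() {
    SizeVector transposeOrder = {0, 3, 1, 2};  // Transpose node order
    SizeVector layoutOrder    = {0, 1, 2, 3};  // Transpose output blocking order (plain nchw)
    SizeVector inOrder        = {0, 1, 2, 3};  // Reorder input blocking order (nchw)
    SizeVector outOrder       = {0, 2, 3, 1};  // Reorder output blocking order (nhwc)

    // Inverse of the layout order.
    SizeVector revLayoutOrder(layoutOrder.size());
    for (size_t i = 0; i < layoutOrder.size(); i++)
        revLayoutOrder[layoutOrder[i]] = i;

    // Transpose permutation expressed against the physical (blocked) layout.
    SizeVector newTransposeOrder(transposeOrder.size());
    for (size_t i = 0; i < newTransposeOrder.size(); i++)
        newTransposeOrder[i] = layoutOrder[transposeOrder[revLayoutOrder[i]]];

    // Reorder permutation: position of each output dimension in the input order
    // (assumed reconstruction of the step collapsed in the diff above).
    SizeVector reorderOrder(outOrder.size());
    for (size_t i = 0; i < outOrder.size(); i++)
        for (size_t j = 0; j < inOrder.size(); j++)
            if (outOrder[i] == inOrder[j]) { reorderOrder[i] = j; break; }

    // Compose the two permutations and check for identity.
    bool isIdentity = true;
    for (size_t i = 0; i < transposeOrder.size(); i++) {
        size_t summary = reorderOrder[newTransposeOrder[i]];
        isIdentity = isIdentity && (summary == i);
        std::cout << summary << " ";
    }
    std::cout << (isIdentity ? "-> identity, Transpose+Reorder can be merged\n"
                             : "-> not identity, keep both nodes\n");
    return 0;
}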
4 changes: 1 addition & 3 deletions inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.h
@@ -36,14 +36,12 @@ class MKLDNNGraphOptimizer {
void FuseNormalizeL2AndSimpleOperation(MKLDNNGraph &graph);

void DropDoubleReorders(MKLDNNGraph& graph);
void DropConvertReorder(MKLDNNGraph& graph);
void AddConvertToReorder(MKLDNNGraph &graph);
void FuseConvolutionAndZeroPoints(MKLDNNGraph &graph);
void FuseBroadcastAndEltwise(MKLDNNGraph &graph);
void FuseEltwiseAndSimple(MKLDNNGraph &graph);
void FuseScaleShiftAndQuantize(MKLDNNGraph &graph);
void FuseClampAndQuantize(MKLDNNGraph &graph);
void MergePermuteAndReorder(MKLDNNGraph &graph);
void MergeTransposeAndReorder(MKLDNNGraph &graph);

bool IsOneOf(Type type, std::vector<Type> types);

8 changes: 4 additions & 4 deletions inference-engine/src/mkldnn_plugin/mkldnn_memory.cpp
@@ -489,8 +489,8 @@ static const std::map<int, std::vector<mkldnn::memory::format_tag>> form_tags_by
mkldnn::memory::format_tag::aBCde4c8b2c,
}}, {6, { // Popular
mkldnn::memory::format_tag::abcdef, // plain
mkldnn::memory::format_tag::acbdef, // permuted
mkldnn::memory::format_tag::defcab, // permuted
mkldnn::memory::format_tag::acbdef, // permute
mkldnn::memory::format_tag::defcab, // permute
mkldnn::memory::format_tag::aBcdef16b, // blocked 16c

mkldnn::memory::format_tag::aBCdef16b16c,
@@ -742,7 +742,7 @@ MKLDNNMemoryDesc::operator InferenceEngine::TensorDesc() const {
MKLDNNMemory::convertToIePrec(desc.data_type()),
SizeVector {begin(dims), end(dims)},
ie_blk_desc };
// TODO: BLOCKED is the most common layout which covers all other permuted layout like NHWC.
// TODO: BLOCKED is the most common layout which covers all other permute layout like NHWC.
// But for some cases we have to specify it more correctly.. may be.. or just keep
// auto detected layout in constructor of TensorDesc.
return res;
@@ -809,7 +809,7 @@ MKLDNNMemoryDesc::MKLDNNMemoryDesc(const TensorDesc& tDesc):
is_descending_strides &= (ie_strides[i-1] >= ie_strides[i]);
}

// TODO: That's strong constrains and can be mitigated. IE::TensorDesc allow to permute blocked dims
// TODO: That's strong constrains and can be mitigated. IE::TensorDesc allow to transpose blocked dims
// and may be we can achieve correct "descending strides" form which allow conversion.
if (!is_descending_strides)
IE_THROW() << "Unsupported case for conversion";
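The strides constraint mentioned in the TODO above can be shown with a short illustrative sketch (not from this commit; the stride values are made-up dense NCHW examples): the TensorDesc conversion is accepted only while the blocked strides are monotonically non-increasing.

#include <cstddef>
#include <iostream>
#include <vector>

// Mirrors the is_descending_strides check in MKLDNNMemoryDesc(const TensorDesc&).
static bool hasDescendingStrides(const std::vector<size_t>& strides) {
    for (size_t i = 1; i < strides.size(); i++)
        if (strides[i - 1] < strides[i])
            return false;
    return true;
}

int main() {
    // Dense 1x3x4x5 NCHW tensor: strides {60, 20, 5, 1} are descending,
    // so the TensorDesc -> MKLDNNMemoryDesc conversion is allowed.
    std::vector<size_t> plain = {60, 20, 5, 1};
    // A descriptor whose blocked dims were transposed can yield non-descending
    // strides, e.g. {20, 60, 5, 1}; that case currently hits the IE_THROW above.
    std::vector<size_t> transposed = {20, 60, 5, 1};

    std::cout << hasDescendingStrides(plain) << "\n";       // prints 1
    std::cout << hasDescendingStrides(transposed) << "\n";  // prints 0
    return 0;
}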
6 changes: 3 additions & 3 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
@@ -30,7 +30,7 @@
#include <nodes/mkldnn_tile_node.h>
#include <nodes/mkldnn_split_node.h>
#include <nodes/mkldnn_pad_node.h>
#include <nodes/mkldnn_permute_node.h>
#include <nodes/mkldnn_transpose_node.h>
#include <nodes/mkldnn_memory_node.hpp>
#include <nodes/mkldnn_mvn_node.h>
#include <nodes/mkldnn_normalize_node.h>
@@ -157,7 +157,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
// { "SoftMax", SoftMax },
// { "Split", Split },
// { "Slice", Split },
// { "Concat", Concatenation },
{ "Concat", Concatenation },
// { "Deconvolution", Deconvolution },
// { "Eltwise", Eltwise },
// { "Mod", Eltwise },
@@ -171,7 +171,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
// { "BatchNormalization", BatchNormalization },
// { "Flatten", Flatten },
// { "Pad", Pad },
// { "Permute", Permute },
{ "Transpose", Transpose },
// { "Copy", Copy },
// { "LSTMCell", RNNCell },
// { "GRUCell", RNNCell },
6 changes: 3 additions & 3 deletions inference-engine/src/mkldnn_plugin/mkldnn_node.h
@@ -61,7 +61,7 @@ enum Type {
DepthToSpace,
Flatten,
Pad,
Permute,
Transpose,
SpaceToDepth,
StridedSlice,
Copy,
@@ -209,8 +209,8 @@ static std::string NameFromType(Type type) {
return "Flatten";
case Pad:
return "Pad";
case Permute:
return "Permute";
case Transpose:
return "Transpose";
case SpaceToDepth:
return "SpaceToDepth";
case StridedSlice:
3 changes: 2 additions & 1 deletion inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -240,7 +240,8 @@ static void Transformation(CNNNetwork& clonedNetwork, const Config& conf) {

pass_config->set_callback<ngraph::pass::MVN6Decomposition>(
[](const_node_ptr &node) -> bool {
return MKLDNNMVNNode::checkAxesSuitability(node);
std::string errorMessage;
return MKLDNNMVNNode::isSupportedOperation(node, errorMessage);
});

pass_config->set_callback<ngraph::pass::SoftmaxFusion>(
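To show the intent of the callback change above, here is a hedged sketch with stand-in types (nothing below is part of the commit or of the real API surface beyond what the diff itself shows); it assumes the usual nGraph convention that a callback returning true disables the matched transformation for that node, leaving MVN to the CPU plugin's own implementation.

#include <memory>
#include <string>

struct Node {};                                      // stand-in for ngraph::Node
using const_node_ptr = std::shared_ptr<const Node>;

// Hypothetical stand-in for MKLDNNMVNNode::isSupportedOperation: reports via
// errorMessage why an op is rejected instead of performing a bare axes check.
static bool isSupportedOperationStub(const const_node_ptr& op, std::string& errorMessage) {
    (void)op;
    errorMessage.clear();                            // empty message -> supported
    return true;
}

// Shape of the callback registered through pass_config->set_callback<MVN6Decomposition>().
static bool mvnDecompositionCallback(const const_node_ptr& node) {
    std::string errorMessage;
    // true: keep the MVN op intact for the CPU node; false: let the pass decompose it.
    return isSupportedOperationStub(node, errorMessage);
}

int main() {
    auto node = std::make_shared<const Node>();
    return mvnDecompositionCallback(node) ? 0 : 1;
}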
