diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
index 36d9942b09932f..8d810fc259e503 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
@@ -691,37 +691,6 @@ MKLDNNMVNNode::MKLDNNMVNNode(const std::shared_ptr<ngraph::Node>& op, const mkld
         epsMode_ = INSIDE_SQRT;
         acrossChannels_ = mvnOp->get_across_channels();
     }
-
-    transformTo5DCase(inDataShape);
-}
-
-void MKLDNNMVNNode::transformTo5DCase(const ngraph::Shape& shape) {
-    switch (shape.size()) {
-        // for 1 and 2 rank, if acrossChannels_ is true, adjust shape to fully vectorize under unified 5d procedure.
-        // otherwise there are not enough data in spatial dimension to process in one kernel.
-        case 1 : // C
-            if (acrossChannels_) {
-                shape5D = std::make_tuple(1, 1, 1, 1, shape[0]);
-                acrossChannels_ = false;
-                break;
-            } else {
-                shape5D = std::make_tuple(1, shape[0], 1, 1, 1);
-                break;
-            }
-        case 2 : // NC
-            if (acrossChannels_) {
-                shape5D = std::make_tuple(1, shape[0], 1, shape[1], 1);
-                acrossChannels_ = false;
-                break;
-            } else {
-                shape5D = std::make_tuple(shape[0], shape[1], 1, 1, 1);
-                break;
-            }
-        case 3 : { shape5D = std::make_tuple(shape[0], shape[1], 1, shape[2], 1); break; }
-        case 4 : { shape5D = std::make_tuple(shape[0], shape[1], 1, shape[2], shape[3]); break; }
-        case 5 : { shape5D = std::make_tuple(shape[0], shape[1], shape[2], shape[3], shape[4]); break; }
-        default : { IE_THROW() << "MVN layer with name '" << getName() << "' doesn't support planar layout with rank: " << shape.size(); }
-    }
-}
 
 void MKLDNNMVNNode::getSupportedDescriptors() {
@@ -850,6 +819,7 @@ void MKLDNNMVNNode::createPrimitive() {
     jcp.normalize_variance = normalizeVariance_;
     jcp.across_channels = acrossChannels_;
     SizeVector in_dims = getParentEdgeAt(0)->getDims().ToSizeVector();
+    transformTo5DCase(in_dims);
     int N = 0;
     std::tie(N, jcp.C, jcp.D, jcp.H, jcp.W) = shape5D;
@@ -892,6 +862,35 @@ void MKLDNNMVNNode::createPrimitive() {
         mvn_variance_kernel->create_ker();
 }
 
+void MKLDNNMVNNode::transformTo5DCase(const SizeVector& shape) {
+    switch (shape.size()) {
+        // for 1 and 2 rank, if acrossChannels_ is true, adjust shape to fully vectorize under unified 5d procedure.
+        // otherwise there are not enough data in spatial dimension to process in one kernel.
+        case 1 : // C
+            if (acrossChannels_) {
+                shape5D = std::make_tuple(1, 1, 1, 1, shape[0]);
+                acrossChannels_ = false;
+                break;
+            } else {
+                shape5D = std::make_tuple(1, shape[0], 1, 1, 1);
+                break;
+            }
+        case 2 : // NC
+            if (acrossChannels_) {
+                shape5D = std::make_tuple(1, shape[0], 1, shape[1], 1);
+                acrossChannels_ = false;
+                break;
+            } else {
+                shape5D = std::make_tuple(shape[0], shape[1], 1, 1, 1);
+                break;
+            }
+        case 3 : { shape5D = std::make_tuple(shape[0], shape[1], 1, shape[2], 1); break; }
+        case 4 : { shape5D = std::make_tuple(shape[0], shape[1], 1, shape[2], shape[3]); break; }
+        case 5 : { shape5D = std::make_tuple(shape[0], shape[1], shape[2], shape[3], shape[4]); break; }
+        default : { IE_THROW() << "MVN layer with name '" << getName() << "' doesn't support planar layout with rank: " << shape.size(); }
+    }
+}
+
 void MKLDNNMVNNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) {
     mkldnn::post_ops ops;
     for (auto &node : fusedWith) {
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
index 9ce7e784e406d0..dd0090c3d72079 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
@@ -103,7 +103,7 @@ class MKLDNNMVNNode : public MKLDNNNode {
 
     void setPostOps(mkldnn::primitive_attr &attr, bool initWeights = false);
 
-    void transformTo5DCase(const ngraph::Shape& shape);
+    void transformTo5DCase(const InferenceEngine::SizeVector& shape);
 
     std::tuple<size_t, size_t, size_t, size_t, size_t> shape5D;