diff --git a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h
index 0a879c3cfecc1d..81f0ede0c8dc7f 100644
--- a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h
+++ b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h
@@ -21,14 +21,8 @@ class MemoryDescUtils {
     static MKLDNNMemoryDesc convertToMKLDNNMemoryDesc(const MemoryDesc& desc);
     static MKLDNNMemoryDesc convertToMKLDNNMemoryDesc(const BlockedMemoryDesc& desc);
     static MKLDNNMemoryDesc convertToMKLDNNMemoryDesc(const InferenceEngine::TensorDesc& desc);
-
-private:
-    static BlockedMemoryDesc convertToBlockedDescriptor(const MKLDNNMemoryDesc& inpDesc);
     static BlockedMemoryDesc convertToBlockedDescriptor(const MemoryDesc& desc);
-
-    friend class MKLDNNMemory;
-    friend class MKLDNNGraphOptimizer;
-
+    static BlockedMemoryDesc convertToBlockedDescriptor(const MKLDNNMemoryDesc& inpDesc);
     //static MemoryDescPtr getUndefinedMemoryDesc(const MKLDNNMemoryDesc& desc);
 };
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp
index 5fa089d6e8a443..056d7342361702 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp
@@ -22,6 +22,7 @@
 #include
 #include "common/cpu_memcpy.h"
 #include "common/blocked_desc_creator.h"
+#include <cpu_memory_desc_utils.h>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -162,8 +163,8 @@
         const auto& refConfig = supportedPrimitiveDescriptors[refPdIndex].getConfig();
         auto config = refConfig;
 
-        const auto& order = (refConfig.outConfs[0].desc->as<BlockedMemoryDesc>())->getOrder();
-        const auto& blkDims = (refConfig.outConfs[0].desc->as<BlockedMemoryDesc>())->getBlockDims();
+        const auto &order = refConfig.outConfs[0].desc->as<BlockedMemoryDesc>()->getOrder();
+        const auto &blkDims = refConfig.outConfs[0].desc->as<BlockedMemoryDesc>()->getBlockDims();
         auto numOfDim = blkDims.size();
 
         SizeVector offsets(numOfDim, 0lu);
@@ -182,7 +183,7 @@
         config.outConfs[0].desc = make_unique<BlockedMemoryDesc>(outputPrecision, dstDims, blkDims, order, offset, offsets, strides);
 
         for (size_t i = 0; i < getParentEdges().size(); i++) {
-            const auto& srcBlkDims = (refConfig.inConfs[i].desc->as<BlockedMemoryDesc>())->getBlockDims();
+            const auto& srcBlkDims = refConfig.inConfs[i].desc->as<BlockedMemoryDesc>()->getBlockDims();
             const auto& dims = refConfig.inConfs[i].desc->getShape().getStaticDims();
 
             config.inConfs[i].inPlace = 0;
@@ -433,36 +434,35 @@ void MKLDNNConcatNode::initOptimalPrimitiveDescriptor() {
         }
 
         // reset undefined offsets
-        auto outBlockingDesc = config.outConfs[i].desc->as<BlockedMemoryDesc>();
-        config.outConfs[i].desc = make_unique<BlockedMemoryDesc>(outBlockingDesc->getPrecision(), outBlockingDesc->getShape().getStaticDims(),
-                                                                 outBlockingDesc->getBlockDims(), outBlockingDesc->getOrder());
+        auto outBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*config.outConfs[i].desc);
+        config.outConfs[i].desc = make_unique<BlockedMemoryDesc>(outBlockingDesc.getPrecision(), outBlockingDesc.getShape().getStaticDims(),
+                                                                 outBlockingDesc.getBlockDims(), outBlockingDesc.getOrder());
     }
 
-    auto firstOutBlockingDesc = config.outConfs[0].desc->as<BlockedMemoryDesc>();
+    auto firstOutBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*config.outConfs[0].desc);
     size_t offset = 0;
     for (size_t i = 0; i < config.inConfs.size(); i++) {
-        auto inpDesc = config.inConfs[i].desc->clone();
-        auto inpBlockingDesc = inpDesc->as<BlockedMemoryDesc>();
-        config.inConfs[i].desc = make_unique<BlockedMemoryDesc>(inpBlockingDesc->getPrecision(),
-                                                                inpBlockingDesc->getShape().getStaticDims(),
-                                                                inpBlockingDesc->getBlockDims(),
-                                                                inpBlockingDesc->getOrder(),
-                                                                firstOutBlockingDesc->getOffsetPadding() + offset,
-                                                                firstOutBlockingDesc->getOffsetPaddingToData(),
-                                                                firstOutBlockingDesc->getStrides());
+        auto inpBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*config.inConfs[i].desc);
+        config.inConfs[i].desc = make_unique<BlockedMemoryDesc>(inpBlockingDesc.getPrecision(),
+                                                                inpBlockingDesc.getShape().getStaticDims(),
+                                                                inpBlockingDesc.getBlockDims(),
+                                                                inpBlockingDesc.getOrder(),
+                                                                firstOutBlockingDesc.getOffsetPadding() + offset,
+                                                                firstOutBlockingDesc.getOffsetPaddingToData(),
+                                                                firstOutBlockingDesc.getStrides());
         size_t axisSize = 1;
 
-        auto firstInpBlockingDesc = config.inConfs[0].desc->as<BlockedMemoryDesc>();
-        if (firstInpBlockingDesc->checkGeneralLayout(GeneralLayout::nspc)) {
+        auto firstInpBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*config.inConfs[0].desc);
+        if (firstInpBlockingDesc.checkGeneralLayout(GeneralLayout::nspc)) {
             // This is more general and works for any "direct" Layout (such as nchw or nhwc), but it doesn't work for blocked
-            size_t realAxis = inverseOrder(firstInpBlockingDesc->getOrder(), axis);
-            for (size_t j = realAxis; j < inpBlockingDesc->getBlockDims().size(); j++) {
-                size_t jj = firstInpBlockingDesc->getOrder()[j];
-                axisSize *= inpBlockingDesc->getBlockDims()[jj];
+            size_t realAxis = inverseOrder(firstInpBlockingDesc.getOrder(), axis);
+            for (size_t j = realAxis; j < inpBlockingDesc.getBlockDims().size(); j++) {
+                size_t jj = firstInpBlockingDesc.getOrder()[j];
+                axisSize *= inpBlockingDesc.getBlockDims()[jj];
             }
         } else {
             // This works for nchw and nchw8c/nchw16c
-            for (size_t j = axis; j < inpBlockingDesc->getBlockDims().size(); j++) {
-                axisSize *= inpBlockingDesc->getBlockDims()[j];
+            for (size_t j = axis; j < inpBlockingDesc.getBlockDims().size(); j++) {
+                axisSize *= inpBlockingDesc.getBlockDims()[j];
             }
         }
         offset += axisSize;
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp
index c15a3316db054b..283d92ff6ead1c 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.cpp
@@ -18,6 +18,7 @@
 #include
 #include
 #include "common/cpu_convert.h"
+#include <cpu_memory_desc_utils.h>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -480,13 +481,13 @@ bool MKLDNNConvolutionNode::created() const {
 void MKLDNNConvolutionNode::createDescriptor(const std::vector<const MemoryDesc*>& inputDesc,
                                              const std::vector<const MemoryDesc*>& outputDesc) {
-    auto inDesc = inputDesc[0]->as<MKLDNNMemoryDesc>();
-    auto outDesc = outputDesc[0]->as<MKLDNNMemoryDesc>();
+    auto inDesc = MemoryDescUtils::convertToMKLDNNMemoryDesc(*inputDesc[0]);
+    auto outDesc = MemoryDescUtils::convertToMKLDNNMemoryDesc(*outputDesc[0]);
 
-    memory::data_type wdt = MKLDNNExtensionUtils::IEPrecisionToDataType(inDesc->getPrecision());
+    memory::data_type wdt = MKLDNNExtensionUtils::IEPrecisionToDataType(inDesc.getPrecision());
     memory::data_type bdt = memory::data_type::f32;
 
-    if (inDesc->getPrecision() == Precision::U8 || inDesc->getPrecision() == Precision::I8) {
+    if (inDesc.getPrecision() == Precision::U8 || inDesc.getPrecision() == Precision::I8) {
         wdt = memory::data_type::s8;
     }
@@ -507,14 +508,14 @@ void MKLDNNConvolutionNode::createDescriptor(const std::vector<const MemoryDesc*>& inputDesc,
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.cpp
@@ ... @@
 #include
 #include
 #include
+#include <cpu_memory_desc_utils.h>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -348,11 +349,11 @@ void MKLDNNDeconvolutionNode::createPrimitive() {
 void MKLDNNDeconvolutionNode::createDescriptor(const std::vector<const MemoryDesc*> &inputDesc,
                                                const std::vector<const MemoryDesc*> &outputDesc) {
-    const MKLDNNMemoryDesc* in_candidate = inputDesc[0]->as<MKLDNNMemoryDesc>();
-    const MKLDNNMemoryDesc* out_candidate = outputDesc[0]->as<MKLDNNMemoryDesc>();;
+    const MKLDNNMemoryDesc in_candidate = MemoryDescUtils::convertToMKLDNNMemoryDesc(*inputDesc[0]);
+    const MKLDNNMemoryDesc out_candidate = MemoryDescUtils::convertToMKLDNNMemoryDesc(*outputDesc[0]);
 
     // grouping and autoblicking is not compatible
-    if ((withGroups && !isDW) && (in_candidate->blocksExtended() || out_candidate->blocksExtended()))
+    if ((withGroups && !isDW) && (in_candidate.blocksExtended() || out_candidate.blocksExtended()))
         return;
 
     auto convertDims = [] (const std::vector<ptrdiff_t>& orig_dims) {
@@ -364,25 +365,25 @@ void MKLDNNDeconvolutionNode::createDescriptor(const std::vector<const MemoryDesc*> &inputDesc,
         std::shared_ptr<deconvolution_forward::desc> deconv_desc;
         deconv_desc.reset(new deconvolution_forward::desc(prop_kind::forward_inference, mkldnn::algorithm::deconvolution_direct,
-                                                          *in_candidate, wgh_candidate, *out_candidate,
+                                                          in_candidate, wgh_candidate, out_candidate,
                                                           convertDims(stride), convertDims(dilation),
                                                           convertDims(paddingL), convertDims(paddingR)));
         descs.emplace_back(deconv_desc);
     } else {
         MKLDNNDims weightsDims = MKLDNNDims(weightDims);
-        mkldnn::memory::desc wgh_candidate(weightsDims, in_candidate->getDataType(), memory::format_tag::any);
+        mkldnn::memory::desc wgh_candidate(weightsDims, in_candidate.getDataType(), memory::format_tag::any);
 
         for (auto alg : {mkldnn::algorithm::convolution_winograd, mkldnn::algorithm::convolution_direct}) {
             std::shared_ptr<convolution_forward::desc> conv_desc;
             conv_desc.reset(new convolution_forward::desc(prop_kind::forward_inference, alg,
-                                                          *out_candidate, wgh_candidate, *in_candidate,
+                                                          out_candidate, wgh_candidate, in_candidate,
                                                           convertDims(stride),
                                                           convertDims(dilation),
                                                           convertDims(paddingL),
                                                           convertDims(paddingR)));
 
             std::shared_ptr<convolution_backward_data::desc> deconv_desc;
-            deconv_desc.reset(new convolution_backward_data::desc(alg, *out_candidate, wgh_candidate,
-                                                                  *in_candidate,
+            deconv_desc.reset(new convolution_backward_data::desc(alg, out_candidate, wgh_candidate,
+                                                                  in_candidate,
                                                                   convertDims(stride),
                                                                   convertDims(dilation),
                                                                   convertDims(paddingL),
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp
index 8b4bc82748b53e..a03cbe2d0ba2ae 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp
@@ -125,12 +125,11 @@ struct jit_uni_eltwise_generic : public MKLDNNPlugin::jit_uni_eltwise_kernel, public jit_generator {
             if (eltwiseNode.getFusedWith()[i].get()->getType() == Eltwise) {
                 post_op_emitters.push_back(create_eltwise_emitter(*eltwiseNode.getFusedWith()[i].get(), exec_prc));
             } else if (eltwiseNode.getFusedWith()[i].get()->getType() == FakeQuantize) {
-                IE_THROW() << "[DS] Unimplemented";
-//                auto fakeQuantizeNode = dynamic_cast<MKLDNNFakeQuantizeNode*>(eltwiseNode.getFusedWith()[i].get());
-//                fakeQuantizeNode->appendPostOps(post_ops);
-//
-//                quantization_injectors.push_back(std::make_shared<jit_uni_quantization_injector_f32<isa>>(
-//                        this, post_ops.get()->entry_[post_ops.len() - 1], vmm_d_weights, vmm_d_bias, reg_d_weights, reg_d_bias));
+                auto fakeQuantizeNode = dynamic_cast<MKLDNNFakeQuantizeNode*>(eltwiseNode.getFusedWith()[i].get());
+                fakeQuantizeNode->appendPostOps(post_ops);
+
+                quantization_injectors.push_back(std::make_shared<jit_uni_quantization_injector_f32<isa>>(
+                        this, post_ops.get()->entry_[post_ops.len() - 1], vmm_d_weights, vmm_d_bias, reg_d_weights, reg_d_bias));
             }
         }
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp
index a29f10cc689946..79a9dee8d0f9f6 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp
@@ -19,6 +19,7 @@
 #include "ie_parallel.hpp"
 
 #include
+#include <cpu_memory_desc_utils.h>
 
 // Quantization ranges validation is switched off by default in order to avoid regressions on user side
 // #define VALIDATE_QUANTIZATION_RANGES
@@ -1220,13 +1221,13 @@ void MKLDNNFakeQuantizeNode::createPrimitive() {
     jqp.wei_prc = Precision::FP32;
     jqp.dst_prc = config.outConfs[0].desc->getPrecision();
 
-    auto srcDesc = config.inConfs[0].desc->as<BlockedMemoryDesc>();
-    jqp.s_str = srcDesc->getStrides();
+    auto srcDesc = getParentEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>();
+    jqp.s_str = srcDesc.getStrides();
 
-    auto dstDesc = config.outConfs[0].desc->as<BlockedMemoryDesc>();
-    jqp.d_str = dstDesc->getStrides();
+    auto dstDesc = getChildEdgeAt(0)->getMemory().GetDescWithType<BlockedMemoryDesc>();
+    jqp.d_str = dstDesc.getStrides();
 
-    jqp.is_planar = srcDesc->checkGeneralLayout(GeneralLayout::ncsp) && one_of(srcDesc->getShape().getRank(), 3, 4, 5);
+    jqp.is_planar = srcDesc.checkGeneralLayout(GeneralLayout::ncsp) && one_of(srcDesc.getShape().getRank(), 3, 4, 5);
 
     jqp.op_type = getAlgorithm();
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fullyconnected_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fullyconnected_node.cpp
index e00e3b02d994ba..977e1ac4a22031 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fullyconnected_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fullyconnected_node.cpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include "utils/general_utils.h"
+#include <cpu_memory_desc_utils.h>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -283,7 +284,7 @@ void MKLDNNFullyConnectedNode::createDescriptorInternal(const mkldnn::memory::desc
 void MKLDNNFullyConnectedNode::createDescriptor(const std::vector<const MemoryDesc*> &inputDesc,
                                                 const std::vector<const MemoryDesc*> &outputDesc) {
-    createDescriptorInternal(*inputDesc[0]->as<MKLDNNMemoryDesc>(), *outputDesc[0]->as<MKLDNNMemoryDesc>());
+    createDescriptorInternal(MemoryDescUtils::convertToMKLDNNMemoryDesc(*inputDesc[0]), MemoryDescUtils::convertToMKLDNNMemoryDesc(*outputDesc[0]));
 }
 
 std::unique_ptr<MKLDNNMemoryDesc> MKLDNNFullyConnectedNode::getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) {
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp
index 53c2044d663fa2..c46d195c0e9c0a 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <cpu_memory_desc_utils.h>
 
 using namespace MKLDNNPlugin;
 using namespace InferenceEngine;
@@ -128,7 +129,7 @@ void MKLDNNLrnNode::createDescriptor(const std::vector<const MemoryDesc*> &inputDesc,
                                      const std::vector<const MemoryDesc*> &outputDesc) {
     mkldnn::algorithm alg = isAcrossMaps ? mkldnn::algorithm::lrn_across_channels : mkldnn::algorithm::lrn_within_channel;
     MKLDNNDescriptor desc(std::shared_ptr<mkldnn::lrn_forward::desc>(
-            new mkldnn::lrn_forward::desc(mkldnn::prop_kind::forward_scoring, alg, *inputDesc[0]->as<MKLDNNMemoryDesc>(),
+            new mkldnn::lrn_forward::desc(mkldnn::prop_kind::forward_scoring, alg, MemoryDescUtils::convertToMKLDNNMemoryDesc(*inputDesc[0]),
                                           size, alpha, beta, k)));
     descs.push_back(desc);
 }
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp
index 93290429c150df..0b3ccc9f26dc29 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_pooling_node.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <cpu_memory_desc_utils.h>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -180,8 +181,8 @@ bool MKLDNNPoolingNode::created() const {
 void MKLDNNPoolingNode::createDescriptor(const std::vector<const MemoryDesc*> &inputDesc,
                                          const std::vector<const MemoryDesc*> &outputDesc) {
-    MKLDNNMemoryDesc in_candidate = *inputDesc[0]->as<MKLDNNMemoryDesc>();
-    MKLDNNMemoryDesc out_candidate = *outputDesc[0]->as<MKLDNNMemoryDesc>();
+    MKLDNNMemoryDesc in_candidate = MemoryDescUtils::convertToMKLDNNMemoryDesc(*inputDesc[0]);
+    MKLDNNMemoryDesc out_candidate = MemoryDescUtils::convertToMKLDNNMemoryDesc(*outputDesc[0]);
 
     mkldnn::algorithm alg;
     if (algorithm == PoolingAvg) {
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp
index 1bc131f90fd202..04f96e8e549bf4 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include <cpu_memory_desc_utils.h>
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -119,7 +120,7 @@ bool MKLDNNSoftMaxNode::created() const {
 void MKLDNNSoftMaxNode::createDescriptor(const std::vector<const MemoryDesc*> &inputDesc,
                                          const std::vector<const MemoryDesc*> &outputDesc) {
-    MKLDNNMemoryDesc in_candidate = *inputDesc[0]->as<MKLDNNMemoryDesc>();
+    MKLDNNMemoryDesc in_candidate = MemoryDescUtils::convertToMKLDNNMemoryDesc(*inputDesc[0]);
 
     MKLDNNDescriptor desc(std::shared_ptr<softmax_forward::desc>(
             new softmax_forward::desc(prop_kind::forward_scoring, in_candidate, axis)));
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp
index 5696ac2abca23e..bc16d37c5e7c42 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_split_node.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include "utils/general_utils.h"
+#include <cpu_memory_desc_utils.h>
 
 #define THROW_ERROR IE_THROW() << "Split layer with name '" << getName() <<"' "
@@ -191,9 +192,9 @@ void MKLDNNSplitNode::initSupportedPrimitiveDescriptors() {
         config.inConfs[0].desc = make_unique<BlockedMemoryDesc>(inpPrecision, srcShape.getStaticDims(), blkDims, order, offset, offsets, strides);
 
         for (size_t i = 0; i < outputShapes.size(); i++) {
-            auto outBlockingDesc = refConfig.outConfs[i].desc->as<BlockedMemoryDesc>();
-            const auto& outBlkDims = outBlockingDesc->getBlockDims();
-            const auto& dims = outBlockingDesc->getShape().getStaticDims();
+            auto outBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*refConfig.outConfs[i].desc);
+            const auto& outBlkDims = outBlockingDesc.getBlockDims();
+            const auto& dims = outBlockingDesc.getShape().getStaticDims();
 
             config.outConfs[i].inPlace = 0;
             config.outConfs[i].desc = make_unique<BlockedMemoryDesc>(outPrecision, dims, outBlkDims, order, offset, offsets, strides);
@@ -358,31 +359,31 @@ void MKLDNNSplitNode::initOptimalPrimitiveDescriptor() {
         }
 
         // reset undefined offsets
-        auto inBlockingDesc = config.inConfs[i].desc->as<BlockedMemoryDesc>();
-        config.inConfs[i].desc = make_unique<BlockedMemoryDesc>(inBlockingDesc->getPrecision(),
-                                                                inBlockingDesc->getShape().getStaticDims(),
-                                                                inBlockingDesc->getBlockDims(),
-                                                                inBlockingDesc->getOrder());
+        auto inBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*config.inConfs[i].desc);
+        config.inConfs[i].desc = make_unique<BlockedMemoryDesc>(inBlockingDesc.getPrecision(),
+                                                                inBlockingDesc.getShape().getStaticDims(),
+                                                                inBlockingDesc.getBlockDims(),
+                                                                inBlockingDesc.getOrder());
     }
 
     if (config.outConfs.size() != outputShapes.size())
        THROW_ERROR << "has invalid config";
 
-    auto firstInBlockingDesc = config.inConfs[0].desc->as<BlockedMemoryDesc>();
+    auto firstInBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*config.inConfs[0].desc);
     size_t offset = 0;
     for (size_t i = 0; i < outputShapes.size(); i++) {
         auto outDesc = config.outConfs[i].desc->clone();
-        auto outBlockingDesc = outDesc->as<BlockedMemoryDesc>();
-        config.outConfs[i].desc = make_unique<BlockedMemoryDesc>(outBlockingDesc->getPrecision(),
-                                                                 outBlockingDesc->getShape().getStaticDims(),
-                                                                 outBlockingDesc->getBlockDims(),
-                                                                 outBlockingDesc->getOrder(),
-                                                                 firstInBlockingDesc->getOffsetPadding() + offset,
-                                                                 firstInBlockingDesc->getOffsetPaddingToData(),
-                                                                 firstInBlockingDesc->getStrides());
+        auto outBlockingDesc = MemoryDescUtils::convertToBlockedDescriptor(*outDesc);
+        config.outConfs[i].desc = make_unique<BlockedMemoryDesc>(outBlockingDesc.getPrecision(),
+                                                                 outBlockingDesc.getShape().getStaticDims(),
+                                                                 outBlockingDesc.getBlockDims(),
+                                                                 outBlockingDesc.getOrder(),
+                                                                 firstInBlockingDesc.getOffsetPadding() + offset,
+                                                                 firstInBlockingDesc.getOffsetPaddingToData(),
+                                                                 firstInBlockingDesc.getStrides());
         size_t axisSize = 1;
-        for (size_t j = axis; j < outBlockingDesc->getBlockDims().size(); j++) {
-            axisSize *= outBlockingDesc->getBlockDims()[j];
+        for (size_t j = axis; j < outBlockingDesc.getBlockDims().size(); j++) {
+            axisSize *= outBlockingDesc.getBlockDims()[j];
         }
         offset += axisSize;
     }
@@ -397,7 +398,7 @@ void MKLDNNSplitNode::selectOptimalPrimitiveDescriptor() {
     auto plain = PartialBlkDesc::makePlain(getParentEdgeAt(0)->getShape().getStaticDims());
     for (size_t i = 0; i < supportedPrimitiveDescriptors.size(); ++i) {
         auto& pd = supportedPrimitiveDescriptors[i];
-        if (PartialBlkDesc::extractFrom(*pd.getConfig().inConfs[0].desc->as<BlockedMemoryDesc>()) == plain &&
+        if (PartialBlkDesc::extractFrom(MemoryDescUtils::convertToBlockedDescriptor(*pd.getConfig().inConfs[0].desc)) == plain &&
             impl_desc_type::ref == pd.getImplementationType()) {
             selectPrimitiveDescriptorByIndex(static_cast<int>(i));
             return;
@@ -497,11 +498,11 @@ void MKLDNNSplitNode::prepareOptimizedParams() {
     auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
     if (!selectedPrimitiveDescriptor)
         IE_THROW() << "CPU Split node with name '" << getName() << "' doesn't have primitive descriptors.";
-    const auto inpTensorDesc = selectedPrimitiveDescriptor->getConfig().inConfs[0].desc->as<BlockedMemoryDesc>();
+    const auto inpTensorDesc = MemoryDescUtils::convertToBlockedDescriptor(*selectedPrimitiveDescriptor->getConfig().inConfs[0].desc);
     const auto outputPortsCount = outputShapes.size();
 
     //find axis order position
-    const auto& order = inpTensorDesc->getOrder();
+    const auto& order = inpTensorDesc.getOrder();
     unsigned axisOrderPos = std::numeric_limits<unsigned>::max();
     for (size_t i = 0; i < order.size(); ++i) {
         if (order[i] == axis) {
@@ -513,8 +514,8 @@ void MKLDNNSplitNode::prepareOptimizedParams() {
         THROW_ERROR << "Can't find the axis in the input tensor order list";
     }
 
-    uint8_t srcDataSize = inpTensorDesc->getPrecision().size();
-    const auto& srcDims = inpTensorDesc->getBlockDims();
+    uint8_t srcDataSize = inpTensorDesc.getPrecision().size();
+    const auto& srcDims = inpTensorDesc.getBlockDims();
     const auto getRank = srcDims.size();
 
     optimizedParams.countStrides = 1;
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
index 296be4032b4b23..2cbe8b70185844 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
@@ -73,7 +73,28 @@ std::vector<std::string> disabledTestPatterns() {
         // TODO: 57562 No dynamic output shape support
         R"(.*NonZeroLayerTest.*)",
         // need to implement Export / Import
-        R"(.*IEClassImportExportTestP.*)"
+        R"(.*IEClassImportExportTestP.*)",
+
+
+        // INVESTIGATE
+        // R"(.*FakeQuantizeLayerTest.*)",
+        // R"(.*StaticShapeLoopTest.*)",
+        // R"(.*TrivialLoopTest.*)",
+        // R"(.*TransposeLayerTest.*)",
+        // R"(.*TransposeLayerCPUTest.*)",
+        // R"(.*FuseTransposeAndReorderTest.*)",
+        //
+        // R"(.*TensorIteratorTest.*)",
+
+        // R"(.*FuseScaleShiftAndFakeQuantizeTest.*)",
+        // R"(.*OnnxModelWithCustomAbs.*)",
+        // R"(.*XmlModelWithCustomAbs.*)",
+        // R"(.*Gather_x2_add_mul_relu_concat_matmul.*)",
+
+        // R"(.*SetMean.*)",
+        // R"(.*SetScale.*)",
+        // R"(.*smoke_LPT.*)",
+        // R"(.*GRUSequenceCPUTest.*)", // reorder BF16
+        // R"(.*LSTMSequenceCPUTest.*)", // reorder BF16
+        // R"(.*RNNSequenceCPUTest.*)" // reorder BF16
     };
 #ifdef __APPLE__
     // TODO: Issue 55717