diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
index c2fa2e2ef4bc44..f2bbc52bdc56bb 100644
--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
@@ -16,182 +16,9 @@ if (ENABLE_CPU_DEBUG_CAPS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCPU_DEBUG_CAPS")
 endif()
 
-file(GLOB SOURCES
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/utils/rt_info/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/emitters/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/ngraph_transformations/op/*.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_split_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reorder_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_input_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_eltwise_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_convert_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reference_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_conv_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_fake_quantize_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_tensoriterator_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_batch_to_space_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_broadcast_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_tile_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_bucketize_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_concat_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_rnn.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_bin_conv_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_ctc_greedy_decoder_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_ctc_loss_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_select_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_transpose_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_topk_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_strided_slice_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_space_to_depth_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_shuffle_channels_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_scatter_update_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_roll_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_roi_pooling_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_roi_align_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_psroi_pooling_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reverse_sequence_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reshape_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reorg_yolo_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_region_yolo_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reduce_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_range_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_one_hot_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_normalize_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_non_max_suppression_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_mvn_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_cum_sum_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_deconv_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_memory_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_math_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_lrn_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_log_softmax_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_interpolate_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_detection_output_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_roifeatureextractor_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_topkrois_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_bag_offset_sum_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_bag_packed_sum_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_bag_sum_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_segments_sum_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_space_to_batch_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_grn_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_tree_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_nd_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_elements_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_extract_image_patches_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_dft_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_detection_output_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_depth_to_space_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_proposal_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_softmax_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_fullyconnected_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_matmul_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pooling_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_def_conv_node.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/proposal_imp.cpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/list.cpp
-)
-
-file(GLOB HEADERS
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/*.hpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/utils/*.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/utils/rt_info/*.hpp
-#        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/*.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_split_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reorder_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_input_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_eltwise_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_convert_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reference_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_conv_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_fake_quantize_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_tensoriterator_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_batch_to_space_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_concat_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_rnn.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_bin_conv_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_broadcast_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_tile_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_bucketize_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_ctc_greedy_decoder_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_ctc_greedy_decoder_seq_len_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_ctc_loss_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_select_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_transpose_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_topk_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_strided_slice_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_space_to_depth_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_shuffle_channels_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_scatter_update_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_roll_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_roi_pooling_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_roi_align_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_psroi_pooling_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reverse_sequence_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reshape_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reorg_yolo_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_region_yolo_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_reduce_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_range_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pad_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_one_hot_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_normalize_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_non_max_suppression_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_mvn_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_cum_sum_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_deconv_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_memory_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_math_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_lrn_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_log_softmax_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_interpolate_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_detection_output_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_generate_proposals_single_image_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_priorgridgenerator_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_roifeatureextractor_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_experimental_detectron_topkrois_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_bag_offset_sum_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_bag_packed_sum_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_bag_sum_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_embedding_segments_sum_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_space_to_batch_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_grn_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_tree_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_nd_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_gather_elements_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_extract_image_patches_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_dft_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_detection_output_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_depth_to_space_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_proposal_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_softmax_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_fullyconnected_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_matmul_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_pooling_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/mkldnn_def_conv_node.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/proposal_imp.hpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/base.hpp
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/*.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/nodes/common/*.hpp
-)
-
-#file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
-#file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h
-#                          ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
+file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h
+                          ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
 
 addVersionDefines(mkldnn_plugin.cpp CI_BUILD_NUMBER)
diff --git a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp
index 96a454dd9c373e..8cf978b7d51d01 100644
--- a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp
+++ b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp
@@ -6,9 +6,11 @@
 #include "cpu_memory_desc_utils.h"
 #include "mkldnn_memory.h"
 #include "utils/general_utils.h"
+#include "utils/cpu_utils.hpp"
 #include
 #include
 #include
+#include
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -380,4 +382,13 @@ MemoryDescPtr MemoryDescUtils::resetOffset(const MemoryDesc* desc) {
     return desc->clone();
 }
 
+InferenceEngine::Blob::Ptr MemoryDescUtils::interpretAsBlob(const MKLDNNMemory &mem) {
+    // TODO [DS]: Rewrite when IE is moved to the new TensorDescriptor
+    auto& memDesc = mem.GetDesc();
+    InferenceEngine::TensorDesc desc = convertToTensorDesc(memDesc);
+
+    desc = InferenceEngine::TensorDesc(desc.getPrecision(), memDesc.getShape().getStaticDims(), desc.getBlockingDesc());
+    return MKLDNNPlugin::isEmptyTensorDesc(desc) ? make_blob_with_precision(desc) : make_blob_with_precision(desc, mem.GetData());
+}
+
 } // namespace MKLDNNPlugin
diff --git a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h
index ed5ade8a3e4b60..bb938099d15254 100644
--- a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h
+++ b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h
@@ -14,6 +14,7 @@ namespace MKLDNNPlugin {
 
 class MKLDNNMemoryDesc;
 class BlockedMemoryDesc;
+class MKLDNNMemory;
 
 class MemoryDescUtils {
 public:
@@ -26,6 +27,7 @@ class MemoryDescUtils {
     static MemoryDescPtr applyUndefinedOffset(const MKLDNNMemoryDesc& desc);
     static MemoryDescPtr applyUndefinedOffset(const BlockedMemoryDesc& desc);
    static MemoryDescPtr resetOffset(const MemoryDesc* desc);
+    static InferenceEngine::Blob::Ptr interpretAsBlob(const MKLDNNMemory& mem);
 };
 
 } // namespace MKLDNNPlugin
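A minimal usage sketch of the new helper, for reviewers. The inspect() harness below is hypothetical; interpretAsBlob, MKLDNNMemory, and the aliasing behavior come from the hunks above.

#include "cpu_memory_desc_utils.h"
#include "mkldnn_memory.h"

// For a non-empty descriptor the returned blob aliases mem.GetData() (no copy),
// so it is only valid while the underlying MKLDNNMemory stays alive; an empty
// descriptor yields a blob with no data pointer attached.
void inspect(const MKLDNNPlugin::MKLDNNMemory& mem) {
    InferenceEngine::Blob::Ptr blob = MKLDNNPlugin::MemoryDescUtils::interpretAsBlob(mem);
    const auto& td = blob->getTensorDesc();
    (void)td.getDims();       // static dims taken from the memory descriptor
    (void)td.getPrecision();  // precision carried over from the converted desc
}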
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
index 19fd0ed94f7dc8..0596182454c96c 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
@@ -947,13 +947,13 @@ Config MKLDNNGraph::getProperty() const {
 
 void MKLDNNGraph::getInputBlobs(InferenceEngine::BlobMap &resp) {
     for (auto &it : inputNodesMap) {
-        resp[it.first] = convertMemToBlob(it.second->getChildEdgeAt(0)->getMemory());
+        resp[it.first] = MemoryDescUtils::interpretAsBlob(it.second->getChildEdgeAt(0)->getMemory());
     }
 }
 
 void MKLDNNGraph::getOutputBlobs(InferenceEngine::BlobMap &resp) {
     for (auto &it : outputNodesMap) {
-        resp[it.first] = convertMemToBlob(it.second->getParentEdgeAt(0)->getMemory());
+        resp[it.first] = MemoryDescUtils::interpretAsBlob(it.second->getParentEdgeAt(0)->getMemory());
     }
 }
 
@@ -1188,12 +1188,3 @@ void MKLDNNGraph::EnforceBF16() {
 InferenceEngine::CNNNetwork MKLDNNGraph::dump() const {
     return dump_graph_as_ie_ngraph_net(*this);
 }
-
-InferenceEngine::Blob::Ptr MKLDNNGraph::convertMemToBlob(const MKLDNNMemory &mem) const {
-    // TODO [DS]: Rewrite when IE is moved to the new TensorDescriptor
-    auto& memDesc = mem.GetDesc();
-    InferenceEngine::TensorDesc desc = MemoryDescUtils::convertToTensorDesc(memDesc);
-
-    desc = InferenceEngine::TensorDesc(desc.getPrecision(), memDesc.getShape().getStaticDims(), desc.getBlockingDesc());
-    return isEmptyTensorDesc(desc) ? make_blob_with_precision(desc) : make_blob_with_precision(desc, mem.GetData());
-}
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.h b/inference-engine/src/mkldnn_plugin/mkldnn_graph.h
index 48443a25cc197f..7eb028c3c217bb 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.h
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.h
@@ -226,7 +226,6 @@ class MKLDNNGraph {
 
 private:
     void EnforceBF16();
-    InferenceEngine::Blob::Ptr convertMemToBlob(const MKLDNNMemory& mem) const;
 };
 
 } // namespace MKLDNNPlugin
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
index 41e0efcc5d978c..5da413d4db50e2 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
@@ -1190,20 +1190,19 @@ MKLDNNNode* MKLDNNNode::NodesFactory::create(const std::shared_ptr
                                              const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
     MKLDNNNode *newNode = nullptr;
     std::string errorMessage;
-    // TODO [DS]: uncomment
-//    try {
-//        std::unique_ptr<MKLDNNNode> ol(createNodeIfRegistered(MKLDNNPlugin, Generic, op, eng, w_cache));
-//        if (ol != nullptr && ol->created(extMgr))
-//            newNode = ol.release();
-//    } catch (const InferenceEngine::Exception& ex) {
-//        IE_SUPPRESS_DEPRECATED_START
-//        if (ex.getStatus() != NOT_IMPLEMENTED) {
-//            throw;
-//        } else {
-//            errorMessage += getExceptionDescWithoutStatus(ex);
-//        }
-//        IE_SUPPRESS_DEPRECATED_END
-//    }
+    try {
+        std::unique_ptr<MKLDNNNode> ol(createNodeIfRegistered(MKLDNNPlugin, Generic, op, eng, w_cache));
+        if (ol != nullptr && ol->created(extMgr))
+            newNode = ol.release();
+    } catch (const InferenceEngine::Exception& ex) {
+        IE_SUPPRESS_DEPRECATED_START
+        if (ex.getStatus() != NOT_IMPLEMENTED) {
+            throw;
+        } else {
+            errorMessage += getExceptionDescWithoutStatus(ex);
+        }
+        IE_SUPPRESS_DEPRECATED_END
+    }
 
     if (newNode == nullptr) {
         try {
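The restored block is the usual two-stage creation pattern: try the registered (Generic) extension factory first, treat NOT_IMPLEMENTED as a soft failure, and fall through to the built-in typed path. A self-contained sketch of the same control flow, with illustrative stand-in names rather than plugin API:

#include <memory>
#include <stdexcept>
#include <string>

struct Node { virtual ~Node() = default; };
struct NotImplemented : std::runtime_error { using std::runtime_error::runtime_error; };

std::unique_ptr<Node> createFromFactory(const std::string& type);  // may throw NotImplemented
std::unique_ptr<Node> createTyped(const std::string& type);        // built-in fallback path

std::unique_ptr<Node> createNode(const std::string& type, std::string& errorMessage) {
    std::unique_ptr<Node> node;
    try {
        node = createFromFactory(type);
    } catch (const NotImplemented& ex) {
        errorMessage += ex.what();  // soft failure: record it and fall through
    }                               // any other exception propagates, as in the hunk above
    if (!node)
        node = createTyped(type);
    return node;
}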
diff --git a/inference-engine/src/mkldnn_plugin/nodes/base.hpp b/inference-engine/src/mkldnn_plugin/nodes/base.hpp
deleted file mode 100644
index 33f3f411cef800..00000000000000
--- a/inference-engine/src/mkldnn_plugin/nodes/base.hpp
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include "nodes/list.hpp"
-#include "common/blocked_desc_creator.h"
-#include "ngraph/descriptor/tensor.hpp"
-#include
-#include "cpu_types.h"
-
-#include
-#include
-
-namespace InferenceEngine {
-namespace Extensions {
-namespace Cpu {
-
-class ExtLayerBase: public ILayerExecImpl {
-public:
-    StatusCode getSupportedConfigurations(std::vector<LayerConfig>& conf, ResponseDesc *resp) noexcept override {
-        if (!errorMsg.empty()) {
-            if (resp) {
-                errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
-            }
-            return GENERAL_ERROR;
-        }
-        conf = confs;
-        return OK;
-    }
-
-    StatusCode init(LayerConfig& config, ResponseDesc *resp) noexcept override {
-        for (auto& input : config.inConfs) {
-            for (auto& offset : input.desc.getBlockingDesc().getOffsetPaddingToData()) {
-                if (offset) {
-                    return GENERAL_ERROR;
-                }
-            }
-            if (input.desc.getBlockingDesc().getOffsetPadding()) {
-                return GENERAL_ERROR;
-            }
-        }
-        for (auto& output : config.outConfs) {
-            for (auto& offset : output.desc.getBlockingDesc().getOffsetPaddingToData()) {
-                if (offset) {
-                    return GENERAL_ERROR;
-                }
-            }
-            if (output.desc.getBlockingDesc().getOffsetPadding()) {
-                return GENERAL_ERROR;
-            }
-        }
-        return OK;
-    }
-
-protected:
-    MKLDNNPlugin::Algorithm getAlgorithm() const {
-        return algorithm;
-    }
-    MKLDNNPlugin::Algorithm algorithm;
-
-    class DataConfigurator {
-    public:
-        DataConfigurator(MKLDNNPlugin::TensorDescCreatorTypes tensorDescType, Precision prc = Precision::UNSPECIFIED, bool constant = false,
-                         int inplace = -1) :
-                tensorDescCreator(getTensorDescCreator(tensorDescType)), prc(prc), constant(constant), inplace(inplace) {}
-
-        DataConfigurator(const MKLDNNPlugin::BlockedDescCreator::CreatorConstPtr& tensorDescCreator, Precision prc = Precision::UNSPECIFIED,
-                         bool constant = false, int inplace = -1) :
-                tensorDescCreator(tensorDescCreator), prc(prc), constant(constant), inplace(inplace) {}
-
-        const MKLDNNPlugin::BlockedDescCreator::CreatorConstPtr tensorDescCreator;
-        const bool constant = false;
-        const int inplace = -1;
-        const Precision prc = Precision::UNSPECIFIED; // By default ngraph node precision is used
-    private:
-        static MKLDNNPlugin::BlockedDescCreator::CreatorConstPtr getTensorDescCreator(MKLDNNPlugin::TensorDescCreatorTypes tensorDescType) {
-            auto& creators = MKLDNNPlugin::BlockedDescCreator::getCommonCreators();
-            if (creators.find(tensorDescType) == creators.end()) {
-                IE_THROW() << "Cannot find tensor descriptor creator";
-            }
-            return creators.at(tensorDescType);
-        }
-    };
-
-    void addConfig(const std::shared_ptr<ngraph::Node>& op,
-                   const std::vector<DataConfigurator>& inDataConfigurators,
-                   const std::vector<DataConfigurator>& outDataConfigurators,
-                   bool dynBatchSupport = false) {
-        LayerConfig config;
-
-        if (inDataConfigurators.size() != op->get_input_size())
-            IE_THROW() << "Cannot add config for operation " << op->get_friendly_name() << ". Incorrect number of inputs: " <<
-                       "expected: " << op->get_input_size() << ", provided: " << inDataConfigurators.size();
-        if (outDataConfigurators.size() != op->get_output_size())
-            IE_THROW() << "Cannot add config for operation " << op->get_friendly_name() << ". Incorrect number of outputs: " <<
-                       "expected: " << op->get_output_size() << ", provided: " << outDataConfigurators.size();
-
-        auto fill_port = [] (const DataConfigurator& dataConfigurator, const ngraph::descriptor::Tensor& tensor, std::vector<DataConfig>& port) -> bool {
-            // In order to simplify particular node initialization logic we just don't add config in case target shape is not supported by tensorDescCreator.
-            // This should be suitable for major of scenarios since almost all nodes add `ncsp` tensorDescCreator which supports any shape rank.
-            if (tensor.get_shape().size() < dataConfigurator.tensorDescCreator->getMinimalRank())
-                return false;
-
-            auto precision = dataConfigurator.prc != Precision::UNSPECIFIED ? dataConfigurator.prc : details::convertPrecision(tensor.get_element_type());
-
-            DataConfig dataConfig;
-            dataConfig.inPlace = dataConfigurator.inplace;
-            dataConfig.constant = dataConfigurator.constant;
-            dataConfig.desc = dataConfigurator.tensorDescCreator->createDesc(precision, tensor.get_shape());
-
-            port.push_back(dataConfig);
-
-            return true;
-        };
-
-        for (size_t i = 0; i < inDataConfigurators.size(); i++)
-            if (!fill_port(inDataConfigurators[i], op->get_input_tensor(i), config.inConfs))
-                return;
-
-        for (size_t i = 0; i < outDataConfigurators.size(); i++)
-            if (!fill_port(outDataConfigurators[i], op->get_output_tensor(i), config.outConfs))
-                return;
-
-        config.dynBatchSupport = dynBatchSupport;
-        confs.push_back(config);
-    }
-
-    std::string errorMsg;
-    std::vector<LayerConfig> confs;
-};
-
-template <class IMPL>
-class ImplFactory : public ILayerImplFactory {
-public:
-    explicit ImplFactory(const std::shared_ptr<ngraph::Node>& op) : ngraphOp(op) {}
-
-    // First implementation has more priority than next
-    StatusCode getImplementations(std::vector<ILayerImpl::Ptr>& impls, ResponseDesc *resp) noexcept override {
-        try {
-            impls.push_back(ILayerImpl::Ptr(new IMPL(ngraphOp)));
-        } catch (const InferenceEngine::Exception& ex) {
-            strncpy(resp->msg, ex.what(), sizeof(resp->msg) - 1);
-            IE_SUPPRESS_DEPRECATED_START
-            return ex.getStatus() != OK ? ex.getStatus() : GENERAL_ERROR;
-            IE_SUPPRESS_DEPRECATED_END
-        }
-        return OK;
-    }
-protected:
-    const std::shared_ptr<ngraph::Node> ngraphOp;
-};
-
-#define REG_FACTORY_FOR(__prim, __type) \
-    void __prim ## __type(MKLDNNExtensions * extInstance) { \
-        using namespace MKLDNNPlugin; \
-        extInstance->layersFactory.registerNodeIfRequired(MKLDNNPlugin, __type, OV_PP_TOSTRING(__type), ImplFactory<__prim>); \
-    }
-
-} // namespace Cpu
-} // namespace Extensions
-} // namespace InferenceEngine
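For the record, the deleted ImplFactory boiled down to one idea: construct the implementation from the op and translate a throwing constructor into a status code, so REG_FACTORY_FOR could register one factory per op type. A reduced, self-contained sketch of that pattern (all types here are stand-ins, not the removed API):

#include <memory>
#include <vector>

enum StatusCode { OK = 0, GENERAL_ERROR = -1 };

struct ILayerImpl {
    using Ptr = std::shared_ptr<ILayerImpl>;
    virtual ~ILayerImpl() = default;
};

// Construct IMPL from the op; a throw means "this op is not supported here".
template <class IMPL, class Op>
StatusCode getImplementations(const std::shared_ptr<Op>& op, std::vector<ILayerImpl::Ptr>& impls) {
    try {
        impls.push_back(std::make_shared<IMPL>(op));  // first entry has highest priority
    } catch (...) {
        return GENERAL_ERROR;
    }
    return OK;
}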
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp
index 86f89ccea7c480..ef87345daae9a1 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.cpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include "cpu_memory_desc_utils.h"
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
@@ -22,6 +23,42 @@ void MKLDNNGenericNode::getSupportedDescriptors() {
     }
 }
 
+NodeConfig MKLDNNGenericNode::convertLayerToNodeConfig(const InferenceEngine::LayerConfig &layerConfig) {
+    NodeConfig config;
+    config.dynBatchSupport = layerConfig.dynBatchSupport;
+    config.inConfs.resize(layerConfig.inConfs.size());
+    for (size_t i = 0; i < layerConfig.inConfs.size(); i++) {
+        config.inConfs[i].inPlace = layerConfig.inConfs[i].inPlace;
+        config.inConfs[i].constant = layerConfig.inConfs[i].constant;
+        config.inConfs[i].desc = MemoryDescUtils::convertToMKLDNNMemoryDesc(layerConfig.inConfs[i].desc).clone();
+    }
+    config.outConfs.resize(layerConfig.outConfs.size());
+    for (size_t i = 0; i < layerConfig.outConfs.size(); i++) {
+        config.outConfs[i].inPlace = layerConfig.outConfs[i].inPlace;
+        config.outConfs[i].constant = layerConfig.outConfs[i].constant;
+        config.outConfs[i].desc = MemoryDescUtils::convertToMKLDNNMemoryDesc(layerConfig.outConfs[i].desc).clone();
+    }
+    return config;
+}
+
+InferenceEngine::LayerConfig MKLDNNGenericNode::convertNodeToLayerConfig(const NodeConfig &nodeConfig) {
+    InferenceEngine::LayerConfig config;
+    config.dynBatchSupport = nodeConfig.dynBatchSupport;
+    config.inConfs.resize(nodeConfig.inConfs.size());
+    for (size_t i = 0; i < nodeConfig.inConfs.size(); i++) {
+        config.inConfs[i].inPlace = nodeConfig.inConfs[i].inPlace;
+        config.inConfs[i].constant = nodeConfig.inConfs[i].constant;
+        config.inConfs[i].desc = MemoryDescUtils::convertToTensorDesc(*nodeConfig.inConfs[i].desc);
+    }
+    config.outConfs.resize(nodeConfig.outConfs.size());
+    for (size_t i = 0; i < nodeConfig.outConfs.size(); i++) {
+        config.outConfs[i].inPlace = nodeConfig.outConfs[i].inPlace;
+        config.outConfs[i].constant = nodeConfig.outConfs[i].constant;
+        config.outConfs[i].desc = MemoryDescUtils::convertToTensorDesc(*nodeConfig.outConfs[i].desc);
+    }
+    return config;
+}
+
 void MKLDNNGenericNode::initSupportedPrimitiveDescriptors() {
     if (!supportedPrimitiveDescriptors.empty())
         return;
@@ -35,7 +72,7 @@ void MKLDNNGenericNode::initSupportedPrimitiveDescriptors() {
         }
 
         for (auto& config : configs) {
-            supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::unknown);
+            supportedPrimitiveDescriptors.emplace_back(convertLayerToNodeConfig(config), impl_desc_type::unknown);
         }
     }
     if (impls.empty()) {
@@ -109,9 +146,9 @@ void MKLDNNGenericNode::execLayer() {
     std::vector<InferenceEngine::Blob::Ptr> inputs;
     std::vector<InferenceEngine::Blob::CPtr> constInputs;
     std::vector<InferenceEngine::TensorDesc> inputDescs;
-    std::vector<InferenceEngine::SizeVector> outputShapes;
+    std::vector<InferenceEngine::SizeVector> execOutputShapes;
     for (size_t i = 0; i < getParentEdges().size(); i++) {
-        auto inputBlob = getParentEdgeAt(i)->getBlob();
+        auto inputBlob = MemoryDescUtils::interpretAsBlob(getParentEdgeAt(i)->getMemory());
         inputs.push_back(inputBlob);
         constInputs.push_back(inputBlob);
         if (isDynBatch && dynBatchLim >= inputs[inputs.size() - 1]->getTensorDesc().getDims()[0]) {
@@ -137,14 +174,14 @@ void MKLDNNGenericNode::execLayer() {
         }
     }
     std::vector<InferenceEngine::Blob::Ptr> outputs;
-    for (size_t i = 0; i < outDims.size(); i++) {
+    for (size_t i = 0; i < outputShapes.size(); i++) {
         if (isDynBatch) {
             auto out_edge = getChildEdgesAtPort(i)[0];
-            auto td = out_edge->getBlob()->getTensorDesc();
-            td.setDims(outputShapes[i]);
+            auto td = MemoryDescUtils::convertToTensorDesc(out_edge->getMemory().GetDesc());
+            td.setDims(execOutputShapes[i]);
             outputs.push_back(make_blob_with_precision(td, out_edge->getMemory().GetData()));
         } else {
-            outputs.push_back(getChildEdgesAtPort(i)[0]->getBlob());
+            outputs.push_back(MemoryDescUtils::interpretAsBlob(getChildEdgesAtPort(i)[0]->getMemory()));
         }
     }
     InferenceEngine::ResponseDesc resp;
@@ -154,8 +191,8 @@ void MKLDNNGenericNode::execLayer() {
     }
 }
 
-void MKLDNNGenericNode::initDescriptor(const InferenceEngine::LayerConfig &config) {
-    InferenceEngine::LayerConfig rightConfig = config;
+void MKLDNNGenericNode::initDescriptor(const NodeConfig &config) {
+    NodeConfig rightConfig = config;
     InferenceEngine::StatusCode rc;
     InferenceEngine::ResponseDesc resp;
@@ -190,14 +227,15 @@ void MKLDNNGenericNode::initDescriptor(const InferenceEngine::LayerConfi
     impls.clear();
     impls.emplace_back(selectedImpl);
-    rc = impls[0]->init(rightConfig, &resp);
+    auto ieConfig = convertNodeToLayerConfig(rightConfig);
+    rc = impls[0]->init(ieConfig, &resp);
     if (rc != InferenceEngine::OK) {
         IE_THROW() << resp.msg;
     }
-
+    rightConfig = convertLayerToNodeConfig(ieConfig);
     auto descriptor = getSelectedPrimitiveDescriptor();
     if (descriptor != nullptr) {
-        descriptor->getConfig() = rightConfig;
+        descriptor->setConfig(rightConfig);
     }
     bool isConst = !rightConfig.inConfs.empty() || !rightConfig.outConfs.empty();
     for (const auto &inConf : rightConfig.inConfs) {
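The initDescriptor() change above round-trips between the plugin-side NodeConfig and the legacy InferenceEngine::LayerConfig so that ILayerExecImpl::init() can still adjust the config in place. A stand-alone sketch of that round-trip idea (LegacyConfig, ModernConfig, and the converters are hypothetical stand-ins; the real converters also translate the per-port memory descriptors):

// Convert to the legacy type, let the legacy impl mutate it, convert back.
struct LegacyConfig { bool dynBatchSupport = false; };
struct ModernConfig { bool dynBatchSupport = false; };

LegacyConfig toLegacy(const ModernConfig& c) { return {c.dynBatchSupport}; }
ModernConfig toModern(const LegacyConfig& c) { return {c.dynBatchSupport}; }

template <class LegacyImpl>
ModernConfig negotiateConfig(const ModernConfig& proposed, LegacyImpl& impl) {
    LegacyConfig ieConfig = toLegacy(proposed);  // legacy API speaks LayerConfig only
    impl.init(ieConfig);                         // impl may rewrite the config
    return toModern(ieConfig);                   // propagate adjustments back
}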
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.h
index ef83c0b333974d..63d0d5e20f059a 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.h
@@ -35,6 +35,9 @@ class MKLDNNGenericNode : public MKLDNNNode {
     void cleanup() override;
 
 protected:
+    NodeConfig convertLayerToNodeConfig(const InferenceEngine::LayerConfig &layerConfig);
+    InferenceEngine::LayerConfig convertNodeToLayerConfig(const NodeConfig &nodeConfig);
+
     InferenceEngine::ILayerImplFactory::Ptr extFactory;
     std::vector<InferenceEngine::ILayerImpl::Ptr> impls;
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
index afe4f5f2d39079..296be4032b4b23 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/skip_tests_config.cpp
@@ -73,12 +73,7 @@ std::vector disabledTestPatterns() {
         // TODO: 57562 No dynamic output shape support
         R"(.*NonZeroLayerTest.*)",
         // need to implement Export / Import
-        R"(.*IEClassImportExportTestP.*)",
-
-
-        // INVESTIGATE
-        R"(.*OnnxModelWithCustomAbs.*)",
-        R"(.*XmlModelWithCustomAbs.*)",
+        R"(.*IEClassImportExportTestP.*)"
     };
 #ifdef __APPLE__
     // TODO: Issue 55717