From 6726175b916528fd6f9bc3d0d9d4cd3a0da1d09a Mon Sep 17 00:00:00 2001 From: Maxim Andronov Date: Wed, 4 Aug 2021 19:29:49 +0300 Subject: [PATCH] Input, Reference nodes enabled + memory allocation with undef bound (#19) * Input and Reference node enabled * upper bound allocation enabled * fixes after first review * fixes after second review --- .../mkldnn_plugin/cpu_memory_desc_utils.cpp | 10 ++ .../src/mkldnn_plugin/cpu_memory_desc_utils.h | 9 +- .../src/mkldnn_plugin/cpu_shape.h | 9 ++ .../src/mkldnn_plugin/mkldnn_edge.h | 4 + .../src/mkldnn_plugin/mkldnn_graph.cpp | 30 ++-- .../src/mkldnn_plugin/mkldnn_graph.h | 18 ++- .../mkldnn_plugin/mkldnn_infer_request.cpp | 136 +++++++++++++----- .../src/mkldnn_plugin/mkldnn_infer_request.h | 4 + .../src/mkldnn_plugin/mkldnn_memory.h | 4 + .../src/mkldnn_plugin/mkldnn_node.cpp | 47 ++++-- .../src/mkldnn_plugin/mkldnn_node.h | 34 +++++ .../nodes/mkldnn_broadcast_node.cpp | 6 +- .../nodes/mkldnn_eltwise_node.cpp | 24 +++- .../mkldnn_plugin/nodes/mkldnn_eltwise_node.h | 2 + .../nodes/mkldnn_fake_quantize_node.cpp | 6 +- .../mkldnn_plugin/nodes/mkldnn_input_node.cpp | 2 +- .../mkldnn_plugin/nodes/mkldnn_input_node.h | 3 + .../nodes/mkldnn_multiclass_nms.cpp | 4 +- .../nodes/mkldnn_reference_node.cpp | 37 ++++- .../nodes/mkldnn_reference_node.h | 4 + .../src/mkldnn_plugin/utils/general_utils.h | 8 ++ .../behavior/infer_requset_dynamic.cpp | 29 ++++ .../behavior/infer_request_dynamic.hpp | 23 +-- 23 files changed, 371 insertions(+), 82 deletions(-) create mode 100644 inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_requset_dynamic.cpp diff --git a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp index 42264fa49f126e..9eaa1654466f97 100644 --- a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp +++ b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.cpp @@ -263,6 +263,16 @@ MemoryDescPtr MemoryDescUtils::resetOffset(const MemoryDesc* desc) { } } +InferenceEngine::Blob::Ptr MemoryDescUtils::createBlob(const MemoryDesc &memDesc) { + // TODO [DS]: Rewrite when IE is moved to the new TensorDescriptor + InferenceEngine::TensorDesc desc = convertToTensorDesc(memDesc); + + desc = InferenceEngine::TensorDesc(desc.getPrecision(), memDesc.getShape().getStaticDims(), desc.getBlockingDesc()); + InferenceEngine::Blob::Ptr blob = make_blob_with_precision(desc); + blob->allocate(); + return blob; +} + InferenceEngine::Blob::Ptr MemoryDescUtils::interpretAsBlob(const MKLDNNMemory &mem) { // TODO [DS]: Rewrite when IE is moved to the new TensorDescriptor auto& memDesc = mem.GetDesc(); diff --git a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h index 5cc6b0fc1038c7..536448ce74b1fa 100644 --- a/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h +++ b/inference-engine/src/mkldnn_plugin/cpu_memory_desc_utils.h @@ -78,7 +78,14 @@ class MemoryDescUtils { static MemoryDescPtr resetOffset(const MemoryDesc* desc); /** - * @brief Creates InferenceEngine::Blob from MKLDNNMemory + * @brief Creates InferenceEngine::Blob from MemoryDesc + * @param desc MemoryDesc from which will be created InferenceEngine::Blob + * @return pointer to InferenceEngine::Blob + */ + static InferenceEngine::Blob::Ptr createBlob(const MemoryDesc& memDesc); + + /** + * @brief Creates InferenceEngine::Blob from MKLDNNMemory with the memory reuse * @param desc MKLDNNMemory from which will be created 
InferenceEngine::Blob * @return pointer to InferenceEngine::Blob */ diff --git a/inference-engine/src/mkldnn_plugin/cpu_shape.h b/inference-engine/src/mkldnn_plugin/cpu_shape.h index 2f4018dbc98fdf..9e0a00b4d26026 100644 --- a/inference-engine/src/mkldnn_plugin/cpu_shape.h +++ b/inference-engine/src/mkldnn_plugin/cpu_shape.h @@ -97,10 +97,15 @@ class Shape { const std::vector& getDims() const { return dims; } + bool isStatic() const { return type == ShapeType::Static; } + bool isDynamic() const { + return type == ShapeType::Dynamic; + } + size_t getRank() const { return minDims.size(); } @@ -143,6 +148,10 @@ class Shape { return !(*this == rhs); } + bool hasDefinedUpperBounds() const { + return std::all_of(maxDims.begin(), maxDims.end(), [](size_t dim){ return dim != UNDEFINED_DIM; }); + } + enum : size_t { UNDEFINED_DIM = 0xffffffffffffffff }; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_edge.h b/inference-engine/src/mkldnn_plugin/mkldnn_edge.h index 5e6f4d23542f9f..d2c2f6c6233cd9 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_edge.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_edge.h @@ -68,6 +68,10 @@ class MKLDNNEdge { MKLDNNEdgePtr getSharedEdge() const; MKLDNNEdgePtr getSharedEdge(std::nothrow_t) const; + bool canProvideMaxSize() { + return getDesc().getMaxMemSize() != MemoryDesc::UNDEFINED_SIZE; + } + private: std::string name() const; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp index 600355812d5722..b8f4c3259ed459 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp @@ -522,6 +522,9 @@ static edge_clusters_t findEdgeClusters(const std::vector & graph edge_cluster_idx_map_t edge_cluster_indices; for (auto &edge : graphEdges) { + if (!edge->canProvideMaxSize()) + continue; + auto edge_it = edge_cluster_indices.find(edge); if (edge_it != edge_cluster_indices.end()) @@ -603,7 +606,6 @@ void MKLDNNGraph::AllocateWithReuse() { int e_finish = edge->getChild()->execIndex; int64_t e_size = edge->getDesc().getMaxMemSize(); // size in bytes (from the beginning of data to the last element) - //TODO [DS]: phase 2: remove this restriction if (e_size == MemoryDesc::UNDEFINED_SIZE) { IE_THROW() << "Can not allocate memory since the size is undefined."; } @@ -683,6 +685,9 @@ void MKLDNNGraph::Allocate() { // Resolve all other edges with status NotAllocated or in-place for (auto& node : graphNodes) node->resolveNotAllocatedEdges(); + // Create dummy memory with undefined desc + for (auto& edge : graphEdges) edge->allocate(); + // Check all getters. Should work. for (auto& edge : graphEdges) edge->validate(); } @@ -738,7 +743,7 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) { // TODO [DS]: phase 2: remove this blob allocation when possible, i.e. when dynamic ie blob representation becomes available if (out.find(name) == out.end()) { - out[name] = MemoryDescUtils::interpretAsBlob(intr_blob); + out[name] = MemoryDescUtils::createBlob(intr_blob.GetDesc()); } // TODO [DS]: is it sill true for the new paradigm? 
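For context on the upper-bound allocation introduced above: an edge can join the memory-reuse pass only when every dimension of its shape has a defined upper bound, so a maximum byte size is computable before execution. Below is a minimal sketch of that size computation under a simplified dense-layout assumption; upperBoundByteSize and its arguments are illustrative names only, while the plugin itself obtains the value through MemoryDesc::getMaxMemSize().

#include <cstddef>
#include <vector>

// Mirrors Shape::UNDEFINED_DIM / MemoryDesc::UNDEFINED_SIZE used in the patch.
constexpr size_t UNDEFINED_DIM  = 0xffffffffffffffff;
constexpr size_t UNDEFINED_SIZE = 0xffffffffffffffff;

// Illustrative only: the largest buffer (in bytes) a dynamic tensor may need,
// or UNDEFINED_SIZE when some upper bound is unknown and pre-allocation is impossible.
size_t upperBoundByteSize(const std::vector<size_t>& maxDims, size_t elementSize) {
    size_t bytes = elementSize;
    for (size_t dim : maxDims) {
        if (dim == UNDEFINED_DIM)
            return UNDEFINED_SIZE;   // corresponds to edge->canProvideMaxSize() == false
        bytes *= dim;
    }
    return bytes;
}

Edges whose maximum size stays undefined are skipped by findEdgeClusters() and get their memory later, from the trailing edge->allocate() loop added to MKLDNNGraph::Allocate().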
@@ -750,7 +755,7 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) { // TODO [DS]: phase 2: rewrite when dynamic ie blob representation becomes available // IE_THROW() << "Output blob number of elements is not equal network output number of elements (" // << ext_blob->size() << "!=" << intr_blob.GetElementsCount() << ")."; - out[name] = MemoryDescUtils::interpretAsBlob(intr_blob); + out[name] = MemoryDescUtils::createBlob(intr_blob.GetDesc()); } auto ext_blob = out.at(name); @@ -769,14 +774,19 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) { if (ext_blob_ptr == intr_blob_ptr) continue; int MB = intr_blob.GetDims()[0]; - int MB_to_process = node->batchToProcess(); + int MB_to_process = MB; // TODO: Should we support InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT??? // TODO [DS]: phase 2: should we support this behaviour? Looks obsolete in the dynamic shapes paradigm - if (config.batchLimit) - MB_to_process = std::min(config.batchLimit, MB_to_process); + if (config.batchLimit) { + if (node->isDynamicNode()) { + IE_THROW(NotImplemented) << "[DS] not implemented dynamic batch for node with dynamic shape"; + } + MB_to_process = node->batchToProcess(); + } + size_t size_to_copy = intr_blob.GetElementsCount() * MB_to_process / MB; - const auto actualDesc = MemoryDescUtils::convertToTensorDesc(node->getParentEdgeAt(0)->getDesc()); + const auto actualDesc = MemoryDescUtils::convertToTensorDesc(node->getParentEdgeAt(0)->getMemory().GetDesc()); const auto expectedDesc = ext_blob->getTensorDesc(); // TODO [NM]: need to create universal reorder which will be detect cases when we really need to use it @@ -829,7 +839,11 @@ void MKLDNNGraph::Infer(MKLDNNInferRequest* request, int batch) { ENABLE_CPU_DEBUG_CAP(nd.dumpInputBlobs(node)); OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, node->profiling.execute); - node->execute(stream); + if (node->isDynamicNode()) { + node->executeDynamic(stream); + } else { + node->execute(stream); + } ENABLE_CPU_DEBUG_CAP(nd.dumpOutputBlobs(node)); } diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.h b/inference-engine/src/mkldnn_plugin/mkldnn_graph.h index 741880aa548379..5a51b07014278c 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.h @@ -86,6 +86,20 @@ class MKLDNNGraph { return outputNodesMap; } + MKLDNNNodePtr GetInputNodeByName(const std::string &name) { + auto input = inputNodesMap.find(name); + if (input == inputNodesMap.end()) + IE_THROW() << "CPU execution graph doesn't contain input node with name: " << name; + return input->second; + } + + MKLDNNNodePtr GetOutputNodeByName(const std::string &name) { + auto output = outputNodesMap.find(name); + if (output == outputNodesMap.end()) + IE_THROW() << "CPU execution graph doesn't contain output node with name: " << name; + return output->second; + } + bool hasInputWithName(const std::string& name) const { return inputNodesMap.count(name); } @@ -197,8 +211,6 @@ class MKLDNNGraph { MKLDNNMemoryPtr memWorkspace; - std::map inputNodesMap; - std::map outputNodesMap; std::vector graphNodes; std::vector graphEdges; @@ -227,6 +239,8 @@ class MKLDNNGraph { friend std::shared_ptr dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph); private: + std::map inputNodesMap; + std::map outputNodesMap; // these node pointers (from graphNodes) are to avoid regular checking for // constant node in ExecuteConstantNodesOnly and Infer methods std::vector constantGraphNodes; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp 
b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp index 91c5c569b7234f..03190828848fb3 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp @@ -176,6 +176,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() { ThrowIfCanceled(); + // TODO [DS]: rewrite for dynamic shape execDataPreprocessing(_inputs); changeDefaultPtr(); @@ -207,6 +208,35 @@ std::map MKLDNNPlugin: return perfMap; } +void MKLDNNPlugin::MKLDNNInferRequest::createInputBlob(const std::string &name) { + MKLDNNNodeConstPtr inputNode = graph->GetInputNodeByName(name); + + if (inputNode->isDynamicNode() && !m_realShapes.count(name)) { + IE_THROW() << "Cannot create blob " << name << " with dynamic shapes"; + } + + InferenceEngine::TensorDesc origDesc = MemoryDescUtils::convertToTensorDesc(inputNode->getChildEdgesAtPort(0)[0]->getMemory().GetDesc()); + InferenceEngine::TensorDesc desc = origDesc; + + if (_networkInputs.find(name) != _networkInputs.end()) { + InferenceEngine::Layout l = _networkInputs[name]->getLayout(); + InferenceEngine::Precision p = _networkInputs[name]->getPrecision(); + InferenceEngine::SizeVector dims = inputNode->isDynamicNode() ? m_realShapes.at(name) : _networkInputs[name]->getTensorDesc().getDims(); + + desc = InferenceEngine::TensorDesc(p, dims, l); + } + + _inputs[name] = make_blob_with_precision(desc); + _inputs[name]->allocate(); + + // TODO [DS]: enable inplace for dynamic input/output + if (!inputNode->isDynamicNode() && + origDesc == desc && + graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end() && !graph->getProperty().batchLimit) { + externalPtr[name] = _inputs[name]->buffer(); + } +} + InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::string& name) { OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, "GetBlob"); @@ -224,26 +254,16 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std:: } if (_inputs.find(name) == _inputs.end()) { - auto pBlob = graph->getInputBlob(name); - if (!pBlob) { - IE_THROW() << "MKLDNN graph doesn't contain input node with name: " << name; - } - - InferenceEngine::TensorDesc desc = pBlob->getTensorDesc(); - - if (_networkInputs.find(name) != _networkInputs.end()) { - InferenceEngine::Layout l = _networkInputs[name]->getLayout(); - InferenceEngine::Precision p = _networkInputs[name]->getPrecision(); - InferenceEngine::SizeVector dims = _networkInputs[name]->getTensorDesc().getDims(); - - desc = InferenceEngine::TensorDesc(p, dims, l); + createInputBlob(name); + } + MKLDNNNodeConstPtr inputNode = graph->GetInputNodeByName(name); + if (inputNode->isDynamicNode()) { + if (!m_realShapes.count(name)) { + IE_THROW() << "Cannot get blob " << name << " which contains dynamic shapes"; } - - _inputs[name] = make_blob_with_precision(desc); - _inputs[name]->allocate(); - if (pBlob->getTensorDesc() == desc && - graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end() && !graph->getProperty().batchLimit) { - externalPtr[name] = _inputs[name]->buffer(); + if (_inputs[name]->getTensorDesc().getDims() != m_realShapes.at(name)) { + // TODO [DS]: reshape without reallocate? 
+ createInputBlob(name); } } data = _inputs[name]; @@ -267,6 +287,10 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std:: if (graph->hasOutputWithName(name)) { if (_outputs.find(name) == _outputs.end()) { + if (graph->GetOutputNodeByName(name)->isDynamicNode()) { + IE_THROW(NotImplemented) << "[DS] Can't get output blob for dynamic shapes before inference"; + } + auto pBlob = graph->getOutputBlob(name); if (!pBlob) { IE_THROW() << "MKLDNN graph doesn't contain output node with name: " << name; @@ -360,29 +384,37 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const In // pre-processing _preProcData[name]->setRoiBlob(data); } else { - size_t inputSize = foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR - ? InferenceEngine::details::product(foundInput->getTensorDesc().getDims()) - : 1; - if (dataSize != inputSize) { - IE_THROW() << "Input blob size is not equal network input size (" - << dataSize << "!=" << inputSize << ")."; + auto inputNode = graph->GetInputNodeByName(name); + if (foundInput->getInputData()->getPartialShape().rank().get_length() != data->getTensorDesc().getDims().size()) { + IE_THROW(ParameterMismatch) << "Failed to set input blob. Rank mismatch."; } - if (foundInput->getTensorDesc().getDims() != data->getTensorDesc().getDims()) { - IE_THROW(ParameterMismatch) << "Failed to set input blob. Dimensions mismatch."; - } + if (foundInput->getInputData()->isDynamic()) { + const auto &newShape = data->getTensorDesc().getDims(); + m_realShapes[name] = newShape; + inputNode->resetOutputShape({newShape}); + } else { + size_t inputSize = foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::SCALAR ? + InferenceEngine::details::product(foundInput->getTensorDesc().getDims()) : 1; + if (dataSize != inputSize) { + IE_THROW() << "Input blob size is not equal network input size (" + << dataSize << "!=" << inputSize << ")."; + } - if (data->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && - foundInput->getTensorDesc().getBlockingDesc() != data->getTensorDesc().getBlockingDesc()) { - IE_THROW(ParameterMismatch) << "Failed to set input blob. Blocking descriptor mismatch."; - } + if (foundInput->getTensorDesc().getDims() != data->getTensorDesc().getDims()) { + IE_THROW(ParameterMismatch) << "Failed to set input blob. Dimensions mismatch."; + } - auto pBlob = graph->getInputBlob(name); - if (!pBlob) { - IE_THROW() << "MKLDNN graph doesn't contain input node with name: " << name; + if (data->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && + foundInput->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && + foundInput->getTensorDesc().getBlockingDesc() != data->getTensorDesc().getBlockingDesc()) { + IE_THROW(ParameterMismatch) << "Failed to set input blob. 
Blocking descriptor mismatch."; + } } - if (data->getTensorDesc() == pBlob->getTensorDesc() && + // TODO [DS]: enable inplace for dynamic input/output + if (!inputNode->isDynamicNode() && + data->getTensorDesc() == MemoryDescUtils::convertToTensorDesc(inputNode->getChildEdgesAtPort(0)[0]->getMemory().GetDesc()) && graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end() && !graph->getProperty().batchLimit) { externalPtr[name] = data->buffer(); } else if (externalPtr.find(name) != externalPtr.end()) { @@ -392,6 +424,9 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const In } } if (foundOutput) { + if (foundOutput->isDynamic()) { + IE_THROW(NotImplemented) << "[DS] Can't set dynamic output blob"; + } if (compoundBlobPassed) { IE_THROW(NotImplemented) << "cannot set compound blob: supported only for input pre-processing"; @@ -435,8 +470,8 @@ static inline void changeEdgePtr(const MKLDNNPlugin::MKLDNNEdgePtr &edge, void * void MKLDNNPlugin::MKLDNNInferRequest::changeDefaultPtr() { for (auto& it : externalPtr) { - auto input = graph->inputNodesMap.find(it.first); - if (input != graph->inputNodesMap.end()) { + auto input = graph->GetInputNodesMap().find(it.first); + if (input != graph->GetInputNodesMap().end()) { if (input->second->getChildEdgeAt(0)->getMemory().GetPrimitive().get_data_handle() == it.second) continue; // Input cannot be in-place with other primitives @@ -470,7 +505,7 @@ void MKLDNNPlugin::MKLDNNInferRequest::changeDefaultPtr() { } MKLDNNNodePtr output; - for (auto& out : graph->outputNodesMap) { + for (auto& out : graph->GetOutputNodesMap()) { if (out.first == it.first) { output = out.second; break; @@ -536,3 +571,26 @@ void MKLDNNPlugin::MKLDNNInferRequest::ThrowIfCanceled() const { _asyncRequest->ThrowIfCanceled(); } } + +// TODO [DS]: analyze performance +// getPartialShape().compatible(newShape) +void MKLDNNPlugin::MKLDNNInferRequest::SetShape(const std::string& name, const InferenceEngine::SizeVector& dims) { + // Check partial shape compatibility + ngraph::PartialShape newShape(dims); + InferenceEngine::InputInfo::Ptr foundInput; + InferenceEngine::DataPtr foundOutput; + if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { + if (!foundInput->getInputData()->getPartialShape().compatible(newShape)) + IE_THROW() << "New shape " << newShape << " for " << name << " is incompatible with original shape " + << foundInput->getInputData()->getPartialShape(); + } else { + IE_THROW(NotImplemented) << "[DS] Can't SetShape for output node"; + // if (!foundOutput->getPartialShape().compatible(newShape)) + // IE_THROW() << "New shape " << newShape << " for " << name << " is incompatible with original shape " << foundOutput->getPartialShape(); + } + + m_realShapes[name] = dims; + + auto inputNode = graph->GetInputNodeByName(name); + inputNode->resetOutputShape({dims}); +} diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.h b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.h index f99c42cc7e8139..2cf2625152187e 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.h @@ -32,6 +32,8 @@ class MKLDNNInferRequest : public InferenceEngine::IInferRequestInternal { InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override; + void SetShape(const std::string& name, const InferenceEngine::SizeVector& dims) override; + void SetBatch(int batch = -1) override; std::vector> QueryState() override; @@ -54,6 +56,8 @@ class 
MKLDNNInferRequest : public InferenceEngine::IInferRequestInternal { void pushInput(const std::string& inputName, InferenceEngine::Blob::Ptr& inputBlob, InferenceEngine::Precision dataType); + void createInputBlob(const std::string &name); + void changeDefaultPtr(); std::shared_ptr execNetwork; MKLDNNGraph* graph = nullptr; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_memory.h b/inference-engine/src/mkldnn_plugin/mkldnn_memory.h index 870a044d846adb..cfaaae9121f8ff 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_memory.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_memory.h @@ -241,6 +241,10 @@ class MKLDNNMemory { //TODO [DS]: phase 2: move to reorder static void reorderData(const MKLDNNMemory& input, const MKLDNNMemory& output, size_t size = 0); + const std::vector& getStaticDims() const { + return GetDesc().getShape().getStaticDims(); + } + private: void Create(const mkldnn::memory::dims& dims, mkldnn::memory::data_type data_type, mkldnn::memory::format_tag format, const void* data = nullptr); diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp index b6b75083db9ea0..7de5470c30e6d6 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp @@ -257,11 +257,11 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr& op, const mkldnn::en for (size_t i = 0; i < op->get_input_size(); i++) { const auto &shape = op->get_input_partial_shape(i); - - bool isScalar = false; - if (shape.rank().is_static()) { - isScalar = shape.rank().get_length() == 0; + if (shape.rank().is_dynamic()) { + IE_THROW(Unexpected) << "CPU plug-in doesn't support operation with dynamic rank"; } + + bool isScalar = shape.rank().get_length() == 0; inputShapes.emplace_back(isScalar ? ngraph::PartialShape{1} : shape); originalInputPrecisions.emplace_back(details::convertPrecision(op->get_input_element_type(i))); } @@ -272,16 +272,19 @@ MKLDNNNode::MKLDNNNode(const std::shared_ptr& op, const mkldnn::en } for (size_t i = 0; i < op->get_output_size(); i++) { const auto &shape = op->get_output_partial_shape(i); - - bool isScalar = false; - if (shape.rank().is_static()) { - isScalar = shape.rank().get_length() == 0; + if (shape.rank().is_dynamic()) { + IE_THROW(Unexpected) << "CPU plug-in doesn't support operation with dynamic rank"; } + + bool isScalar = shape.rank().get_length() == 0; outputShapes.emplace_back(isScalar ? 
ngraph::PartialShape{1} : shape); originalOutputPrecisions.emplace_back(details::convertPrecision(op->get_output_element_type(i))); } } + isDynamic = std::any_of(inputShapes.begin(), inputShapes.end(), [](const Shape& shape){ return shape.isDynamic(); }) || + std::any_of(outputShapes.begin(), outputShapes.end(), [](const Shape& shape){ return shape.isDynamic(); }); + const auto& rtInfo = op->get_rt_info(); if (rtInfo.count("originalLayersNames")) { originalLayers = getRTInfoValue(rtInfo, "originalLayersNames"); @@ -636,6 +639,33 @@ void MKLDNNNode::execute(mkldnn::stream strm) { } } +void MKLDNNNode::executeDynamic(mkldnn::stream strm) { + resetOutputShape(); + executeDynamicImpl(strm); +} + +void MKLDNNNode::executeDynamicImpl(mkldnn::stream strm) { + IE_THROW() << "[DS] executeDynamicImpl not implemented for node with type: " << getTypeStr(); +} + +void MKLDNNNode::redefineOutputMemory(const std::vector> &newShapes) { + if (newShapes.size() != getOriginalOutputsNumber()) { + IE_THROW() << "Number shapes mismatch with real outputs number for node with name: " << getName(); + } + for (size_t i = 0; i < getOriginalOutputsNumber(); i++) { + getChildEdgesAtPort(i)[0]->getMemoryPtr()->redefineDesc(getOutputMemDescAtPort(i)->cloneWithNewDims(newShapes[i])); + } +} + +void MKLDNNNode::resetOutputShape() { + const auto newShapes = shapeInfer(); + redefineOutputMemory(newShapes); +} + +void MKLDNNNode::resetOutputShape(const std::vector> &newShapes) { + redefineOutputMemory(newShapes); +} + void MKLDNNNode::initSupportedPrimitiveDescriptors() { if (!supportedPrimitiveDescriptors.empty()) return; @@ -1048,6 +1078,7 @@ int MKLDNNNode::batchToProcess() { return dynBatchLim == 0 ? getMaxBatch() : std::min(getMaxBatch(), dynBatchLim); } +// TODO [DS]: how we should process this for dynamic shape? size_t MKLDNNNode::getMaxBatch() { // FIXME: batch != 0 dims number if (!inputShapes.empty()) { diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.h b/inference-engine/src/mkldnn_plugin/mkldnn_node.h index fb3ee702e2e471..e6d086caa4e941 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.h @@ -34,6 +34,7 @@ namespace MKLDNNPlugin { using MKLDNNNodePtr = std::shared_ptr; +using MKLDNNNodeConstPtr = std::shared_ptr; using MKLDNNNodeWeakPtr = std::weak_ptr; Type TypeFromName(const std::string type); @@ -490,7 +491,10 @@ class MKLDNNNode { virtual void setDynamicBatchLim(int lim); void resolveNotAllocatedEdges(); + virtual void execute(mkldnn::stream strm); + void executeDynamic(mkldnn::stream strm); + virtual void initSupportedPrimitiveDescriptors(); /** @@ -514,6 +518,9 @@ class MKLDNNNode { return created(); } + virtual void resetOutputShape(); + virtual void resetOutputShape(const std::vector> &newShapes); + /** * @brief Performs Node initialization based on graph context. * This is an auxiliary method that allows to use information not available in Node constructor (e.g. 
connection information with other nodes) @@ -669,7 +676,30 @@ class MKLDNNNode { bool canBePerformedAsScaleShift(const MKLDNNNode *parentNode = nullptr) const; + bool isDynamicNode() const { + return isDynamic; + } + + Shape getInputShapeAtPort(size_t port) const { + if (inputShapes.size() <= port) { + IE_THROW() << "Incorrect input port number for node " << getName(); + } + return inputShapes[port]; + } + + Shape getOutputShapeAtPort(size_t port) const { + if (outputShapes.size() <= port) { + IE_THROW() << "Incorrect output port number for node " << getName(); + } + return outputShapes[port]; + } + protected: + virtual std::vector> shapeInfer() const { + IE_THROW() << "MKLDNNNode::shapeInfer is not defined for node with type: " << getTypeStr(); + } + virtual void executeDynamicImpl(mkldnn::stream strm); + bool canFuseSimpleOperation(const MKLDNNNodePtr& node) const; // TODO [mandrono]: place outside of the node API void fillScalesAndShifts(const MKLDNNNode *parentNode, std::vector &scales, std::vector &shifts, const int align = -1); @@ -804,6 +834,10 @@ class MKLDNNNode { } private: + void redefineOutputMemory(const std::vector> &newShapes); + + bool isDynamic = false; + std::vector parentEdges; std::vector childEdges; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp index ef9c14ad0d4eef..ef1b5ac5d08f73 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_broadcast_node.cpp @@ -67,9 +67,9 @@ void MKLDNNBroadcastNode::initSupportedPrimitiveDescriptors() { } void MKLDNNBroadcastNode::execute(mkldnn::stream strm) { - size_t shape_size = (getParentEdgeAt(BROADCAST_SHAPE)->getMemory().GetDesc().getShape().getStaticDims())[0]; - SizeVector dst_dims = getChildEdgeAt(0)->getMemory().GetDesc().getShape().getStaticDims(); - SizeVector src_dims = getParentEdgeAt(BROADCAST_INPUT)->getMemory().GetDesc().getShape().getStaticDims(); + size_t shape_size = (getParentEdgeAt(BROADCAST_SHAPE)->getMemory().getStaticDims())[0]; + SizeVector dst_dims = getChildEdgeAt(0)->getMemory().getStaticDims(); + SizeVector src_dims = getParentEdgeAt(BROADCAST_INPUT)->getMemory().getStaticDims(); auto srcDesc = getParentEdgeAt(BROADCAST_INPUT)->getMemory().GetDescWithType(); SizeVector srcStrides = srcDesc.getStrides(); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp index c267697d57d1cd..dc29b14777738f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp @@ -958,13 +958,27 @@ std::map& op, std::string& errorMessage) noexcept { + try { + if (initializers.find(op->get_type_info()) == initializers.end()) { + errorMessage = "Doesn't support Eltwise algorithm: " + std::string(op->get_type_name()); + return false; + } + if (isDynamicNgraphNode(op)) { + errorMessage = "Doesn't support op with dynamic shapes"; + return false; + } + } catch (...) 
{ + return false; + } + return true; +} + MKLDNNEltwiseNode::MKLDNNEltwiseNode(const std::shared_ptr& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache) { - if (initializers.find(op->get_type_info()) != initializers.end()) { - initializers[op->get_type_info()](op, *this); - } else { - IE_THROW(NotImplemented) - << "CPU Eltwise node doesn't support ngraph operation " << op->get_type_name() << " with name " << op->get_friendly_name(); + std::string errorMessage; + if (!isSupportedOperation(op, errorMessage)) { + IE_THROW(NotImplemented) << errorMessage; } } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h index e1719be037fac5..bd0ea83771492d 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h @@ -88,6 +88,8 @@ class MKLDNNEltwiseNode : public MKLDNNNode { bool isWithBroadcast(); bool isSpecialConvolutionAddFusing() const { return specialConvolutionAddFusing; } + static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; + private: mkldnn::algorithm mkldnnAlgorithm = mkldnn::algorithm::undef; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp index b08ebae30f4c41..48a955c424651e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_fake_quantize_node.cpp @@ -1295,8 +1295,8 @@ void MKLDNNFakeQuantizeNode::executeReference() { auto src = reinterpret_cast(srcMemory->GetPtr()); - auto srcDims = srcMemory->GetDesc().getShape().getStaticDims(); - auto dstDims = dstMemory->GetDesc().getShape().getStaticDims(); + auto srcDims = srcMemory->getStaticDims(); + auto dstDims = dstMemory->getStaticDims(); auto s_str = jqp.s_str; auto d_str = jqp.d_str; @@ -1416,7 +1416,7 @@ void MKLDNNFakeQuantizeNode::executeBinarization() { auto thresholds = reinterpret_cast(internalBlobMemory[0]->GetData()); auto output_mask = reinterpret_cast(internalBlobMemory[1]->GetData()); - auto src_dims = srcMemory->GetDesc().getShape().getStaticDims(); + auto src_dims = srcMemory->getStaticDims(); std::vector s_str = jqp.s_str; size_t tmp = s_str[s_str.size() - 1]; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp index 33b6fdab4f4984..ad5b3111f8e107 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp @@ -242,7 +242,7 @@ MKLDNNInputNode::MKLDNNInputNode(const std::shared_ptr& op, const if (constOp) { constant = ConstantType::Const; cloneBlobIfRequired(); - } + } } void MKLDNNInputNode::cloneBlobIfRequired() { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.h index 8c57ac8873007e..172a84c53f0249 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.h @@ -25,6 +25,9 @@ class MKLDNNInputNode : public MKLDNNNode { void withMeanImage(); MKLDNNMemoryCPtr getMemoryPtr() const; + void resetOutputShape() override {} + void executeDynamicImpl(mkldnn::stream strm) override {} + private: void cloneBlobIfRequired(); diff --git 
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp index 64dccbdaeab47e..cd3eb8df3d6ef0 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_multiclass_nms.cpp @@ -135,7 +135,7 @@ void MKLDNNMultiClassNmsNode::execute(mkldnn::stream strm) { const float* boxes = reinterpret_cast(getParentEdgeAt(NMS_BOXES)->getMemoryPtr()->GetPtr()); const float* scores = reinterpret_cast(getParentEdgeAt(NMS_SCORES)->getMemoryPtr()->GetPtr()); - auto dims_boxes = getParentEdgeAt(NMS_BOXES)->getMemory().GetDesc().getShape().getStaticDims(); + auto dims_boxes = getParentEdgeAt(NMS_BOXES)->getMemory().getStaticDims(); if (max_output_boxes_per_class == 0) return; @@ -232,7 +232,7 @@ void MKLDNNMultiClassNmsNode::execute(mkldnn::stream strm) { }); } - const size_t selectedBoxesNum = getChildEdgeAt(NMS_SELECTEDINDICES)->getMemory().GetDesc().getShape().getStaticDims()[0]; + const size_t selectedBoxesNum = getChildEdgeAt(NMS_SELECTEDINDICES)->getMemory().getStaticDims()[0]; const size_t validOutputs = std::min(startOffset, selectedBoxesNum); std::vector m_selected_num; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp index f7ddad8b6794a4..336d334219ecaa 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.cpp @@ -7,6 +7,7 @@ #include #include #include "common/blocked_desc_creator.h" +#include using namespace mkldnn; using namespace MKLDNNPlugin; @@ -21,6 +22,15 @@ MKLDNNReferenceNode::MKLDNNReferenceNode(const std::shared_ptr& op } setType(Reference); setTypeStr("Reference"); + + if (isDynamicNode()) { + ngraph::OutputVector inputsForShapeInfer; + for (size_t i = 0; i < inputShapes.size(); i++) { + inputsForShapeInfer.push_back(std::make_shared(ngraphOp->get_input_element_type(i), + ngraphOp->get_input_partial_shape(i))); + } + opToShapeInfer = ngraphOp->clone_with_new_inputs(inputsForShapeInfer); + } } void MKLDNNReferenceNode::getSupportedDescriptors() {} @@ -46,17 +56,36 @@ void MKLDNNReferenceNode::initSupportedPrimitiveDescriptors() { void MKLDNNReferenceNode::createPrimitive() {} +std::vector> MKLDNNReferenceNode::shapeInfer() const { + for (size_t i = 0; i < opToShapeInfer->get_input_size(); i++) { + opToShapeInfer->get_input_tensor(i).set_partial_shape( + getParentEdgesAtPort(i)[0]->getMemory().GetDesc().getShape().toPartialShape()); + } + + opToShapeInfer->validate_and_infer_types(); + + IE_ASSERT(opToShapeInfer->get_output_size() == getOriginalOutputsNumber()); + + std::vector> newShapes(getOriginalOutputsNumber()); + for (size_t i = 0; i < newShapes.size(); i++) { + newShapes[i] = opToShapeInfer->get_output_partial_shape(i).get_shape(); + } + return newShapes; +} + void MKLDNNReferenceNode::execute(mkldnn::stream strm) { ngraph::HostTensorVector inputs; for (size_t i = 0; i < inputShapes.size(); i++) { void *srcDataPtr = getParentEdgesAtPort(i)[0]->getMemory().GetPtr(); - inputs.push_back(std::make_shared(ngraphOp->get_input_element_type(i), ngraphOp->get_input_shape(i), srcDataPtr)); + inputs.push_back(std::make_shared(ngraphOp->get_input_element_type(i), + getParentEdgesAtPort(i)[0]->getMemory().getStaticDims(), srcDataPtr)); } ngraph::HostTensorVector outputs; for (size_t i = 0; i < outputShapes.size(); i++) { void *dstDataPtr = 
getChildEdgesAtPort(i)[0]->getMemory().GetPtr(); - outputs.push_back(std::make_shared(ngraphOp->get_output_element_type(i), ngraphOp->get_output_shape(i), dstDataPtr)); + outputs.push_back(std::make_shared(ngraphOp->get_output_element_type(i), + getChildEdgesAtPort(i)[0]->getMemory().getStaticDims(), dstDataPtr)); } if (!ngraphOp->evaluate(outputs, inputs)) { @@ -64,6 +93,10 @@ void MKLDNNReferenceNode::execute(mkldnn::stream strm) { } } +void MKLDNNReferenceNode::executeDynamicImpl(mkldnn::stream strm) { + execute(strm); +} + bool MKLDNNReferenceNode::created() const { return getType() == Reference; } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h index ce27028aa56700..8c60ff7b45fbf2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reference_node.h @@ -20,8 +20,12 @@ class MKLDNNReferenceNode : public MKLDNNNode { void execute(mkldnn::stream strm) override; bool created() const override; + std::vector> shapeInfer() const override; + void executeDynamicImpl(mkldnn::stream strm) override; + private: const std::shared_ptr ngraphOp; + std::shared_ptr opToShapeInfer; const std::string additionalErrorMessage; }; diff --git a/inference-engine/src/mkldnn_plugin/utils/general_utils.h b/inference-engine/src/mkldnn_plugin/utils/general_utils.h index eb50d5ac734881..a7cf69f43f8b15 100644 --- a/inference-engine/src/mkldnn_plugin/utils/general_utils.h +++ b/inference-engine/src/mkldnn_plugin/utils/general_utils.h @@ -151,4 +151,12 @@ inline std::string dims2str(const std::vector& dims) { return output.str(); } +inline bool isDynamicNgraphNode(const std::shared_ptr& op) { + bool ret = op->is_dynamic(); + for (size_t i = 0; i < op->get_output_size(); i++) { + ret |= op->get_output_partial_shape(i).is_dynamic(); + } + return ret; +} + } // namespace MKLDNNPlugin \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_requset_dynamic.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_requset_dynamic.cpp new file mode 100644 index 00000000000000..3deb0bf58039e6 --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/infer_requset_dynamic.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "behavior/infer_request_dynamic.hpp" + +using namespace BehaviorTestsDefinitions; + +namespace { + +const std::vector netPrecisions = { + InferenceEngine::Precision::FP32 +}; + +const std::vector> configs = { + {} +}; + +INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestDynamicTests, + ::testing::Combine( + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CommonTestUtils::DEVICE_CPU), + ::testing::ValuesIn(configs)), + InferRequestDynamicTests::getTestCaseName); + +} // namespace + diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp index 5190b1c8c53615..f5a8e1eee14fd9 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request_dynamic.hpp @@ -33,7 +33,14 @@ class InferRequestDynamicTests : public BehaviorTestsUtils::BehaviorTestsBasic { 
public: void SetUp() override { std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - function = ngraph::builder::subgraph::makeSplitConvConcat(); + std::vector inputShape = {1, 4, 20, 20}; + ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32; + auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); + params.front()->set_friendly_name("Param_1"); + auto relu1 = std::make_shared(params[0]); + ngraph::ResultVector results{std::make_shared(relu1)}; + function = std::make_shared(results, params); + function->set_friendly_name("DynamicRelu"); } }; @@ -77,7 +84,7 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkBoundWithoutSetShape) { TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob) { const std::string param_name = "Param_1"; const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; - const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape = {1, 4, 20, 20}; // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() // Create CNNNetwork from ngrpah::Function @@ -107,7 +114,7 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob) { TEST_P(InferRequestDynamicTests, InferUpperBoundNetworkWithGetBlob) { const std::string param_name = "Param_1"; const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; - const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape = {1, 4, 20, 20}; // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() // Create CNNNetwork from ngrpah::Function @@ -184,8 +191,8 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithGetBlob2times) { const std::string param_name = "Param_1"; const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; const InferenceEngine::SizeVector refShape2 = {2, 4, 20, 20}; - const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; - const InferenceEngine::SizeVector refOutShape2 = {2, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape2 = {2, 4, 20, 20}; // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() // Create CNNNetwork from ngrpah::Function @@ -248,7 +255,7 @@ TEST_P(InferRequestDynamicTests, GetSameBlob2times) { TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetBlob) { const std::string param_name = "Param_1"; const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; - const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape = {1, 4, 20, 20}; // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() // Create CNNNetwork from ngrpah::Function @@ -278,8 +285,8 @@ TEST_P(InferRequestDynamicTests, InferDynamicNetworkWithSetBlob2times) { const std::string param_name = "Param_1"; const InferenceEngine::SizeVector refShape = {1, 4, 20, 20}; const InferenceEngine::SizeVector refShape2 = {2, 4, 20, 20}; - const InferenceEngine::SizeVector refOutShape = {1, 10, 18, 18}; - const InferenceEngine::SizeVector refOutShape2 = {2, 10, 18, 18}; + const InferenceEngine::SizeVector refOutShape = {1, 4, 20, 20}; + const InferenceEngine::SizeVector refOutShape2 = {2, 4, 20, 20}; // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() // Create CNNNetwork from 
ngraph::Function
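To round out the picture, a short usage sketch of the call sequence these behavior tests revolve around. It assumes the public request object forwards to the SetShape() override added in this patch; the parameter name follows the fixture above, while the helper name and the surrounding setup are hypothetical and not an excerpt from the test suite.

#include <ie_core.hpp>

// Hypothetical flow: pin a concrete shape, obtain the blob, run inference.
void runDynamicRelu(InferenceEngine::ExecutableNetwork& execNet) {
    auto req = execNet.CreateInferRequest();
    req.SetShape("Param_1", {1, 4, 20, 20});   // fix a real shape for the dynamic input first
    auto in = req.GetBlob("Param_1");          // the input blob can now be allocated with concrete dims
    // ... fill `in` with data ...
    req.Infer();
    // For a single-Relu network the output dims are expected to equal the input dims: {1, 4, 20, 20}.
}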