From 705c12bb49a4081c8780bf634b43cbbe72941453 Mon Sep 17 00:00:00 2001 From: Alexey Lebedev Date: Mon, 31 May 2021 12:25:13 +0300 Subject: [PATCH] [IE PYTHON] FIX InputInfoCPtr, DataPtr and CDataPtr deallocation (#5730) * linked ExecutableNetwork to InputInfo and Data * Add tests * Skip test_exec_graph_info_deallocation on ARM plugin --- .../src/openvino/inference_engine/ie_api.pxd | 6 +++- .../src/openvino/inference_engine/ie_api.pyx | 5 +++ .../openvino/inference_engine/ie_api_impl.cpp | 28 ++++++++------- .../openvino/inference_engine/ie_api_impl.hpp | 6 +++- .../inference_engine/ie_api_impl_defs.pxd | 3 ++ .../python/tests/test_ExecutableNetwork.py | 35 +++++++++++++++++++ .../python/tests/test_InputInfoCPtr.py | 18 ++++++++++ 7 files changed, 87 insertions(+), 14 deletions(-) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd index 29956c7defe23d..5d942f93050246 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from .cimport ie_api_impl_defs as C -from .ie_api_impl_defs cimport CBlob, CTensorDesc, InputInfo, CPreProcessChannel, CPreProcessInfo +from .ie_api_impl_defs cimport CBlob, CTensorDesc, InputInfo, CPreProcessChannel, CPreProcessInfo, CExecutableNetwork import os @@ -43,6 +43,7 @@ cdef class InferRequest: cdef class IENetwork: cdef C.IENetwork impl + cdef shared_ptr[CExecutableNetwork] _ptr_plugin cdef class ExecutableNetwork: cdef unique_ptr[C.IEExecNetwork] impl @@ -64,9 +65,11 @@ cdef class IECore: cdef class DataPtr: cdef C.DataPtr _ptr cdef C.IENetwork * _ptr_network + cdef shared_ptr[CExecutableNetwork] _ptr_plugin cdef class CDataPtr: cdef C.CDataPtr _ptr + cdef shared_ptr[CExecutableNetwork] _ptr_plugin cdef class TensorDesc: cdef C.CTensorDesc impl @@ -77,6 
+80,7 @@ cdef class InputInfoPtr: cdef class InputInfoCPtr: cdef InputInfo.CPtr _ptr + cdef shared_ptr[CExecutableNetwork] _ptr_plugin cdef class PreProcessInfo: cdef CPreProcessInfo* _ptr diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index f7c7356de837d6..a103a7deb01483 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -751,6 +751,7 @@ cdef class InputInfoCPtr: cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData() data_ptr = DataPtr() data_ptr._ptr = c_data_ptr + data_ptr._ptr_plugin = self._ptr_plugin return data_ptr ## tensor_desc of this input @@ -918,6 +919,7 @@ cdef class ExecutableNetwork: for in_ in c_inputs: input_info_ptr = InputInfoCPtr() input_info_ptr._ptr = in_.second + input_info_ptr._ptr_plugin = deref(self.impl).getPluginLink() inputs[in_.first.decode()] = input_info_ptr return inputs @@ -937,6 +939,7 @@ cdef class ExecutableNetwork: for in_ in c_inputs: data_ptr = DataPtr() data_ptr._ptr = in_.second + data_ptr._ptr_plugin = deref(self.impl).getPluginLink() inputs[in_.first.decode()] = data_ptr return inputs @@ -949,6 +952,7 @@ cdef class ExecutableNetwork: for in_ in c_outputs: data_ptr = CDataPtr() data_ptr._ptr = in_.second + data_ptr._ptr_plugin = deref(self.impl).getPluginLink() outputs[in_.first.decode()] = data_ptr return outputs @@ -965,6 +969,7 @@ cdef class ExecutableNetwork: def get_exec_graph_info(self): ie_network = IENetwork() ie_network.impl = deref(self.impl).GetExecGraphInfo() + ie_network._ptr_plugin = deref(self.impl).getPluginLink() return ie_network ## Gets general runtime metric for an executable network. 
It can be network name, actual device ID on diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index 6f8dad24fdfb15..1620f767c64922 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -324,23 +324,23 @@ void InferenceEnginePython::IEExecNetwork::infer() { } InferenceEnginePython::IENetwork InferenceEnginePython::IEExecNetwork::GetExecGraphInfo() { - return IENetwork(std::make_shared(actual.GetExecGraphInfo())); + return IENetwork(std::make_shared(actual->GetExecGraphInfo())); } PyObject* InferenceEnginePython::IEExecNetwork::getMetric(const std::string& metric_name) { - return parse_parameter(actual.GetMetric(metric_name)); + return parse_parameter(actual->GetMetric(metric_name)); } PyObject* InferenceEnginePython::IEExecNetwork::getConfig(const std::string& name) { - return parse_parameter(actual.GetConfig(name)); + return parse_parameter(actual->GetConfig(name)); } void InferenceEnginePython::IEExecNetwork::exportNetwork(const std::string& model_file) { - actual.Export(model_file); + actual->Export(model_file); } std::map InferenceEnginePython::IEExecNetwork::getInputs() { - InferenceEngine::ConstInputsDataMap inputsDataMap = actual.GetInputsInfo(); + InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo(); std::map pyInputs; for (const auto& item : inputsDataMap) { pyInputs[item.first] = item.second->getInputData(); @@ -349,7 +349,7 @@ std::map InferenceEnginePython::IEExecNet } std::map InferenceEnginePython::IEExecNetwork::getInputsInfo() { - InferenceEngine::ConstInputsDataMap inputsDataMap = actual.GetInputsInfo(); + InferenceEngine::ConstInputsDataMap inputsDataMap = actual->GetInputsInfo(); std::map pyInputs; for (const auto& item : inputsDataMap) { pyInputs[item.first] = 
item.second; @@ -358,7 +358,7 @@ std::map InferenceEnginePython::I } std::map InferenceEnginePython::IEExecNetwork::getOutputs() { - InferenceEngine::ConstOutputsDataMap outputsDataMap = actual.GetOutputsInfo(); + InferenceEngine::ConstOutputsDataMap outputsDataMap = actual->GetOutputsInfo(); std::map pyOutputs; for (const auto& item : outputsDataMap) { pyOutputs[item.first] = item.second; @@ -366,6 +366,10 @@ std::map InferenceEnginePython::IEExecNe return pyOutputs; } +std::shared_ptr InferenceEnginePython::IEExecNetwork::getPluginLink(){ + return actual; +} + void InferenceEnginePython::InferRequestWrap::setBlob(const std::string& blob_name, const InferenceEngine::Blob::Ptr& blob_ptr) { request_ptr.SetBlob(blob_name.c_str(), blob_ptr); @@ -512,7 +516,7 @@ int InferenceEnginePython::IdleInferRequestQueue::getIdleRequestId() { void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) { if (0 == num_requests) { - num_requests = getOptimalNumberOfRequests(actual); + num_requests = getOptimalNumberOfRequests(*actual); } infer_requests.resize(num_requests); @@ -521,7 +525,7 @@ void InferenceEnginePython::IEExecNetwork::createInferRequests(int num_requests) infer_request.index = i; request_queue_ptr->setRequestIdle(i); infer_request.request_queue_ptr = request_queue_ptr; - infer_request.request_ptr = actual.CreateInferRequest(); + infer_request.request_ptr = actual->CreateInferRequest(); infer_request.request_ptr.SetCompletionCallback>( @@ -564,7 +568,7 @@ std::unique_ptr InferenceEnginePython::IEC const std::map& config, int num_requests) { auto exec_network = InferenceEnginePython::make_unique(network.name, num_requests); - exec_network->actual = actual.LoadNetwork(*network.actual, deviceName, config); + exec_network->actual = std::make_shared(actual.LoadNetwork(*network.actual, deviceName, config)); exec_network->createInferRequests(num_requests); return exec_network; @@ -575,7 +579,7 @@ std::unique_ptr InferenceEnginePython::IEC const 
std::map& config, int num_requests) { auto exec_network = InferenceEnginePython::make_unique(modelPath, num_requests); - exec_network->actual = actual.LoadNetwork(modelPath, deviceName, config); + exec_network->actual = std::make_shared(actual.LoadNetwork(modelPath, deviceName, config)); exec_network->createInferRequests(num_requests); return exec_network; @@ -585,7 +589,7 @@ std::unique_ptr InferenceEnginePython::IEC const std::map& config, int num_requests) { auto exec_network = InferenceEnginePython::make_unique(EXPORTED_NETWORK_NAME, num_requests); - exec_network->actual = actual.ImportNetwork(modelFIle, deviceName, config); + exec_network->actual = std::make_shared(actual.ImportNetwork(modelFIle, deviceName, config)); exec_network->createInferRequests(num_requests); return exec_network; diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp index a583737d94a18c..c21464929b6420 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp @@ -134,8 +134,9 @@ struct InferRequestWrap { std::vector queryState(); }; + struct IEExecNetwork { - InferenceEngine::ExecutableNetwork actual; + std::shared_ptr actual; std::vector infer_requests; std::string name; IdleInferRequestQueue::Ptr request_queue_ptr; @@ -158,6 +159,9 @@ struct IEExecNetwork { int getIdleRequestId(); void createInferRequests(int num_requests); + + //binds plugin to InputInfo and Data, so that they can be destroyed before plugin (issue 28996) + std::shared_ptr getPluginLink(); }; struct IECore { diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd index d0b2b2fc51906f..6f7fd9180896a9 100644 --- 
a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd @@ -13,6 +13,8 @@ from libc.stdint cimport int64_t, uint8_t cdef extern from "" namespace "InferenceEngine": ctypedef vector[size_t] SizeVector + cdef cppclass CExecutableNetwork "InferenceEngine::ExecutableNetwork" + cdef cppclass TBlob[T]: ctypedef shared_ptr[TBlob[T]] Ptr @@ -160,6 +162,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython": object getConfig(const string & metric_name) except + int wait(int num_requests, int64_t timeout) int getIdleRequestId() + shared_ptr[CExecutableNetwork] getPluginLink() except + cdef cppclass IENetwork: IENetwork() except + diff --git a/inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py b/inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py index 2193a6501c24bb..11e4b479dac7f5 100644 --- a/inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py +++ b/inference-engine/ie_bridges/python/tests/test_ExecutableNetwork.py @@ -304,3 +304,38 @@ def test_get_config(device): exec_net = ie_core.load_network(net, device) config = exec_net.get_config("PERF_COUNT") assert config == "NO" + + +# issue 28996 +# checks that objects can deallocate in this order, if not - segfault happens +def test_input_info_deallocation(device): + ie_core = ie.IECore() + net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) + exec_net = ie_core.load_network(net, device) + input_info = exec_net.input_info["data"] + del ie_core + del exec_net + del input_info + + +def test_outputs_deallocation(device): + ie_core = ie.IECore() + net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) + exec_net = ie_core.load_network(net, device) + output = exec_net.outputs["fc_out"] + del ie_core + del exec_net + del output + + +def test_exec_graph_info_deallocation(device): + ie_core = ie.IECore() + if device == 
"CPU": + if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON": + pytest.skip("Can't run on ARM plugin due-to get_exec_graph_info method isn't implemented") + net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) + exec_net = ie_core.load_network(net, device) + exec_graph_info = exec_net.get_exec_graph_info() + del ie_core + del exec_net + del exec_graph_info diff --git a/inference-engine/ie_bridges/python/tests/test_InputInfoCPtr.py b/inference-engine/ie_bridges/python/tests/test_InputInfoCPtr.py index abfab2ce1b35dd..4631f108441239 100644 --- a/inference-engine/ie_bridges/python/tests/test_InputInfoCPtr.py +++ b/inference-engine/ie_bridges/python/tests/test_InputInfoCPtr.py @@ -16,6 +16,7 @@ def test_name(device): exec_net = ie.load_network(net, device, num_requests=5) assert isinstance(exec_net.input_info['data'], InputInfoCPtr) assert exec_net.input_info['data'].name == "data", "Incorrect name" + del ie del exec_net @@ -25,6 +26,7 @@ def test_precision(device): exec_net = ie.load_network(net, device, num_requests=5) assert isinstance(exec_net.input_info['data'], InputInfoCPtr) assert exec_net.input_info['data'].precision == "FP32", "Incorrect precision" + del ie del exec_net @@ -36,6 +38,7 @@ def test_no_precision_setter(device): exec_net.input_info['data'].precision = "I8" assert "attribute 'precision' of 'openvino.inference_engine.ie_api.InputInfoCPtr' " \ "objects is not writable" in str(e.value) + del ie del exec_net @@ -45,9 +48,24 @@ def test_input_data(device): exec_net = ie.load_network(net, device, num_requests=5) assert isinstance(exec_net.input_info['data'], InputInfoCPtr) assert isinstance(exec_net.input_info['data'].input_data, DataPtr), "Incorrect precision for layer 'fc_out'" + del ie del exec_net +# issue 28996 +# checks that objects can deallocate in this order, if not - segfault happends +def test_input_data_deallocation(device): + ie = IECore() + net = ie.read_network(model=test_net_xml, weights=test_net_bin) 
+ exec_net = ie.load_network(net, device) + input_info = exec_net.input_info['data'] + input_data = input_info.input_data + del ie + del exec_net + del input_info + del input_data + + def test_tensor_desc(device): ie = IECore() net = ie.read_network(model=test_net_xml, weights=test_net_bin)