diff --git a/ngraph/cmake/external_protobuf.cmake b/ngraph/cmake/external_protobuf.cmake
index 2b8902b604087a..9a36fa2ebc6f65 100644
--- a/ngraph/cmake/external_protobuf.cmake
+++ b/ngraph/cmake/external_protobuf.cmake
@@ -99,7 +99,7 @@ else()
                               CXX_VISIBILITY_PRESET default
                               C_VISIBILITY_PRESET default
                               VISIBILITY_INLINES_HIDDEN OFF)
-    set_target_properties(libprotobuf libprotobuf-lite PROPERTIES
+    set_target_properties(${_proto_libs} libprotobuf-lite PROPERTIES
         COMPILE_FLAGS "-Wno-unused-variable -Wno-inconsistent-missing-override")
 endif()

diff --git a/ngraph/python/src/openvino/inference_engine/__init__.py b/ngraph/python/src/openvino/inference_engine/__init__.py
index ca9ba301148da3..67499898c0559f 100644
--- a/ngraph/python/src/openvino/inference_engine/__init__.py
+++ b/ngraph/python/src/openvino/inference_engine/__init__.py
@@ -12,6 +12,7 @@
 from openvino.pyopenvino import TensorDesc
 from openvino.pyopenvino import get_version
 from openvino.pyopenvino import StatusCode
+from openvino.pyopenvino import InferQueue
 from openvino.pyopenvino import Blob
 from openvino.pyopenvino import PreProcessInfo
 from openvino.pyopenvino import MeanVariant
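With this re-export, `InferQueue` joins the public `openvino.inference_engine` namespace next to the existing bindings. A minimal import sketch (it assumes the `pyopenvino` extension is built and importable; nothing beyond the import itself is defined by this hunk):

```python
from openvino.inference_engine import InferQueue, StatusCode
```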
diff --git a/ngraph/python/src/pyopenvino/inference_engine/common.cpp b/ngraph/python/src/pyopenvino/inference_engine/common.cpp
index 2d2c0b5773c6da..a2ce6dab1342a0 100644
--- a/ngraph/python/src/pyopenvino/inference_engine/common.cpp
+++ b/ngraph/python/src/pyopenvino/inference_engine/common.cpp
@@ -6,73 +6,81 @@
 
 #include "common.hpp"
 
-namespace Common {
-
-    namespace {
-        const std::unordered_map<int, std::string> layout_int_to_str_map = {{0, "ANY"},
-                                                                            {1, "NCHW"},
-                                                                            {2, "NHWC"},
-                                                                            {3, "NCDHW"},
-                                                                            {4, "NDHWC"},
-                                                                            {64, "OIHW"},
-                                                                            {95, "SCALAR"},
-                                                                            {96, "C"},
+namespace Common
+{
+    namespace
+    {
+        const std::unordered_map<int, std::string> layout_int_to_str_map = {{0, "ANY"},
+                                                                            {1, "NCHW"},
+                                                                            {2, "NHWC"},
+                                                                            {3, "NCDHW"},
+                                                                            {4, "NDHWC"},
+                                                                            {64, "OIHW"},
+                                                                            {95, "SCALAR"},
+                                                                            {96, "C"},
                                                                             {128, "CHW"},
                                                                             {192, "HW"},
                                                                             {193, "NC"},
                                                                             {194, "CN"},
                                                                             {200, "BLOCKED"}};
-        const std::unordered_map<std::string, InferenceEngine::Layout> layout_str_to_enum = {
-            {"ANY", InferenceEngine::Layout::ANY},
-            {"NHWC", InferenceEngine::Layout::NHWC},
-            {"NCHW", InferenceEngine::Layout::NCHW},
-            {"NCDHW", InferenceEngine::Layout::NCDHW},
-            {"NDHWC", InferenceEngine::Layout::NDHWC},
-            {"OIHW", InferenceEngine::Layout::OIHW},
-            {"GOIHW", InferenceEngine::Layout::GOIHW},
-            {"OIDHW", InferenceEngine::Layout::OIDHW},
-            {"GOIDHW", InferenceEngine::Layout::GOIDHW},
-            {"SCALAR", InferenceEngine::Layout::SCALAR},
-            {"C", InferenceEngine::Layout::C},
-            {"CHW", InferenceEngine::Layout::CHW},
-            {"HW", InferenceEngine::Layout::HW},
-            {"NC", InferenceEngine::Layout::NC},
-            {"CN", InferenceEngine::Layout::CN},
-            {"BLOCKED", InferenceEngine::Layout::BLOCKED}
-        };
-    }
+        const std::unordered_map<std::string, InferenceEngine::Layout> layout_str_to_enum = {
+            {"ANY", InferenceEngine::Layout::ANY},
+            {"NHWC", InferenceEngine::Layout::NHWC},
+            {"NCHW", InferenceEngine::Layout::NCHW},
+            {"NCDHW", InferenceEngine::Layout::NCDHW},
+            {"NDHWC", InferenceEngine::Layout::NDHWC},
+            {"OIHW", InferenceEngine::Layout::OIHW},
+            {"GOIHW", InferenceEngine::Layout::GOIHW},
+            {"OIDHW", InferenceEngine::Layout::OIDHW},
+            {"GOIDHW", InferenceEngine::Layout::GOIDHW},
+            {"SCALAR", InferenceEngine::Layout::SCALAR},
+            {"C", InferenceEngine::Layout::C},
+            {"CHW", InferenceEngine::Layout::CHW},
+            {"HW", InferenceEngine::Layout::HW},
+            {"NC", InferenceEngine::Layout::NC},
+            {"CN", InferenceEngine::Layout::CN},
+            {"BLOCKED", InferenceEngine::Layout::BLOCKED}};
+    } // namespace
 
-    InferenceEngine::Layout get_layout_from_string(const std::string &layout) {
+    InferenceEngine::Layout get_layout_from_string(const std::string& layout)
+    {
         return layout_str_to_enum.at(layout);
     }
 
-    const std::string &get_layout_from_enum(const InferenceEngine::Layout &layout) {
+    const std::string& get_layout_from_enum(const InferenceEngine::Layout& layout)
+    {
         return layout_int_to_str_map.at(layout);
     }
 
-    PyObject *parse_parameter(const InferenceEngine::Parameter &param) {
+    PyObject* parse_parameter(const InferenceEngine::Parameter& param)
+    {
         // Check for std::string
-        if (param.is<std::string>()) {
+        if (param.is<std::string>())
+        {
             return PyUnicode_FromString(param.as<std::string>().c_str());
         }
-        // Check for int
-        else if (param.is<int>()) {
+        // Check for int
+        else if (param.is<int>())
+        {
             auto val = param.as<int>();
-            return PyLong_FromLong((long) val);
+            return PyLong_FromLong((long)val);
         }
-        // Check for unsinged int
-        else if (param.is<unsigned int>()) {
+        // Check for unsigned int
+        else if (param.is<unsigned int>())
+        {
             auto val = param.as<unsigned int>();
-            return PyLong_FromLong((unsigned long) val);
+            return PyLong_FromLong((unsigned long)val);
         }
-        // Check for float
-        else if (param.is<float>()) {
+        // Check for float
+        else if (param.is<float>())
+        {
             auto val = param.as<float>();
-            return PyFloat_FromDouble((double) val);
+            return PyFloat_FromDouble((double)val);
         }
-        // Check for bool
-        else if (param.is<bool>()) {
+        // Check for bool
+        else if (param.is<bool>())
+        {
             auto val = param.as<bool>();
             return val ? Py_True : Py_False;
         }
@@ -95,11 +103,13 @@ namespace Common {
             }
             return list;
         }
-        // Check for std::vector<unsigned int>
-        else if (param.is < std::vector < unsigned int >> ()) {
-            auto val = param.as < std::vector < unsigned int >> ();
-            PyObject *list = PyList_New(0);
-            for (const auto &it : val) {
+        // Check for std::vector<unsigned int>
+        else if (param.is<std::vector<unsigned int>>())
+        {
+            auto val = param.as<std::vector<unsigned int>>();
+            PyObject* list = PyList_New(0);
+            for (const auto& it : val)
+            {
                 PyList_Append(list, PyLong_FromLong(it));
             }
             return list;
@@ -147,9 +157,12 @@ namespace Common {
             PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long) it.second));
         }
         return dict;
-    } else {
-        PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!");
-        return (PyObject *) NULL;
+    }
+    else
+    {
+        PyErr_SetString(PyExc_TypeError,
+                        "Failed to convert parameter to Python representation!");
+        return (PyObject*)NULL;
     }
 }

diff --git a/ngraph/python/src/pyopenvino/inference_engine/common.hpp b/ngraph/python/src/pyopenvino/inference_engine/common.hpp
index 8c2dce070fac2a..f7bfb0642bb3e4 100644
--- a/ngraph/python/src/pyopenvino/inference_engine/common.hpp
+++ b/ngraph/python/src/pyopenvino/inference_engine/common.hpp
@@ -4,19 +4,25 @@
 
 #pragma once
 
+#include <string>
 #include <pybind11/pybind11.h>
 
 #include "Python.h"
-#include <ie_blob.h>
+#include "ie_blob.h"
 #include "ie_common.h"
 #include <ie_parameter.hpp>
 
 namespace py = pybind11;
 
-namespace Common {
-    InferenceEngine::Layout get_layout_from_string(const std::string &layout);
+namespace Common
+{
+    InferenceEngine::Layout get_layout_from_string(const std::string& layout);
 
-    const std::string& get_layout_from_enum(const InferenceEngine::Layout &layout);
+    const std::string& get_layout_from_enum(const InferenceEngine::Layout& layout);
 
-    PyObject *parse_parameter(const InferenceEngine::Parameter &param);
+    PyObject* parse_parameter(const InferenceEngine::Parameter& param);
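`parse_parameter()` is what lets `get_config`/`get_metric` return native Python objects: `std::string` becomes `str`, `int`/`unsigned int` become `int`, `float` and `bool` map directly, and the vector/map branches become `list`/`dict`. A sketch of the resulting Python-side behavior; the `IECore` method names, the model path, and the metric keys are assumptions for illustration, not part of this patch:

```python
from openvino.inference_engine import IECore  # assumes IECore is re-exported too

ie = IECore()
net = ie.read_network("model.xml")      # hypothetical model
exec_net = ie.load_network(net, "CPU")

name = exec_net.get_metric("NETWORK_NAME")                       # str
nreq = exec_net.get_metric("OPTIMAL_NUMBER_OF_INFER_REQUESTS")   # int
```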
diff --git a/ngraph/python/src/pyopenvino/inference_engine/ie_blob.hpp b/ngraph/python/src/pyopenvino/inference_engine/ie_blob.hpp
index 35584ebf5fc7bb..e4a7402019ea07 100644
--- a/ngraph/python/src/pyopenvino/inference_engine/ie_blob.hpp
+++ b/ngraph/python/src/pyopenvino/inference_engine/ie_blob.hpp
@@ -1,6 +1,5 @@
 // Copyright (C) 2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
-//
 
 #include <pybind11/numpy.h>
 
@@ -20,7 +19,8 @@ namespace py = pybind11;
 void regclass_Blob(py::module m);
 
 template <typename T>
-void regclass_TBlob(py::module m, std::string typestring) {
+void regclass_TBlob(py::module m, std::string typestring)
+{
     auto pyclass_name = py::detail::c_str((std::string("TBlob") + typestring));
 
     py::class_<InferenceEngine::TBlob<T>, std::shared_ptr<InferenceEngine::TBlob<T>>> cls(
@@ -42,7 +42,6 @@ void regclass_TBlob(py::module m, std::string typestring) {
             return py::array_t<T>(shape, &blob_ptr[0], py::cast(self));
         });
 
-    cls.def_property_readonly("tensor_desc", [](InferenceEngine::TBlob<T>& self) {
-        return self.getTensorDesc();
-    });
+    cls.def_property_readonly("tensor_desc",
+                              [](InferenceEngine::TBlob<T>& self) { return self.getTensorDesc(); });
 }

diff --git a/ngraph/python/src/pyopenvino/inference_engine/ie_executable_network.cpp b/ngraph/python/src/pyopenvino/inference_engine/ie_executable_network.cpp
index d30b59195722d7..cf66924cdcf8eb 100644
--- a/ngraph/python/src/pyopenvino/inference_engine/ie_executable_network.cpp
+++ b/ngraph/python/src/pyopenvino/inference_engine/ie_executable_network.cpp
@@ -1,16 +1,15 @@
 // Copyright (C) 2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
-//
 
 #include <cpp/ie_executable_network.hpp>
 #include <ie_input_info.hpp>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
-
 #include "common.hpp"
 #include "pyopenvino/inference_engine/ie_executable_network.hpp"
+#include "pyopenvino/inference_engine/ie_infer_request.hpp"
 #include "pyopenvino/inference_engine/ie_input_info.hpp"
 
 using PyConstInputsDataMap =
@@ -26,24 +25,32 @@ void regclass_ExecutableNetwork(py::module m)
                      std::shared_ptr<InferenceEngine::ExecutableNetwork>> cls(m, "ExecutableNetwork");
 
-    cls.def("create_infer_request", &InferenceEngine::ExecutableNetwork::CreateInferRequest);
+    cls.def("create_infer_request", [](InferenceEngine::ExecutableNetwork& self) {
+        return static_cast<InferenceEngine::InferRequest>(self.CreateInferRequest());
+    });
 
     cls.def("get_exec_graph_info", &InferenceEngine::ExecutableNetwork::GetExecGraphInfo);
 
-    cls.def("export", [](InferenceEngine::ExecutableNetwork& self,
-                         const std::string& modelFileName) {
-        self.Export(modelFileName);
-    }, py::arg("model_file"));
-
-    cls.def("get_config",
-            [](InferenceEngine::ExecutableNetwork& self, const std::string& config_name) -> py::handle {
-                return Common::parse_parameter(self.GetConfig(config_name));
-            }, py::arg("config_name"));
-
-    cls.def("get_metric",
-            [](InferenceEngine::ExecutableNetwork& self, const std::string& metric_name) -> py::handle {
-                return Common::parse_parameter(self.GetMetric(metric_name));
-            }, py::arg("metric_name"));
+    cls.def(
+        "export",
+        [](InferenceEngine::ExecutableNetwork& self, const std::string& modelFileName) {
+            self.Export(modelFileName);
+        },
+        py::arg("model_file"));
+
+    cls.def(
+        "get_config",
+        [](InferenceEngine::ExecutableNetwork& self, const std::string& config_name) -> py::handle {
+            return Common::parse_parameter(self.GetConfig(config_name));
+        },
+        py::arg("config_name"));
+
+    cls.def(
+        "get_metric",
+        [](InferenceEngine::ExecutableNetwork& self, const std::string& metric_name) -> py::handle {
+            return Common::parse_parameter(self.GetMetric(metric_name));
+        },
+        py::arg("metric_name"));
 
     // cls.def("get_idle_request_id", &InferenceEngine::ExecutableNetwork::CreateInferRequest);
     //
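`create_infer_request` now goes through a lambda so the bound return value is an `InferenceEngine::InferRequest`, keeping the Python-side type consistent with the `InferRequest` class registered in this module. Continuing the sketch above (the file name is a placeholder):

```python
request = exec_net.create_infer_request()   # -> InferRequest binding
exec_net.export("model.blob")               # keyword form: model_file="model.blob"
```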
diff --git a/ngraph/python/src/pyopenvino/inference_engine/ie_infer_queue.cpp b/ngraph/python/src/pyopenvino/inference_engine/ie_infer_queue.cpp
new file mode 100644
index 00000000000000..b04001e1f4cebc
--- /dev/null
+++ b/ngraph/python/src/pyopenvino/inference_engine/ie_infer_queue.cpp
@@ -0,0 +1,164 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <string>
+#include <vector>
+
+#include <pybind11/functional.h>
+#include <pybind11/stl.h>
+
+#include <cpp/ie_executable_network.hpp>
+#include <cpp/ie_infer_request.hpp>
+#include <ie_common.h>
+#include <ie_iinfer_request.hpp>
+
+#include "pyopenvino/inference_engine/common.hpp"
+#include "pyopenvino/inference_engine/ie_infer_queue.hpp"
+
+namespace py = pybind11;
+
+class InferQueue
+{
+public:
+    InferQueue(std::vector<InferenceEngine::InferRequest> requests,
+               std::queue<size_t> idle_handles,
+               std::vector<py::object> user_ids)
+        : _requests(requests)
+        , _idle_handles(idle_handles)
+        , _user_ids(user_ids)
+    {
+        this->setDefaultCallbacks();
+    }
+
+    ~InferQueue() { _requests.clear(); }
+
+    size_t getIdleRequestId()
+    {
+        // Wait for any of _idle_handles
+        py::gil_scoped_release release;
+        std::unique_lock<std::mutex> lock(_mutex);
+        _cv.wait(lock, [this] { return !(_idle_handles.empty()); });
+        py::gil_scoped_acquire acquire;
+
+        size_t idle_request_id = _idle_handles.front();
+        _idle_handles.pop();
+
+        return idle_request_id;
+    }
+
+    std::vector<InferenceEngine::StatusCode> waitAll()
+    {
+        // Wait for all requests to finish; each completion callback pushes its
+        // handle back, so _idle_handles eventually matches the number of requests
+        py::gil_scoped_release release;
+        std::unique_lock<std::mutex> lock(_mutex);
+        _cv.wait(lock, [this] { return _idle_handles.size() == _requests.size(); });
+        py::gil_scoped_acquire acquire;
+
+        std::vector<InferenceEngine::StatusCode> statuses;
+
+        for (size_t handle = 0; handle < _requests.size(); handle++)
+        {
+            statuses.push_back(
+                _requests[handle].Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY));
+        }
+
+        return statuses;
+    }
+
+    void setDefaultCallbacks()
+    {
+        for (size_t handle = 0; handle < _requests.size(); handle++)
+        {
+            _requests[handle].SetCompletionCallback([this, handle /* ... */]() {
+                py::gil_scoped_acquire acquire;
+                _idle_handles.push(handle);
+                py::gil_scoped_release release;
+                _cv.notify_one();
+            });
+        }
+    }
+
+    void setCustomCallbacks(py::function f_callback)
+    {
+        for (size_t handle = 0; handle < _requests.size(); handle++)
+        {
+            _requests[handle].SetCompletionCallback([this, f_callback, handle /* ... */]() {
+                // Acquire GIL, execute Python function and add idle handle to queue,
+                // release GIL afterwards
+                py::gil_scoped_acquire acquire;
+                f_callback(_user_ids[handle], handle);
+                _idle_handles.push(handle);
+                py::gil_scoped_release release;
+                // Notify locks in getIdleRequestId() or waitAll() functions
+                _cv.notify_one();
+            });
+        }
+    }
+
+    std::vector<InferenceEngine::InferRequest> _requests;
+    std::vector<py::object> _user_ids; // user ID can be any Python object
+    std::queue<size_t> _idle_handles;
+    std::mutex _mutex;
+    std::condition_variable _cv;
+};
+
+void regclass_InferQueue(py::module m)
+{
+    py::class_<InferQueue, std::shared_ptr<InferQueue>> cls(m, "InferQueue");
+
+    cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) {
+        std::vector<InferenceEngine::InferRequest> requests;
+        std::queue<size_t> idle_handles;
+        std::vector<py::object> user_ids(jobs);
+
+        for (size_t handle = 0; handle < jobs; handle++)
+        {
+            requests.push_back(net.CreateInferRequest());
+            idle_handles.push(handle);
+        }
+
+        return new InferQueue(requests, idle_handles, user_ids);
+    }));
+
+    cls.def("infer", [](InferQueue& self, py::object user_id, const py::dict inputs) {
+        // getIdleRequestId() blocks until at least one idle
+        // (free to use) InferRequest is available in the queue
+        auto handle = self.getIdleRequestId();
+        // Set new inputs label/id from user
+        self._user_ids[handle] = user_id;
+        // Now GIL can be released since every instruction from this point
+        // uses a unique handle
+        py::gil_scoped_release release;
+        // Update inputs of picked InferRequest
+        for (auto&& input : inputs)
+        {
+            auto name = input.first.cast<std::string>();
+            auto blob = Common::cast_to_blob(input.second);
+            self._requests[handle].SetBlob(name, blob);
+        }
+        // Start InferRequest in asynchronous mode
+        self._requests[handle].StartAsync();
+    });
+
+    cls.def("wait_all", [](InferQueue& self) { return self.waitAll(); });
+
+    cls.def("set_infer_callback",
+            [](InferQueue& self, py::function f_callback) { self.setCustomCallbacks(f_callback); });
+
+    cls.def("__len__", [](InferQueue& self) { return self._requests.size(); });
+
+    cls.def(
+        "__iter__",
+        [](InferQueue& self) {
+            return py::make_iterator(self._requests.begin(), self._requests.end());
+        },
+        py::keep_alive<0, 1>()); /* Keep InferQueue alive while iterator is used */
+
+    cls.def("__getitem__", [](InferQueue& self, size_t i) { return self._requests[i]; });
+}

diff --git a/ngraph/python/src/pyopenvino/inference_engine/ie_infer_queue.hpp b/ngraph/python/src/pyopenvino/inference_engine/ie_infer_queue.hpp
new file mode 100644
index 00000000000000..23aa72fd072496
--- /dev/null
+++ b/ngraph/python/src/pyopenvino/inference_engine/ie_infer_queue.hpp
@@ -0,0 +1,10 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#pragma once
+
+#include <pybind11/pybind11.h>
+
+namespace py = pybind11;
+
+void regclass_InferQueue(py::module m);
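Taken together: `infer()` blocks until a request handle is idle, the user callback runs on each completion with `(user_id, handle)`, and `wait_all()` resolves once every handle has returned. A hedged end-to-end sketch of the API registered above; the model path, the input name `data`, the shape, and the `IECore` calls are placeholders, not part of this patch:

```python
import numpy as np
from openvino.inference_engine import IECore, InferQueue

ie = IECore()
net = ie.read_network("model.xml")
exec_net = ie.load_network(net, "CPU")

queue = InferQueue(exec_net, 4)   # py::init(ExecutableNetwork&, jobs)

def completion(user_id, handle):
    # Runs under the GIL from the request's completion callback
    print(f"request {handle} finished for frame {user_id}")

queue.set_infer_callback(completion)

for i in range(16):
    # Blocks here whenever all four requests are busy
    queue.infer(i, {"data": np.zeros((1, 3, 224, 224), dtype=np.float32)})

statuses = queue.wait_all()       # list of StatusCode, one per request
print(len(queue), statuses)
```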
"pyopenvino/inference_engine/ie_executable_network.hpp" namespace py = pybind11; @@ -24,23 +24,37 @@ void regclass_InferRequest(py::module m) py::class_> cls( m, "InferRequest"); - cls.def("infer", &InferenceEngine::InferRequest::Infer); + cls.def("set_batch", &InferenceEngine::InferRequest::SetBatch, py::arg("size")); + cls.def("get_blob", &InferenceEngine::InferRequest::GetBlob); + + cls.def("set_blob", + [](InferenceEngine::InferRequest& self, const std::string& name, py::handle blob) { + self.SetBlob(name, Common::cast_to_blob(blob)); + }); + cls.def("set_input", [](InferenceEngine::InferRequest& self, const py::dict& inputs) { for (auto&& input : inputs) { - auto name = input.first.cast().c_str(); + auto name = input.first.cast(); auto blob = Common::cast_to_blob(input.second); self.SetBlob(name, blob); } }); + cls.def("set_output", [](InferenceEngine::InferRequest& self, const py::dict& results) { for (auto&& result : results) { - auto name = result.first.cast().c_str(); + auto name = result.first.cast(); auto blob = Common::cast_to_blob(result.second); self.SetBlob(name, blob); } }); + cls.def("infer", &InferenceEngine::InferRequest::Infer); + + cls.def("async_infer", + &InferenceEngine::InferRequest::StartAsync, + py::call_guard()); + cls.def("set_blob", [](InferenceEngine::InferRequest& self, const std::string& name, py::handle blob) { @@ -54,27 +68,40 @@ void regclass_InferRequest(py::module m) self.SetBlob(name, Common::cast_to_blob(blob)); }); - cls.def("set_batch", &InferenceEngine::InferRequest::SetBatch, py::arg("size")); + cls.def("wait", + &InferenceEngine::InferRequest::Wait, + py::arg("millis_timeout") = InferenceEngine::IInferRequest::WaitMode::RESULT_READY, + py::call_guard()); + + cls.def("set_completion_callback", + [](InferenceEngine::InferRequest* self, py::function f_callback) { + self->SetCompletionCallback([f_callback]() { + py::gil_scoped_acquire acquire; + f_callback(); + py::gil_scoped_release release; + }); + }); cls.def("get_perf_counts", [](InferenceEngine::InferRequest& self) { std::map perfMap; perfMap = self.GetPerformanceCounts(); py::dict perf_map; - for (auto it : perfMap) { + for (auto it : perfMap) + { py::dict profile_info; - switch (it.second.status) { - case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: - profile_info["status"] = "EXECUTED"; - break; - case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: - profile_info["status"] = "NOT_RUN"; - break; - case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: - profile_info["status"] = "OPTIMIZED_OUT"; - break; - default: - profile_info["status"] = "UNKNOWN"; + switch (it.second.status) + { + case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: + profile_info["status"] = "EXECUTED"; + break; + case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: + profile_info["status"] = "NOT_RUN"; + break; + case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: + profile_info["status"] = "OPTIMIZED_OUT"; + break; + default: profile_info["status"] = "UNKNOWN"; } profile_info["exec_type"] = it.second.exec_type; profile_info["layer_type"] = it.second.layer_type; @@ -88,21 +115,15 @@ void regclass_InferRequest(py::module m) cls.def("preprocess_info", &InferenceEngine::InferRequest::GetPreProcess, py::arg("name")); -// cls.def_property_readonly("preprocess_info", [](InferenceEngine::InferRequest& self) { -// -// }); -// cls.def_property_readonly("input_blobs", [](){ -// -// }); -// cls.def_property_readonly("output_blobs", [](){ -// -// }); - -// cls.def("wait"); -// 
cls.def("set_completion_callback") -// cls.def("async_infer",); -// latency - - - //&InferenceEngine::InferRequest::SetOutput); + // cls.def_property_readonly("preprocess_info", [](InferenceEngine::InferRequest& self) { + // + // }); + // cls.def_property_readonly("input_blobs", [](){ + // + // }); + // cls.def_property_readonly("output_blobs", [](){ + // + // }); + + // latency } diff --git a/ngraph/python/src/pyopenvino/inference_engine/ie_infer_request.hpp b/ngraph/python/src/pyopenvino/inference_engine/ie_infer_request.hpp index 394be38b6af54a..332c085dedd6fc 100644 --- a/ngraph/python/src/pyopenvino/inference_engine/ie_infer_request.hpp +++ b/ngraph/python/src/pyopenvino/inference_engine/ie_infer_request.hpp @@ -6,6 +6,8 @@ #include +#include + namespace py = pybind11; void regclass_InferRequest(py::module m); diff --git a/ngraph/python/src/pyopenvino/pyopenvino.cpp b/ngraph/python/src/pyopenvino/pyopenvino.cpp index 12de824cfbee9a..d822df62d1583d 100644 --- a/ngraph/python/src/pyopenvino/pyopenvino.cpp +++ b/ngraph/python/src/pyopenvino/pyopenvino.cpp @@ -1,28 +1,29 @@ // Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -// #include #include "inference_engine/ie_blob.hpp" #include "inference_engine/ie_core.hpp" +#include "inference_engine/ie_data.hpp" #include "inference_engine/ie_executable_network.hpp" +#include "inference_engine/ie_infer_queue.hpp" #include "inference_engine/ie_infer_request.hpp" +#include "inference_engine/ie_input_info.hpp" #include "inference_engine/ie_network.hpp" -#include "inference_engine/tensor_description.hpp" -#include "inference_engine/ie_version.hpp" #include "inference_engine/ie_parameter.hpp" -#include "inference_engine/ie_input_info.hpp" -#include "inference_engine/ie_data.hpp" #include "inference_engine/ie_preprocess_info.hpp" +#include "inference_engine/ie_version.hpp" +#include "inference_engine/tensor_description.hpp" -#include #include +#include #include - +#include namespace py = pybind11; -std::string get_version() { +std::string get_version() +{ auto version = InferenceEngine::GetInferenceEngineVersion(); std::string version_str = std::to_string(version->apiVersion.major) + "."; version_str += std::to_string(version->apiVersion.minor) + "."; @@ -30,28 +31,36 @@ std::string get_version() { return version_str; } -PYBIND11_MODULE(pyopenvino, m) { - +PYBIND11_MODULE(pyopenvino, m) +{ m.doc() = "Package openvino.pyopenvino which wraps openvino C++ APIs"; m.def("get_version", &get_version); py::enum_(m, "StatusCode") - .value("OK", InferenceEngine::StatusCode::OK) - .value("GENERAL_ERROR", InferenceEngine::StatusCode::GENERAL_ERROR) - .value("NOT_IMPLEMENTED", InferenceEngine::StatusCode::NOT_IMPLEMENTED) - .value("NETWORK_NOT_LOADED", InferenceEngine::StatusCode::NETWORK_NOT_LOADED) - .value("PARAMETER_MISMATCH", InferenceEngine::StatusCode::PARAMETER_MISMATCH) - .value("NOT_FOUND", InferenceEngine::StatusCode::NOT_FOUND) - .value("OUT_OF_BOUNDS", InferenceEngine::StatusCode::OUT_OF_BOUNDS) - .value("UNEXPECTED", InferenceEngine::StatusCode::UNEXPECTED) - .value("REQUEST_BUSY", InferenceEngine::StatusCode::REQUEST_BUSY) - .value("RESULT_NOT_READY", InferenceEngine::StatusCode::RESULT_NOT_READY) - .value("NOT_ALLOCATED", InferenceEngine::StatusCode::NOT_ALLOCATED) - .value("INFER_NOT_STARTED", InferenceEngine::StatusCode::INFER_NOT_STARTED) - .value("NETWORK_NOT_READ", InferenceEngine::StatusCode::NETWORK_NOT_READ) - .export_values(); + .value("OK", InferenceEngine::StatusCode::OK) + .value("GENERAL_ERROR", 
diff --git a/ngraph/python/src/pyopenvino/pyopenvino.cpp b/ngraph/python/src/pyopenvino/pyopenvino.cpp
index 12de824cfbee9a..d822df62d1583d 100644
--- a/ngraph/python/src/pyopenvino/pyopenvino.cpp
+++ b/ngraph/python/src/pyopenvino/pyopenvino.cpp
@@ -1,28 +1,29 @@
 // Copyright (C) 2021 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
-//
 
 #include <pybind11/pybind11.h>
 
 #include "inference_engine/ie_blob.hpp"
 #include "inference_engine/ie_core.hpp"
+#include "inference_engine/ie_data.hpp"
 #include "inference_engine/ie_executable_network.hpp"
+#include "inference_engine/ie_infer_queue.hpp"
 #include "inference_engine/ie_infer_request.hpp"
+#include "inference_engine/ie_input_info.hpp"
 #include "inference_engine/ie_network.hpp"
-#include "inference_engine/tensor_description.hpp"
-#include "inference_engine/ie_version.hpp"
 #include "inference_engine/ie_parameter.hpp"
-#include "inference_engine/ie_input_info.hpp"
-#include "inference_engine/ie_data.hpp"
 #include "inference_engine/ie_preprocess_info.hpp"
+#include "inference_engine/ie_version.hpp"
+#include "inference_engine/tensor_description.hpp"
 
-#include
 #include
+#include
 #include
-
+#include
 
 namespace py = pybind11;
 
-std::string get_version() {
+std::string get_version()
+{
     auto version = InferenceEngine::GetInferenceEngineVersion();
     std::string version_str = std::to_string(version->apiVersion.major) + ".";
     version_str += std::to_string(version->apiVersion.minor) + ".";
@@ -30,28 +31,36 @@ std::string get_version() {
     return version_str;
 }
 
-PYBIND11_MODULE(pyopenvino, m) {
-
+PYBIND11_MODULE(pyopenvino, m)
+{
     m.doc() = "Package openvino.pyopenvino which wraps openvino C++ APIs";
 
     m.def("get_version", &get_version);
 
     py::enum_<InferenceEngine::StatusCode>(m, "StatusCode")
-            .value("OK", InferenceEngine::StatusCode::OK)
-            .value("GENERAL_ERROR", InferenceEngine::StatusCode::GENERAL_ERROR)
-            .value("NOT_IMPLEMENTED", InferenceEngine::StatusCode::NOT_IMPLEMENTED)
-            .value("NETWORK_NOT_LOADED", InferenceEngine::StatusCode::NETWORK_NOT_LOADED)
-            .value("PARAMETER_MISMATCH", InferenceEngine::StatusCode::PARAMETER_MISMATCH)
-            .value("NOT_FOUND", InferenceEngine::StatusCode::NOT_FOUND)
-            .value("OUT_OF_BOUNDS", InferenceEngine::StatusCode::OUT_OF_BOUNDS)
-            .value("UNEXPECTED", InferenceEngine::StatusCode::UNEXPECTED)
-            .value("REQUEST_BUSY", InferenceEngine::StatusCode::REQUEST_BUSY)
-            .value("RESULT_NOT_READY", InferenceEngine::StatusCode::RESULT_NOT_READY)
-            .value("NOT_ALLOCATED", InferenceEngine::StatusCode::NOT_ALLOCATED)
-            .value("INFER_NOT_STARTED", InferenceEngine::StatusCode::INFER_NOT_STARTED)
-            .value("NETWORK_NOT_READ", InferenceEngine::StatusCode::NETWORK_NOT_READ)
-            .export_values();
+        .value("OK", InferenceEngine::StatusCode::OK)
+        .value("GENERAL_ERROR", InferenceEngine::StatusCode::GENERAL_ERROR)
+        .value("NOT_IMPLEMENTED", InferenceEngine::StatusCode::NOT_IMPLEMENTED)
+        .value("NETWORK_NOT_LOADED", InferenceEngine::StatusCode::NETWORK_NOT_LOADED)
+        .value("PARAMETER_MISMATCH", InferenceEngine::StatusCode::PARAMETER_MISMATCH)
+        .value("NOT_FOUND", InferenceEngine::StatusCode::NOT_FOUND)
+        .value("OUT_OF_BOUNDS", InferenceEngine::StatusCode::OUT_OF_BOUNDS)
+        .value("UNEXPECTED", InferenceEngine::StatusCode::UNEXPECTED)
+        .value("REQUEST_BUSY", InferenceEngine::StatusCode::REQUEST_BUSY)
+        .value("RESULT_NOT_READY", InferenceEngine::StatusCode::RESULT_NOT_READY)
+        .value("NOT_ALLOCATED", InferenceEngine::StatusCode::NOT_ALLOCATED)
+        .value("INFER_NOT_STARTED", InferenceEngine::StatusCode::INFER_NOT_STARTED)
+        .value("NETWORK_NOT_READ", InferenceEngine::StatusCode::NETWORK_NOT_READ)
+        .export_values();
+
+    py::enum_<InferenceEngine::IInferRequest::WaitMode>(m, "WaitMode")
+        .value("RESULT_READY", InferenceEngine::IInferRequest::WaitMode::RESULT_READY)
+        .value("STATUS_ONLY", InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY)
+        .export_values();
 
     regclass_IECore(m);
+    regclass_IENetwork(m);
+
+    regclass_Data(m);
+    regclass_TensorDecription(m);
 
     // Registering template of Blob
     regclass_Blob(m);
@@ -67,13 +76,11 @@ PYBIND11_MODULE(pyopenvino, m) {
     regclass_TBlob<int8_t>(m, "Int8");
     regclass_TBlob<uint8_t>(m, "Uint8");
 
-    regclass_IENetwork(m);
     regclass_ExecutableNetwork(m);
     regclass_InferRequest(m);
-    regclass_TensorDecription(m);
     regclass_Version(m);
     regclass_Parameter(m);
-    regclass_Data(m);
     regclass_InputInfo(m);
+    regclass_InferQueue(m);
     regclass_PreProcessInfo(m);
 }

diff --git a/ngraph/python/tests/test_inference_engine/test_blob.py b/ngraph/python/tests/test_inference_engine/test_blob.py
index 3cc06103eddcdc..6ea9647ec718ce 100644
--- a/ngraph/python/tests/test_inference_engine/test_blob.py
+++ b/ngraph/python/tests/test_inference_engine/test_blob.py
@@ -139,8 +139,8 @@ def test_write_numpy_scalar_int64():
     scalar = np.array(0, dtype=np.int64)
     blob = Blob(tensor_desc, scalar)
     scalar_to_write = np.array(1, dtype=np.int64)
-    blob.buffer[:] = scalar_to_write
-    assert np.array_equal(blob.buffer, np.atleast_1d(scalar_to_write))
+    blob.buffer[()] = scalar_to_write
+    assert np.array_equal(blob.buffer, scalar_to_write)
 
 
 def test_incompatible_array_and_td():
@@ -166,4 +166,4 @@ def test_incompatible_input_precision():
     with pytest.raises(ValueError) as e:
         Blob(tensor_desc, image)
     assert "Data type float64 of provided numpy array " \
-           "doesn't match to TensorDesc precision FP32" in str(e.value)
+           "does not match TensorDesc precision FP32" in str(e.value)
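The test change deserves a worked example: basic slicing is not defined for 0-d NumPy arrays, while the empty-tuple index addresses the single element directly, so `buffer[()]` is the correct way to write a scalar through the blob's buffer:

```python
import numpy as np

buf = np.array(0, dtype=np.int64)   # 0-d array, shape == ()
# buf[:] = 1 would raise IndexError: too many indices for a 0-d array
buf[()] = 1                         # writes the scalar in place
assert np.array_equal(buf, np.array(1, dtype=np.int64))
```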