Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Async infer and Blob dispatch #17

Merged
merged 33 commits into from
Apr 2, 2021
Merged
Show file tree
Hide file tree
Changes from 27 commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
40b399f
Added async infer
Jan 15, 2021
4ebda00
Merge remote-tracking branch 'akuporos/mk/ov_pybind_poc' into mk/ov_p…
Feb 16, 2021
8b3a61c
worksave
Feb 22, 2021
b4bec46
worksave queue
Mar 4, 2021
abbc19b
no-deadlock async infer
Mar 5, 2021
cbc7c13
add lock handle in waitAll
Mar 8, 2021
8760df3
Merge remote-tracking branch 'akuporos/mk/ov_pybind_poc' into mk/ov_p…
Mar 8, 2021
61dc9de
Merge remote-tracking branch 'upstream/master' into mk/ov_pybind_poc_…
Mar 9, 2021
827fad7
fix building issues with includes
Mar 10, 2021
e2cd7f8
update of setup and cmakelist
Mar 12, 2021
ea73f2b
Merge remote-tracking branch 'akuporos/mk/ov_pybind_poc' into mk/ov_p…
Mar 15, 2021
0bd15e1
remove set from cmake
Mar 18, 2021
1451543
Update Blob class with precisions
Mar 18, 2021
9c28f11
Rewrite test_write_numpy_scalar_int64
Mar 18, 2021
f9a6c44
Generic Blob casting in infer queue
Mar 18, 2021
d724f39
worksave
Mar 19, 2021
be64417
added template for setblob
Mar 19, 2021
ecdbd2c
Added blob convert in infer request
Mar 22, 2021
f9f05af
Merge remote-tracking branch 'akuporos/mk/ov_pybind_poc' into mk/ov_p…
Mar 22, 2021
071e446
move blob casting to common namespace
Mar 23, 2021
e64162a
add user_id to callbacks
Mar 25, 2021
5ce1689
Merge remote-tracking branch 'akuporos/mk/ov_pybind_poc' into mk/ov_p…
Mar 25, 2021
c2b39e7
remove hardcoded root dir
Mar 26, 2021
1d1b8cd
refactor code and comments
Mar 26, 2021
a584074
blob refactoring
akuporos Mar 30, 2021
ac01b25
Fix sync in InferQueue and add default callbacks
Mar 31, 2021
c2e6b00
patch for protobuf cmake
Mar 31, 2021
66f3768
Merge branch 'mk/ov_pybind_poc' into mk/ov_pybind_poc_async
Mar 31, 2021
fbb9655
Merge branch 'mk/ov_pybind_poc_async' of https://github.com/jiwaszki/…
Apr 1, 2021
141add9
Merge remote-tracking branch 'akuporos/mk/ov_pybind_poc' into mk/ov_p…
Apr 1, 2021
d991366
rename to cast_to_blob in infer queue
Apr 1, 2021
dd98585
add missing cast_to_blob
Apr 1, 2021
413c1ca
change license
Apr 2, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions ngraph/python/src/openvino/inference_engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,14 @@
from openvino.pyopenvino import TensorDesc
from openvino.pyopenvino import get_version
from openvino.pyopenvino import StatusCode
from openvino.pyopenvino import InferQueue
from openvino.pyopenvino import Blob
from openvino.pyopenvino import PreProcessInfo
from openvino.pyopenvino import MeanVariant
from openvino.pyopenvino import ResizeAlgorithm
from openvino.pyopenvino import ColorFormat
from openvino.pyopenvino import PreProcessChannel

from openvino.inference_engine.ie_api import BlobPatch
from openvino.inference_engine.ie_api import BlobWrapper
# Patching for Blob class
Blob = BlobPatch
Blob = BlobWrapper
106 changes: 71 additions & 35 deletions ngraph/python/src/openvino/inference_engine/ie_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,51 +2,87 @@
# SPDX-License-Identifier: Apache-2.0

from openvino.pyopenvino import TBlobFloat32
from openvino.pyopenvino import TBlobFloat64
from openvino.pyopenvino import TBlobFloat16
from openvino.pyopenvino import TBlobInt64
from openvino.pyopenvino import TBlobUint64
from openvino.pyopenvino import TBlobInt32
from openvino.pyopenvino import TBlobUint32
from openvino.pyopenvino import TBlobInt16
from openvino.pyopenvino import TBlobUint16
from openvino.pyopenvino import TBlobInt8
from openvino.pyopenvino import TBlobUint8
from openvino.pyopenvino import TensorDesc

from openvino.pyopenvino import TensorDesc
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

already imported above


import numpy as np

# Patch for Blobs to dispatch types on Python side
class BlobPatch:
def __new__(cls, tensor_desc, arr : np.ndarray = None):
# TODO: create tensor_desc based on arr itself
# if tensor_desc is not given
if arr is not None:

precision_map = {'FP32': np.float32,
'FP64': np.float64,
'FP16': np.int16,
'BF16': np.int16,
'I16': np.int16,
'I8': np.int8,
'BIN': np.int8,
'I32': np.int32,
'I64': np.int64,
'U8': np.uint8,
'BOOL': np.uint8,
'U16': np.uint16,
'U32': np.uint32,
'U64': np.uint64}

# Dispatch Blob types on Python side.
class BlobWrapper:
def __new__(cls, tensor_desc : TensorDesc, arr : np.ndarray = None):
arr_size = 0
precision = ""
if arr is not None and tensor_desc is not None:
arr = np.array(arr) # Keeping array as numpy array
size_arr = np.prod(arr.shape)
if arr is not None:
if np.isfortran(arr):
arr = arr.ravel(order="F")
else:
arr = arr.ravel(order="C")
# Return TBlob depends on numpy array dtype
# TODO: add dispatching based on tensor_desc precision value
if tensor_desc is not None and arr is None:
arr_size = np.prod(arr.shape)
tensor_desc_size = np.prod(tensor_desc.dims)
precision = tensor_desc.precision
if precision == "FP32":
return TBlobFloat32(tensor_desc)
if np.isfortran(arr):
arr = arr.ravel(order="F")
else:
raise ValueError("not supported precision")
elif tensor_desc is not None and arr is not None:
if arr.dtype in [np.float32]:
return TBlobFloat32(tensor_desc, arr, size_arr)
# elif arr.dtype in [np.float64]:
# return TBlobFloat32(tensor_desc, arr.view(dtype=np.float32), size_arr)
# elif arr.dtype in [np.int64]:
# return TBlobInt64(tensor_desc, arr, size)
# elif arr.dtype in [np.int32]:
# return TBlobInt32(tensor_desc, arr, size)
# elif arr.dtype in [np.int16]:
# return TBlobInt16(tensor_desc, arr, size)
# elif arr.dtype in [np.int8]:
# return TBlobInt8(tensor_desc, arr, size)
# elif arr.dtype in [np.uint8]:
# return TBlobUint8(tensor_desc, arr, size)
arr = arr.ravel(order="C")
if arr_size != tensor_desc_size:
raise AttributeError(f'Number of elements in provided numpy array '
f'{arr_size} and required by TensorDesc '
f'{tensor_desc_size} are not equal')
if arr.dtype != precision_map[precision]:
raise ValueError(f"Data type {arr.dtype} of provided numpy array "
f"doesn't match to TensorDesc precision {precision}")
if not arr.flags['C_CONTIGUOUS']:
arr = np.ascontiguousarray(arr)
elif arr is None and tensor_desc is not None:
arr = np.empty(0, dtype=precision_map[precision])
else:
raise AttributeError("TensorDesc can't be None")

if precision in ["FP32"]:
return TBlobFloat32(tensor_desc, arr, arr_size)
elif precision in ["FP64"]:
return TBlobFloat64(tensor_desc, arr, arr_size)
elif precision in ["FP16", "BF16"]:
return TBlobFloat16(tensor_desc, arr.view(dtype=np.int16), arr_size)
elif precision in ["I64"]:
return TBlobInt64(tensor_desc, arr, arr_size)
elif precision in ["U64"]:
return TBlobUint64(tensor_desc, arr, arr_size)
elif precision in ["I32"]:
return TBlobInt32(tensor_desc, arr, arr_size)
elif precision in ["U32"]:
return TBlobUint32(tensor_desc, arr, arr_size)
elif precision in ["I16"]:
return TBlobInt16(tensor_desc, arr, arr_size)
elif precision in ["U16"]:
return TBlobUint16(tensor_desc, arr, arr_size)
elif precision in ["I8", "BIN"]:
return TBlobInt8(tensor_desc, arr, arr_size)
elif precision in ["U8", "BOOL"]:
return TBlobUint8(tensor_desc, arr, arr_size)
else:
# TODO: raise error
return None
raise AttributeError(f'Unsupported precision {precision} for Blob')
176 changes: 106 additions & 70 deletions ngraph/python/src/pyopenvino/inference_engine/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,79 +6,87 @@

#include "common.hpp"

namespace Common {

namespace {
const std::unordered_map<int, std::string> layout_int_to_str_map = {{0, "ANY"},
{1, "NCHW"},
{2, "NHWC"},
{3, "NCDHW"},
{4, "NDHWC"},
{64, "OIHW"},
{95, "SCALAR"},
{96, "C"},
namespace Common
{
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

please, undo this style changes

namespace
{
const std::unordered_map<int, std::string> layout_int_to_str_map = {{0, "ANY"},
{1, "NCHW"},
{2, "NHWC"},
{3, "NCDHW"},
{4, "NDHWC"},
{64, "OIHW"},
{95, "SCALAR"},
{96, "C"},
{128, "CHW"},
{192, "HW"},
{193, "NC"},
{194, "CN"},
{200, "BLOCKED"}};

const std::unordered_map <std::string, InferenceEngine::Layout> layout_str_to_enum = {
{"ANY", InferenceEngine::Layout::ANY},
{"NHWC", InferenceEngine::Layout::NHWC},
{"NCHW", InferenceEngine::Layout::NCHW},
{"NCDHW", InferenceEngine::Layout::NCDHW},
{"NDHWC", InferenceEngine::Layout::NDHWC},
{"OIHW", InferenceEngine::Layout::OIHW},
{"GOIHW", InferenceEngine::Layout::GOIHW},
{"OIDHW", InferenceEngine::Layout::OIDHW},
{"GOIDHW", InferenceEngine::Layout::GOIDHW},
{"SCALAR", InferenceEngine::Layout::SCALAR},
{"C", InferenceEngine::Layout::C},
{"CHW", InferenceEngine::Layout::CHW},
{"HW", InferenceEngine::Layout::HW},
{"NC", InferenceEngine::Layout::NC},
{"CN", InferenceEngine::Layout::CN},
{"BLOCKED", InferenceEngine::Layout::BLOCKED}
};
}
const std::unordered_map<std::string, InferenceEngine::Layout> layout_str_to_enum = {
{"ANY", InferenceEngine::Layout::ANY},
{"NHWC", InferenceEngine::Layout::NHWC},
{"NCHW", InferenceEngine::Layout::NCHW},
{"NCDHW", InferenceEngine::Layout::NCDHW},
{"NDHWC", InferenceEngine::Layout::NDHWC},
{"OIHW", InferenceEngine::Layout::OIHW},
{"GOIHW", InferenceEngine::Layout::GOIHW},
{"OIDHW", InferenceEngine::Layout::OIDHW},
{"GOIDHW", InferenceEngine::Layout::GOIDHW},
{"SCALAR", InferenceEngine::Layout::SCALAR},
{"C", InferenceEngine::Layout::C},
{"CHW", InferenceEngine::Layout::CHW},
{"HW", InferenceEngine::Layout::HW},
{"NC", InferenceEngine::Layout::NC},
{"CN", InferenceEngine::Layout::CN},
{"BLOCKED", InferenceEngine::Layout::BLOCKED}};
} // namespace

InferenceEngine::Layout get_layout_from_string(const std::string &layout) {
InferenceEngine::Layout get_layout_from_string(const std::string& layout)
{
return layout_str_to_enum.at(layout);
}

const std::string &get_layout_from_enum(const InferenceEngine::Layout &layout) {
const std::string& get_layout_from_enum(const InferenceEngine::Layout& layout)
{
return layout_int_to_str_map.at(layout);
}

PyObject *parse_parameter(const InferenceEngine::Parameter &param) {
PyObject* parse_parameter(const InferenceEngine::Parameter& param)
{
// Check for std::string
if (param.is<std::string>()) {
if (param.is<std::string>())
{
return PyUnicode_FromString(param.as<std::string>().c_str());
}
// Check for int
else if (param.is<int>()) {
// Check for int
else if (param.is<int>())
{
auto val = param.as<int>();
return PyLong_FromLong((long) val);
return PyLong_FromLong((long)val);
}
// Check for unsigned int
else if (param.is<unsigned int>()) {
// Check for unsigned int
else if (param.is<unsigned int>())
{
auto val = param.as<unsigned int>();
return PyLong_FromLong((unsigned long) val);
return PyLong_FromLong((unsigned long)val);
}
// Check for float
else if (param.is<float>()) {
// Check for float
else if (param.is<float>())
{
auto val = param.as<float>();
return PyFloat_FromDouble((double) val);
return PyFloat_FromDouble((double)val);
}
// Check for bool
else if (param.is<bool>()) {
// Check for bool
else if (param.is<bool>())
{
auto val = param.as<bool>();
return val ? Py_True : Py_False;
}
// Check for std::vector<std::string>
else if (param.is < std::vector < std::string >> ()) {
auto val = param.as < std::vector < std::string >> ();
else if (param.is<std::vector<std::string>>()) {
auto val = param.as<std::vector<std::string>>();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
PyObject *str_val = PyUnicode_FromString(it.c_str());
Expand All @@ -87,72 +95,100 @@ namespace Common {
return list;
}
// Check for std::vector<int>
else if (param.is < std::vector < int >> ()) {
auto val = param.as < std::vector < int >> ();
else if (param.is<std::vector<int>>()) {
auto val = param.as<std::vector<int>>();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
PyList_Append(list, PyLong_FromLong(it));
}
return list;
}
// Check for std::vector<unsigned int>
else if (param.is < std::vector < unsigned int >> ()) {
auto val = param.as < std::vector < unsigned int >> ();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
// Check for std::vector<unsigned int>
else if (param.is<std::vector<unsigned int>>())
{
auto val = param.as<std::vector<unsigned int>>();
PyObject* list = PyList_New(0);
for (const auto& it : val)
{
PyList_Append(list, PyLong_FromLong(it));
}
return list;
}
// Check for std::vector<float>
else if (param.is < std::vector < float >> ()) {
auto val = param.as < std::vector < float >> ();
else if (param.is<std::vector<float>>()) {
auto val = param.as<std::vector<float>>();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
PyList_Append(list, PyFloat_FromDouble((double) it));
}
return list;
}
// Check for std::tuple<unsigned int, unsigned int>
else if (param.is < std::tuple < unsigned int, unsigned int >> ()) {
auto val = param.as < std::tuple < unsigned int,
unsigned int >> ();
else if (param.is<std::tuple<unsigned int, unsigned int>>()) {
auto val = param.as<std::tuple<unsigned int, unsigned int>>();
PyObject *tuple = PyTuple_New(2);
PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long) std::get<0>(val)));
PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long) std::get<1>(val)));
return tuple;
}
// Check for std::tuple<unsigned int, unsigned int, unsigned int>
else if (param.is < std::tuple < unsigned int, unsigned int, unsigned int >> ()) {
auto val = param.as < std::tuple < unsigned int,
unsigned int, unsigned int >> ();
else if (param.is<std::tuple<unsigned int, unsigned int, unsigned int>>()) {
auto val = param.as<std::tuple<unsigned int, unsigned int, unsigned int>>();
PyObject *tuple = PyTuple_New(3);
PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long) std::get<0>(val)));
PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long) std::get<1>(val)));
PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long) std::get<2>(val)));
return tuple;
}
// Check for std::map<std::string, std::string>
else if (param.is < std::map < std::string, std::string >> ()) {
auto val = param.as < std::map < std::string, std::string>>();
else if (param.is <std::map<std::string, std::string>>()) {
auto val = param.as <std::map<std::string, std::string>>();
PyObject *dict = PyDict_New();
for (const auto &it : val) {
PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str()));
}
return dict;
}
// Check for std::map<std::string, int>
else if (param.is < std::map < std::string, int >> ()) {
auto val = param.as < std::map < std::string,
int >> ();
else if (param.is<std::map<std::string, int>>()) {
auto val = param.as<std::map<std::string, int>>();
PyObject *dict = PyDict_New();
for (const auto &it : val) {
PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long) it.second));
}
return dict;
}
else
{
PyErr_SetString(PyExc_TypeError,
"Failed to convert parameter to Python representation!");
return (PyObject*)NULL;
}
}

const std::shared_ptr<InferenceEngine::Blob> convert_to_blob(const py::handle& blob) {
if (py::isinstance<InferenceEngine::TBlob<float>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<float>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<double>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<double>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<int8_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int8_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<int16_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int16_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<int32_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int32_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<int64_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int64_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<uint8_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint8_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<uint16_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint16_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<uint32_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint32_t>> &>();
} else if (py::isinstance<InferenceEngine::TBlob<uint64_t>>(blob)) {
return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint64_t>> &>();
} else {
PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!");
return (PyObject *) NULL;
// Throw error
}
}
};
}; // namespace Common
Loading