Skip to content

Commit

Permalink
blob refactoring
Browse files Browse the repository at this point in the history
  • Loading branch information
akuporos committed Mar 30, 2021
1 parent 7382f6c commit 9421507
Show file tree
Hide file tree
Showing 8 changed files with 146 additions and 94 deletions.
4 changes: 2 additions & 2 deletions ngraph/python/src/openvino/inference_engine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,6 @@
from openvino.pyopenvino import ColorFormat
from openvino.pyopenvino import PreProcessChannel

from openvino.inference_engine.ie_api import BlobWrapper

# Patch the Blob name so that constructing a Blob dispatches to the
# precision-specific TBlob classes on the Python side (BlobWrapper.__new__).
Blob = BlobWrapper
106 changes: 71 additions & 35 deletions ngraph/python/src/openvino/inference_engine/ie_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,51 +2,87 @@
# SPDX-License-Identifier: Apache-2.0

from openvino.pyopenvino import TBlobFloat32
from openvino.pyopenvino import TBlobFloat64
from openvino.pyopenvino import TBlobFloat16
from openvino.pyopenvino import TBlobInt64
from openvino.pyopenvino import TBlobUint64
from openvino.pyopenvino import TBlobInt32
from openvino.pyopenvino import TBlobUint32
from openvino.pyopenvino import TBlobInt16
from openvino.pyopenvino import TBlobUint16
from openvino.pyopenvino import TBlobInt8
from openvino.pyopenvino import TBlobUint8
from openvino.pyopenvino import TensorDesc


import numpy as np

# Dispatch Blob types on Python side.

# Maps TensorDesc precision strings onto the numpy dtype the data buffer must
# use.  FP16/BF16 buffers are carried as int16 arrays because numpy has no
# bfloat16 type and the TBlobFloat16 binding exposes 16-bit storage as int16.
precision_map = {'FP32': np.float32,
                 'FP64': np.float64,
                 'FP16': np.int16,
                 'BF16': np.int16,
                 'I16': np.int16,
                 'I8': np.int8,
                 'BIN': np.int8,
                 'I32': np.int32,
                 'I64': np.int64,
                 'U8': np.uint8,
                 'BOOL': np.uint8,
                 'U16': np.uint16,
                 'U32': np.uint32,
                 'U64': np.uint64}


class BlobWrapper:
    """Factory that dispatches to a precision-specific TBlob class.

    ``BlobWrapper(tensor_desc, arr)`` never returns a ``BlobWrapper``
    instance: ``__new__`` inspects ``tensor_desc.precision`` and returns the
    matching ``TBlob*`` object instead.

    :param tensor_desc: TensorDesc describing precision, dims and layout.
                        Must not be None.
    :param arr: optional numpy array (or array-like) holding the blob data;
                its flattened size and dtype must agree with ``tensor_desc``.
    :raises AttributeError: if ``tensor_desc`` is None, its precision is not
                            supported, or the element counts disagree.
    :raises ValueError: if the array dtype does not match the precision.
    """

    def __new__(cls, tensor_desc : TensorDesc, arr : np.ndarray = None):
        if tensor_desc is None:
            raise AttributeError("TensorDesc can't be None")
        # Resolve and validate the precision up front (bug fix: previously the
        # arr-is-None branch looked up precision_map[""] and raised KeyError).
        precision = tensor_desc.precision
        if precision not in precision_map:
            raise AttributeError(f'Unsupported precision '
                                 f'{precision} for Blob')
        arr_size = 0
        if arr is not None:
            arr = np.array(arr)  # Keeping array as numpy array
            arr_size = np.prod(arr.shape)
            tensor_desc_size = np.prod(tensor_desc.dims)
            # Flatten following the array's own memory order so that no data
            # gets silently transposed.
            if np.isfortran(arr):
                arr = arr.ravel(order="F")
            else:
                arr = arr.ravel(order="C")
            if arr_size != tensor_desc_size:
                raise AttributeError(f'Number of elements in provided numpy array '
                                     f'{arr_size} and required by TensorDesc '
                                     f'{tensor_desc_size} are not equal')
            if arr.dtype != precision_map[precision]:
                raise ValueError(f"Data type {arr.dtype} of provided numpy array "
                                 f"doesn't match to TensorDesc precision {precision}")
            # The C++ side expects a contiguous buffer.
            if not arr.flags['C_CONTIGUOUS']:
                arr = np.ascontiguousarray(arr)
        else:
            # No data supplied: hand the TBlob an empty, correctly-typed buffer.
            arr = np.empty(0, dtype=precision_map[precision])

        if precision in ["FP32"]:
            return TBlobFloat32(tensor_desc, arr, arr_size)
        elif precision in ["FP64"]:
            return TBlobFloat64(tensor_desc, arr, arr_size)
        elif precision in ["FP16", "BF16"]:
            # 16-bit float data travels as raw int16 bits (see precision_map).
            return TBlobFloat16(tensor_desc, arr.view(dtype=np.int16), arr_size)
        elif precision in ["I64"]:
            return TBlobInt64(tensor_desc, arr, arr_size)
        elif precision in ["U64"]:
            return TBlobUint64(tensor_desc, arr, arr_size)
        elif precision in ["I32"]:
            return TBlobInt32(tensor_desc, arr, arr_size)
        elif precision in ["U32"]:
            return TBlobUint32(tensor_desc, arr, arr_size)
        elif precision in ["I16"]:
            return TBlobInt16(tensor_desc, arr, arr_size)
        elif precision in ["U16"]:
            return TBlobUint16(tensor_desc, arr, arr_size)
        elif precision in ["I8", "BIN"]:
            return TBlobInt8(tensor_desc, arr, arr_size)
        else:
            # Guaranteed by the precision_map check above: "U8" or "BOOL".
            return TBlobUint8(tensor_desc, arr, arr_size)
58 changes: 41 additions & 17 deletions ngraph/python/src/pyopenvino/inference_engine/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,8 @@ namespace Common {
return val ? Py_True : Py_False;
}
// Check for std::vector<std::string>
else if (param.is < std::vector < std::string >> ()) {
auto val = param.as < std::vector < std::string >> ();
else if (param.is<std::vector<std::string>>()) {
auto val = param.as<std::vector<std::string>>();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
PyObject *str_val = PyUnicode_FromString(it.c_str());
Expand All @@ -87,8 +87,8 @@ namespace Common {
return list;
}
// Check for std::vector<int>
else if (param.is < std::vector < int >> ()) {
auto val = param.as < std::vector < int >> ();
else if (param.is<std::vector<int>>()) {
auto val = param.as<std::vector<int>>();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
PyList_Append(list, PyLong_FromLong(it));
Expand All @@ -105,46 +105,43 @@ namespace Common {
return list;
}
// Check for std::vector<float>
else if (param.is < std::vector < float >> ()) {
auto val = param.as < std::vector < float >> ();
else if (param.is<std::vector<float>>()) {
auto val = param.as<std::vector<float>>();
PyObject *list = PyList_New(0);
for (const auto &it : val) {
PyList_Append(list, PyFloat_FromDouble((double) it));
}
return list;
}
// Check for std::tuple<unsigned int, unsigned int>
else if (param.is < std::tuple < unsigned int, unsigned int >> ()) {
auto val = param.as < std::tuple < unsigned int,
unsigned int >> ();
else if (param.is<std::tuple<unsigned int, unsigned int>>()) {
auto val = param.as<std::tuple<unsigned int, unsigned int>>();
PyObject *tuple = PyTuple_New(2);
PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long) std::get<0>(val)));
PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long) std::get<1>(val)));
return tuple;
}
// Check for std::tuple<unsigned int, unsigned int, unsigned int>
else if (param.is < std::tuple < unsigned int, unsigned int, unsigned int >> ()) {
auto val = param.as < std::tuple < unsigned int,
unsigned int, unsigned int >> ();
else if (param.is<std::tuple<unsigned int, unsigned int, unsigned int>>()) {
auto val = param.as<std::tuple<unsigned int, unsigned int, unsigned int>>();
PyObject *tuple = PyTuple_New(3);
PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long) std::get<0>(val)));
PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long) std::get<1>(val)));
PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long) std::get<2>(val)));
return tuple;
}
// Check for std::map<std::string, std::string>
else if (param.is < std::map < std::string, std::string >> ()) {
auto val = param.as < std::map < std::string, std::string>>();
else if (param.is <std::map<std::string, std::string>>()) {
auto val = param.as <std::map<std::string, std::string>>();
PyObject *dict = PyDict_New();
for (const auto &it : val) {
PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str()));
}
return dict;
}
// Check for std::map<std::string, int>
else if (param.is < std::map < std::string, int >> ()) {
auto val = param.as < std::map < std::string,
int >> ();
else if (param.is<std::map<std::string, int>>()) {
auto val = param.as<std::map<std::string, int>>();
PyObject *dict = PyDict_New();
for (const auto &it : val) {
PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long) it.second));
Expand All @@ -155,4 +152,31 @@ namespace Common {
return (PyObject *) NULL;
}
}

const std::shared_ptr<InferenceEngine::Blob> convert_to_blob(const py::handle& blob) {
    // Try every TBlob<T> specialization registered on the Python side and
    // upcast the first match to a generic Blob pointer for the C++ API.
    if (py::isinstance<InferenceEngine::TBlob<float>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<float>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<double>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<double>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<int8_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int8_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<int16_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int16_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<int32_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int32_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<int64_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<int64_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<uint8_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint8_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<uint16_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint16_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<uint32_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint32_t>> &>();
    } else if (py::isinstance<InferenceEngine::TBlob<uint64_t>>(blob)) {
        return blob.cast<const std::shared_ptr<InferenceEngine::TBlob<uint64_t>> &>();
    } else {
        // Bug fix: the original only had a "// Throw error" comment here and
        // fell off the end of a value-returning function (undefined behavior).
        throw std::runtime_error("Cannot convert the given Python object to "
                                 "InferenceEngine::Blob: unsupported TBlob type");
    }
}

};
8 changes: 7 additions & 1 deletion ngraph/python/src/pyopenvino/inference_engine/common.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,21 @@

#pragma once

#include <pybind11/pybind11.h>

#include <memory>  // std::shared_ptr used by convert_to_blob
#include <string>

#include "Python.h"
#include <ie_parameter.hpp>
#include "ie_common.h"
#include <ie_blob.h>

namespace py = pybind11;

namespace Common {
    // Parses a layout name (e.g. "NCHW") into the corresponding enum value.
    InferenceEngine::Layout get_layout_from_string(const std::string &layout);

    // Inverse of get_layout_from_string: enum value to its canonical name.
    const std::string& get_layout_from_enum(const InferenceEngine::Layout &layout);

    // Converts an InferenceEngine::Parameter into the equivalent Python object.
    PyObject *parse_parameter(const InferenceEngine::Parameter &param);

    // Extracts a generic Blob from a Python object wrapping any TBlob<T>
    // specialization; throws when the object is not a known TBlob type.
    const std::shared_ptr<InferenceEngine::Blob> convert_to_blob(const py::handle& blob);
}; // namespace Common
21 changes: 3 additions & 18 deletions ngraph/python/src/pyopenvino/inference_engine/ie_blob.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,28 +26,13 @@ void regclass_TBlob(py::module m, std::string typestring) {
py::class_<InferenceEngine::TBlob<T>, std::shared_ptr<InferenceEngine::TBlob<T>>> cls(
m, pyclass_name);

cls.def(py::init([](const InferenceEngine::TensorDesc& tensorDesc) {
return std::make_shared<InferenceEngine::TBlob<T>>(tensorDesc);
}));

cls.def(py::init([](const InferenceEngine::TensorDesc& tensorDesc, py::array_t<T> arr) {
auto size = arr.size(); // or copy from tensorDesc getDims product?
// py::print(arr.dtype()); // validate tensorDesc with this???
// assert arr.size() == TensorDesc.getDims().product? ???
T* ptr = const_cast<T*>(arr.data(0)); // Note: obligatory removal of const!
return std::make_shared<InferenceEngine::TBlob<T>>(tensorDesc, ptr, size);
}));

cls.def(py::init(
[](const InferenceEngine::TensorDesc& tensorDesc, py::array_t<T>& arr, size_t size = 0) {
if (size == 0) {
size = arr.size(); // or copy from tensorDesc getDims product?
}
auto blob = InferenceEngine::make_shared_blob<T>(tensorDesc);
blob->allocate();
std::copy(arr.data(0), arr.data(0) + size, blob->rwmap().template as<T*>());
// py::print(arr.dtype()); // validate tensorDesc with this???
// assert arr.size() == TensorDesc.getDims().product?10 ???
if (size != 0) {
std::copy(arr.data(0), arr.data(0) + size, blob->rwmap().template as<T *>());
}
return blob;
}));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include <cpp/ie_infer_request.hpp>
#include <ie_common.h>

#include "pyopenvino/inference_engine/common.hpp"
#include "pyopenvino/inference_engine/ie_infer_request.hpp"
#include "pyopenvino/inference_engine/ie_preprocess_info.hpp"
#include "pyopenvino/inference_engine/ie_executable_network.hpp"
Expand All @@ -28,29 +29,29 @@ void regclass_InferRequest(py::module m)
cls.def("set_input", [](InferenceEngine::InferRequest& self, const py::dict& inputs) {
for (auto&& input : inputs) {
auto name = input.first.cast<std::string>().c_str();
const std::shared_ptr<InferenceEngine::TBlob<float>>& blob = input.second.cast<const std::shared_ptr<InferenceEngine::TBlob<float>>&>();
auto blob = Common::convert_to_blob(input.second);
self.SetBlob(name, blob);
}
});
cls.def("set_output", [](InferenceEngine::InferRequest& self, const py::dict& results) {
for (auto&& result : results) {
auto name = result.first.cast<std::string>().c_str();
const std::shared_ptr<InferenceEngine::TBlob<float>>& blob = result.second.cast<const std::shared_ptr<InferenceEngine::TBlob<float>>&>();
auto blob = Common::convert_to_blob(result.second);
self.SetBlob(name, blob);
}
});

cls.def("set_blob", [](InferenceEngine::InferRequest& self,
const std::string& name,
const InferenceEngine::TBlob<float>::Ptr& blob) {
self.SetBlob(name, blob);
py::handle blob) {
self.SetBlob(name, Common::convert_to_blob(blob));
});

cls.def("set_blob", [](InferenceEngine::InferRequest& self,
const std::string& name,
const InferenceEngine::TBlob<float>::Ptr& blob,
py::handle blob,
const InferenceEngine::PreProcessInfo& info) {
self.SetBlob(name, blob);
self.SetBlob(name, Common::convert_to_blob(blob));
});

cls.def("set_batch", &InferenceEngine::InferRequest::SetBatch, py::arg("size"));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include <pybind11/stl.h>

#include "pyopenvino/inference_engine/ie_preprocess_info.hpp"
#include "pyopenvino/inference_engine/common.hpp"

#include <ie_preprocess.hpp>
#include <ie_common.h>
Expand All @@ -27,13 +28,13 @@ void regclass_PreProcessInfo(py::module m) {
cls.def("get_number_of_channels", &InferenceEngine::PreProcessInfo::getNumberOfChannels);
cls.def("init", &InferenceEngine::PreProcessInfo::init);
cls.def("set_mean_image", [](InferenceEngine::PreProcessInfo& self,
const InferenceEngine::TBlob<float>::Ptr& meanImage) {
self.setMeanImage(meanImage);
py::handle meanImage) {
self.setMeanImage(Common::convert_to_blob(meanImage));
});
cls.def("set_mean_image_for_channel", [](InferenceEngine::PreProcessInfo& self,
const InferenceEngine::TBlob<float>::Ptr& meanImage,
py::handle meanImage,
const size_t channel) {
self.setMeanImageForChannel(meanImage, channel);
self.setMeanImageForChannel(Common::convert_to_blob(meanImage), channel);
});
cls.def_property("mean_variant", &InferenceEngine::PreProcessInfo::getMeanVariant,
&InferenceEngine::PreProcessInfo::setVariant);
Expand Down
21 changes: 10 additions & 11 deletions ngraph/python/src/pyopenvino/pyopenvino.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,21 +53,20 @@ PYBIND11_MODULE(pyopenvino, m) {

regclass_IECore(m);

// GeneralBlob
// Registering template of Blob
regclass_Blob(m);
// Specific type Blobs
// Registering specific types of Blobs
regclass_TBlob<float>(m, "Float32");
regclass_TBlob<double>(m, "Float64");
regclass_TBlob<short>(m, "Float16");
regclass_TBlob<int64_t>(m, "Int64");
regclass_TBlob<uint64_t>(m, "Uint64");
regclass_TBlob<int32_t>(m, "Int32");
regclass_TBlob<uint32_t>(m, "Uint32");
regclass_TBlob<int16_t>(m, "Int16");
regclass_TBlob<uint16_t>(m, "Uint16");
regclass_TBlob<int8_t>(m, "Int8");
regclass_TBlob<uint8_t>(m, "Uint8");
regclass_TBlob<int16_t>(m, "Int16");
// regclass_TBlob<uint16_t>(m, "Uint16");
regclass_TBlob<int32_t>(m, "Int32");
// regclass_TBlob<uint32_t>(m, "Uint32");
regclass_TBlob<long>(m, "Int64");
// regclass_TBlob<unsigned long>(m, "UInt64");
// regclass_TBlob<long long>(m);
// regclass_TBlob<unsigned long long>(m);
// regclass_TBlob<double>(m);

regclass_IENetwork(m);
regclass_ExecutableNetwork(m);
Expand Down

0 comments on commit 9421507

Please sign in to comment.