Extend tensor API (#15811)
* Added some new tensor API

* Added tests on constructors

* Small changes

* Fixed tensor tests

* Fixed tests

* Added parametrized tests

* Extend tests and delete copy_to from remote tensor
ilyachur authored Feb 22, 2023
1 parent 27ea9ea commit 893f96a
Showing 6 changed files with 410 additions and 18 deletions.
1 change: 1 addition & 0 deletions src/core/include/openvino/core/node_output.hpp
@@ -103,6 +103,7 @@ class OPENVINO_API Output<Node> {
bool operator>(const Output& other) const;
bool operator<=(const Output& other) const;
bool operator>=(const Output& other) const;
operator Output<const Node>() const;

private:
std::shared_ptr<Node> m_node;
26 changes: 25 additions & 1 deletion src/core/include/openvino/runtime/tensor.hpp
@@ -116,6 +116,23 @@ class OPENVINO_API Tensor {
*/
Tensor(const element::Type type, const Shape& shape, void* host_ptr, const Strides& strides = {});

/**
* @brief Constructs Tensor using a port from a node. Allocates internal host storage using the default allocator
* @param port Port from a node
* @param allocator Allocator used to allocate memory for the internal tensor storage
*/
Tensor(const ov::Output<const ov::Node>& port, const Allocator& allocator = {});

/**
* @brief Constructs Tensor using a port from a node. Wraps already allocated host memory.
* @note Does not perform memory allocation internally
* @param port Port from a node
* @param host_ptr Pointer to pre-allocated host memory
* @param strides Optional strides in bytes. If omitted, strides are computed automatically based
* on shape and element size
*/
Tensor(const ov::Output<const ov::Node>& port, void* host_ptr, const Strides& strides = {});
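A minimal usage sketch for the two port-based constructors above (the model path and buffer name are illustrative; assumes a model whose input has a static f32 shape):

    #include <openvino/openvino.hpp>
    #include <vector>

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");  // hypothetical model file
        // Output<Node> converts implicitly to Output<const Node> (see node_output.hpp above).
        ov::Output<const ov::Node> port = model->input();

        // Allocating constructor: element type and shape are taken from the port.
        ov::Tensor allocated(port);

        // Wrapping constructor: reuses caller-owned memory, no internal allocation.
        std::vector<float> buffer(ov::shape_size(port.get_shape()));
        ov::Tensor wrapped(port, buffer.data());
        return 0;
    }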

/**
* @brief Constructs region of interest (ROI) tensor from another tensor.
* @note Does not perform memory allocation internally
@@ -143,10 +160,17 @@
*/
Shape get_shape() const;

/**
* @brief Copies this tensor's data to a destination tensor. The destination must have the same element type and shape
*
* @param dst Destination tensor
*/
void copy_to(ov::Tensor& dst) const;
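A short sketch of the intended call pattern (tensor names are illustrative; the {0}-shaped destination case follows the implementation shown later in this diff):

    #include <openvino/openvino.hpp>

    void copy_example() {
        ov::Tensor src(ov::element::f32, ov::Shape{2, 3});
        ov::Tensor dst(ov::element::f32, ov::Shape{2, 3});
        src.copy_to(dst);  // same element type and shape: element-wise copy

        ov::Tensor empty(ov::element::f32, ov::Shape{0});
        src.copy_to(empty);  // a {0}-shaped destination is resized to src's shape first
    }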

/**
* @brief Reports whether the tensor is continuous or not
*
* @return true if blob is continuous
* @return true if tensor is continuous
*/
bool is_continuous() const;

4 changes: 4 additions & 0 deletions src/core/src/node_output.cpp
@@ -147,6 +147,10 @@ bool Output<Node>::operator>=(const Output& other) const {
return !(*this < other);
}

Output<Node>::operator Output<const Node>() const {
return Output<const Node>(get_node(), get_index());
}
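A brief sketch of what this conversion enables (function and variable names are illustrative):

    #include <memory>
    #include <openvino/openvino.hpp>

    // The implicit conversion lets a mutable Output feed APIs that take Output<const Node>,
    // e.g. the new port-based Tensor constructors added in this commit.
    ov::Tensor tensor_for_first_output(const std::shared_ptr<ov::Model>& model) {
        ov::Output<ov::Node> out = model->output(0);  // mutable port
        ov::Output<const ov::Node> const_out = out;   // uses the operator defined above
        return ov::Tensor(const_out);
    }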

Output<const Node>::Output(const Node* node, size_t index) : m_index(index) {
OPENVINO_ASSERT(node, "Cannot create ov::Output<const ov::Node> from nullptr!");
m_node = node->shared_from_this();
172 changes: 155 additions & 17 deletions src/core/src/runtime/ov_tensor.cpp
@@ -7,6 +7,9 @@
#include "blob_factory.hpp" // IE private header
#include "ie_ngraph_utils.hpp" // IE private header
#include "openvino/core/except.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/core/strides.hpp"
#include "openvino/runtime/remote_tensor.hpp"
#include "openvino/runtime/tensor.hpp"
#include "runtime/blob_allocator.hpp"
#include "shape_util.hpp"
@@ -94,6 +97,17 @@ Tensor::Tensor(const Tensor& owner, const Coordinate& begin, const Coordinate& e
}
}

Tensor::Tensor(const ov::Output<const ov::Node>& port, const Allocator& allocator)
: Tensor(port.get_element_type(),
port.get_partial_shape().is_dynamic() ? ov::Shape{0} : port.get_shape(),
allocator) {}

Tensor::Tensor(const ov::Output<const ov::Node>& port, void* host_ptr, const Strides& byte_strides)
: Tensor(port.get_element_type(),
port.get_partial_shape().is_dynamic() ? ov::Shape{0} : port.get_shape(),
host_ptr,
byte_strides) {}
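Note the dynamic-shape fallback in both delegating constructors: when the port's partial shape is dynamic, the tensor is created with an empty Shape{0} and the caller fixes the shape later. A hedged illustration (assumes `port` has a dynamic partial shape such as {?, 3}):

    #include <openvino/openvino.hpp>

    void dynamic_port_example(const ov::Output<const ov::Node>& port) {
        ov::Tensor t(port);            // falls back to Shape{0}; no usable extent yet
        t.set_shape(ov::Shape{2, 3});  // caller fixes the concrete shape before filling data
    }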

element::Type Tensor::get_element_type() const {
OV_TENSOR_STATEMENT(return ie::details::convertPrecision(_impl->getTensorDesc().getPrecision()));
}
@@ -113,6 +127,128 @@ Shape Tensor::get_shape() const {
OV_TENSOR_STATEMENT({ return _impl->getTensorDesc().getBlockingDesc().getBlockDims(); });
}

void Tensor::copy_to(ov::Tensor& dst) const {
OV_TENSOR_STATEMENT({
OPENVINO_ASSERT(dst, "Destination tensor was not initialized.");
OPENVINO_ASSERT(!is<ov::RemoteTensor>(), "Default copy_to doesn't support copying from a remote tensor.");
OPENVINO_ASSERT(!dst.is<ov::RemoteTensor>(), "Default copy_to doesn't support copying to a remote tensor.");
OPENVINO_ASSERT(dst.get_element_type() == get_element_type(),
"Tensor element types are not equal. (src: ",
get_element_type(),
" != dst: ",
dst.get_element_type(),
")");
if (dst.get_shape() == ov::Shape{0})
dst.set_shape(get_shape());
OPENVINO_ASSERT(dst.get_shape() == get_shape(),
"Tensor shapes are not equal. (src: ",
get_shape(),
" != dst: ",
dst.get_shape(),
")");
const auto& shape = get_shape();
auto* src_data = static_cast<const uint8_t*>(data());
auto* dst_data = static_cast<uint8_t*>(dst.data());
ov::Strides src_strides{get_byte_size()};
ov::Strides dst_strides{dst.get_byte_size()};
ov::Shape cur_pos{0};
ov::Shape max_pos{1};

if (get_element_type().bitwidth() < 8 || (get_strides() == dst.get_strides() && is_continuous())) {
// OpenVINO doesn't support strides for LP types
// or both tensors have default strides
// Strides and positions already initialized
} else {
// At least one tensor has non-default strides; compute the default strides for comparison
const auto& type = get_element_type();
std::vector<size_t> strides(shape.size());
if (!shape.empty()) {
strides[shape.size() - 1] = 1;
}
auto size = shape.size();
for (size_t i = 1; i < size; i++) {
strides[size - i - 1] = strides[size - i] * shape[size - i];
}

ov::Strides default_strides(strides.size());
for (size_t i = 0; i < strides.size(); ++i)
default_strides[i] = strides[i] * type.size();

src_strides = get_strides();
dst_strides = dst.get_strides();

ov::Strides src_str, dst_str;

// Calculate src and dst shapes
bool found_step = false;
for (size_t i = 0; i < shape.size(); i++) {
size_t inverted_idx = shape.size() - i - 1;
if (!found_step) {
if (default_strides[inverted_idx] == src_strides[inverted_idx] &&
src_strides[inverted_idx] == dst_strides[inverted_idx]) {
continue;
} else {
found_step = true;
size_t strides_size = inverted_idx + 1;
// Set right size
src_str.resize(strides_size + 1);
dst_str.resize(strides_size + 1);
max_pos.resize(strides_size + 1);
cur_pos.resize(strides_size + 1);
// With default continuous strides we can copy several elements at once;
// otherwise only one element at a time
size_t dim = 1;
size_t strides = type.size();

if (strides_size < default_strides.size()) {
strides = default_strides[strides_size];
dim = get_shape()[strides_size];
}
src_str[strides_size] = strides;
dst_str[strides_size] = strides;
max_pos[strides_size] = dim;
cur_pos[strides_size] = 0;
}
}
src_str[inverted_idx] = src_strides[inverted_idx];
dst_str[inverted_idx] = dst_strides[inverted_idx];
max_pos[inverted_idx] = shape[inverted_idx];
cur_pos[inverted_idx] = 0;
}
src_strides = src_str;
dst_strides = dst_str;
}

const auto update_index = [](const ov::Shape& pos, const ov::Shape& shape, const ov::Strides& strides) {
size_t offset = 0;

for (size_t i = 0; i < pos.size(); i++) {
offset += pos[i] * strides[i];
}
return offset;
};

bool finish = false;
for (size_t dst_idx = 0, src_idx = 0; !finish;) {
memcpy(dst_data + dst_idx, src_data + src_idx, src_strides[src_strides.size() - 1]);
// update indexes
for (size_t i = 0; i < cur_pos.size(); i++) {
size_t inverted_idx = cur_pos.size() - i - 1;
cur_pos[inverted_idx]++;
if (cur_pos[inverted_idx] != max_pos[inverted_idx]) {
break;
}
if (inverted_idx)
cur_pos[inverted_idx] = 0;
else
finish = true;
}
src_idx = update_index(cur_pos, max_pos, src_strides);
dst_idx = update_index(cur_pos, max_pos, dst_strides);
}
});
}
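For intuition, a standalone sketch of the same odometer-style traversal used above: copy one contiguous run of bytes, then advance the multi-dimensional position and recompute byte offsets from the strides (simplified; the scalar/0-D case is omitted):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    void strided_copy(const uint8_t* src, uint8_t* dst,
                      const std::vector<size_t>& shape,        // iterated dims, innermost last
                      const std::vector<size_t>& src_strides,  // bytes per step in each dim
                      const std::vector<size_t>& dst_strides,
                      size_t run_bytes) {                      // contiguous bytes per memcpy
        std::vector<size_t> pos(shape.size(), 0);
        bool finished = shape.empty();
        while (!finished) {
            size_t src_off = 0, dst_off = 0;
            for (size_t i = 0; i < pos.size(); ++i) {
                src_off += pos[i] * src_strides[i];
                dst_off += pos[i] * dst_strides[i];
            }
            std::memcpy(dst + dst_off, src + src_off, run_bytes);
            for (size_t i = pos.size(); i-- > 0;) {  // odometer increment
                if (++pos[i] < shape[i])
                    break;
                if (i == 0) {
                    finished = true;
                    break;
                }
                pos[i] = 0;
            }
        }
    }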

Strides Tensor::get_strides() const {
OPENVINO_ASSERT(get_element_type().bitwidth() >= 8,
"Could not get strides for types with bitwidths less then 8 bit. Tensor type: ",
@@ -174,24 +310,26 @@ Tensor::operator bool() const noexcept {
}

bool Tensor::is_continuous() const {
if (get_element_type().bitwidth() < 8)
// OpenVINO doesn't support strides for lp types
return true;
const auto& shape = get_shape();
const auto& type = get_element_type();
std::vector<size_t> strides(shape.size());
if (!shape.empty()) {
strides[shape.size() - 1] = 1;
}
auto size = shape.size();
for (size_t i = 1; i < size; i++) {
strides[size - i - 1] = strides[size - i] * shape[size - i];
}
OV_TENSOR_STATEMENT({
if (get_element_type().bitwidth() < 8)
// OpenVINO doesn't support strides for lp types
return true;
const auto& shape = get_shape();
const auto& type = get_element_type();
std::vector<size_t> strides(shape.size());
if (!shape.empty()) {
strides[shape.size() - 1] = 1;
}
auto size = shape.size();
for (size_t i = 1; i < size; i++) {
strides[size - i - 1] = strides[size - i] * shape[size - i];
}

ov::Strides byte_strides(strides.size());
for (size_t i = 0; i < strides.size(); ++i)
byte_strides[i] = strides[i] * type.size();
return byte_strides == get_strides();
ov::Strides byte_strides(strides.size());
for (size_t i = 0; i < strides.size(); ++i)
byte_strides[i] = strides[i] * type.size();
return byte_strides == get_strides();
});
}
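The check boils down to recomputing the default row-major byte strides and comparing them with the tensor's actual strides. A standalone sketch of that computation:

    #include <cstddef>
    #include <vector>

    // Default row-major strides in bytes: the innermost stride equals the element
    // size, and each outer stride is the inner stride times the inner dimension.
    std::vector<size_t> default_byte_strides(const std::vector<size_t>& shape, size_t elem_size) {
        std::vector<size_t> strides(shape.size(), elem_size);
        for (size_t i = shape.size(); i-- > 1;)
            strides[i - 1] = strides[i] * shape[i];
        return strides;
    }
    // e.g. shape {2, 3, 4} with 4-byte elements -> {48, 16, 4}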

} // namespace ov