Removed runtime::Tensor alias #22429

Merged: 5 commits, Jan 30, 2024
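This PR removes the deprecated ov::runtime namespace aliases (ov::runtime::Tensor, ov::runtime::TensorVector, ov::runtime::Allocator) from the core headers and migrates every remaining use in the plugins and test suites to the top-level ov:: names. A minimal before/after sketch of the rename (illustrative only; not part of the diff below):

#include <openvino/runtime/tensor.hpp>

int main() {
    // Before this PR the deprecated alias still compiled:
    //   ov::runtime::Tensor t{ov::element::f32, ov::Shape{1, 3, 2, 2}};
    // After it, only the top-level spelling exists:
    ov::Tensor t{ov::element::f32, ov::Shape{1, 3, 2, 2}};
    ov::TensorVector outputs{t};  // TensorVector is std::vector<Tensor>
    return 0;
}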
8 changes: 1 addition & 7 deletions src/core/include/openvino/runtime/allocator.hpp
@@ -14,9 +14,9 @@

#include "openvino/core/any.hpp"
#include "openvino/core/core_visibility.hpp"
-#include "openvino/core/deprecated.hpp"

namespace ov {

+class Tensor;

/**
@@ -158,10 +158,4 @@ class OPENVINO_API Allocator {
explicit operator bool() const noexcept;
};

-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace runtime {
-using ov::Allocator;
-}  // namespace runtime
-OPENVINO_SUPPRESS_DEPRECATED_END
-
} // namespace ov
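With the ov::runtime::Allocator alias gone, callers spell ov::Allocator directly. A hedged sketch, assuming the default-constructed allocator and the Tensor constructor that accepts one:

ov::Allocator alloc;  // defaults to the std::allocator-backed implementation
ov::Tensor t{ov::element::u8, ov::Shape{64}, alloc};  // tensor storage obtained through alloc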
5 changes: 0 additions & 5 deletions src/core/include/openvino/runtime/tensor.hpp
@@ -259,9 +259,4 @@ class OPENVINO_API Tensor {
*/
using TensorVector = std::vector<Tensor>;

-namespace runtime {
-using ov::Tensor;
-using ov::TensorVector;
-}  // namespace runtime
-
} // namespace ov
2 changes: 1 addition & 1 deletion src/core/src/preprocess/preprocess_impls.hpp
@@ -218,7 +218,7 @@ class InputTensorInfo::InputTensorInfoImpl : public TensorInfoImplBase {
m_shape_set = true;
}

-void set_from(const ov::runtime::Tensor& runtime_tensor) {
+void set_from(const ov::Tensor& runtime_tensor) {
set_shape(runtime_tensor.get_shape());
set_element_type(runtime_tensor.get_element_type());
}
@@ -128,7 +128,7 @@ class GatherLayerTestCPU : public testing::WithParamInterface<GatherLayerTestCPU

for (size_t i = 0; i < funcInputs.size(); ++i) {
const auto& funcInput = funcInputs[i];
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
ov::test::utils::InputGenerateData in_data;

if (funcInput.get_node()->get_friendly_name() == "data") {
@@ -136,7 +136,7 @@ class GridSampleLayerTestCPU : public testing::WithParamInterface<GridSampleLaye

for (size_t i = 0; i < funcInputs.size(); ++i) {
const auto& funcInput = funcInputs[i];
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
ov::test::utils::InputGenerateData in_data;

if (funcInput.get_node()->get_friendly_name() == "data") {
@@ -96,7 +96,7 @@ class NmsLayerCPUTest : public testing::WithParamInterface<NmsParams>, virtual p
auto node = funcInputs[2].get_node_shared_ptr();
auto it = inputs.find(node);
if (it == inputs.end()) return;
-auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
+auto tensor = ov::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
inputs[node] = tensor;
}

@@ -152,21 +152,21 @@ class RDFTTestCPU : public testing::WithParamInterface<std::tuple<ov::element::T
const auto& funcInputs = function->inputs();
auto funcInput = funcInputs.begin();
inputs.clear();
-runtime::Tensor data_tensor = test::utils::create_and_fill_tensor_normal_distribution(funcInput->get_element_type(),
+ov::Tensor data_tensor = test::utils::create_and_fill_tensor_normal_distribution(funcInput->get_element_type(),
targetInputStaticShapes[0], 0, 1, 0);

inputs.insert({funcInput->get_node_shared_ptr(), data_tensor});
funcInput++;
if (!constAxes && funcInput != funcInputs.end()) {
ASSERT_TRUE(inputIdx < axes.size());
-auto tensor = ov::runtime::Tensor{funcInput->get_element_type(), Shape{axes[inputIdx].size()}};
+auto tensor = ov::Tensor{funcInput->get_element_type(), Shape{axes[inputIdx].size()}};
std::memcpy(tensor.data(), axes[inputIdx].data(), axes[inputIdx].size() * sizeof(axes[0][0]));
inputs.insert({funcInput->get_node_shared_ptr(), tensor});
funcInput++;
}
if (!constSignalSizes && funcInput != funcInputs.end()) {
ASSERT_TRUE(inputIdx < signalSizes.size());
-auto tensor = ov::runtime::Tensor{funcInput->get_element_type(), Shape{signalSizes[inputIdx].size()}};
+auto tensor = ov::Tensor{funcInput->get_element_type(), Shape{signalSizes[inputIdx].size()}};
std::memcpy(tensor.data(), signalSizes[inputIdx].data(), signalSizes[inputIdx].size() * sizeof(signalSizes[0][0]));
inputs.insert({funcInput->get_node_shared_ptr(), tensor});
}
@@ -83,11 +83,11 @@ class ShapeOpsCPUTest : public testing::WithParamInterface<shapeOpsParams>,
const auto& funcInputs = function->inputs();
for (size_t i = 0; i < funcInputs.size(); ++i) {
const auto& funcInput = funcInputs[i];
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
if (i == 1) {
#define RESHAPE_TEST_CASE(INT_TYPE) \
case ov::element::Type_t::INT_TYPE: { \
-tensor = ov::runtime::Tensor{ov::element::INT_TYPE, targetInputStaticShapes[i]}; \
+tensor = ov::Tensor{ov::element::INT_TYPE, targetInputStaticShapes[i]}; \
auto inputData = tensor.data<ov::element_type_traits<ov::element::INT_TYPE>::value_type>(); \
ASSERT_TRUE(idx < data.size()); \
for (size_t j = 0lu; j < data[idx].size(); ++j) { \
@@ -120,7 +120,7 @@ class UniqueLayerTestCPU : public testing::WithParamInterface<UniqueLayerTestCPU

for (size_t i = 0; i < funcInputs.size(); ++i) {
const auto& funcInput = funcInputs[i];
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;

if (funcInput.get_node()->get_friendly_name() == "data") {
int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1, std::multiplies<size_t>());
@@ -57,9 +57,9 @@ class InPlaceReshapeFromConstantCheck : public SubgraphBaseTest {
const auto& funcInputs = function->inputs();
for (size_t i = 0; i < funcInputs.size(); ++i) {
const auto& funcInput = funcInputs[i];
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
if (i == 1) {
-tensor = ov::runtime::Tensor{ov::element::i32, targetInputStaticShapes[i]};
+tensor = ov::Tensor{ov::element::i32, targetInputStaticShapes[i]};
auto inputData = tensor.data<ov::element_type_traits<ov::element::i32>::value_type>();
const std::vector<unsigned> data = {38, 38, 15, 4};
for (size_t j = 0lu; j < data.size(); ++j) {
@@ -99,7 +99,7 @@ class GridSampleLayerTestGPU : public testing::WithParamInterface<GridSampleLaye

for (size_t i = 0; i < funcInputs.size(); ++i) {
const auto& funcInput = funcInputs[i];
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
ov::test::utils::InputGenerateData in_data;

if (funcInput.get_node()->get_friendly_name() == "data") {
@@ -103,7 +103,7 @@ class NmsLayerGPUTest : public testing::WithParamInterface<NmsLayerTestParams>,
auto node = funcInputs[2].get_node_shared_ptr();
auto it = inputs.find(node);
if (it == inputs.end()) return;
-auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
+auto tensor = ov::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
inputs[node] = tensor;
}

@@ -54,7 +54,7 @@ class EmptyTensorDynamicGPUTest : public testing::WithParamInterface<emptyTensor
const auto& funcInputs = function->inputs();
for (size_t i = 0; i < funcInputs.size(); ++i) {
auto node = funcInputs[i].get_node_shared_ptr();
-auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[i]);
+auto tensor = ov::Tensor(node->get_element_type(), targetInputStaticShapes[i]);
if (i == 0) {
// All zero inputs for non_zero op
auto tensor_ptr = static_cast<int32_t*>(tensor.data());
16 changes: 8 additions & 8 deletions src/plugins/template/backend/executable.hpp
@@ -19,16 +19,16 @@ class Executable {
Executable();
virtual ~Executable();

-/// \param outputs vector of runtime::Tensor used as outputs
-/// \param inputs vector of runtime::Tensor used as inputs
+/// \param outputs vector of Tensor used as outputs
+/// \param inputs vector of Tensor used as inputs
/// \param collect_performance Enable per operation performance statistic
/// \returns true if iteration is successful, false otherwise
virtual bool call(std::vector<ov::Tensor>& outputs,
const std::vector<ov::Tensor>& inputs,
bool collect_performance = false) = 0;

-/// \param outputs vector of runtime::Tensor used as outputs
-/// \param inputs vector of runtime::Tensor used as inputs
+/// \param outputs vector of Tensor used as outputs
+/// \param inputs vector of Tensor used as inputs
/// \param context Evaluation context
/// \param collect_performance Enable per operation performance statistic
/// \returns true if iteration is successful, false otherwise
@@ -41,14 +41,14 @@ class Executable {
virtual void cancel() = 0;

/// \brief Executes a single iteration of a Function.
-/// \param outputs vector of runtime::Tensor used as outputs
-/// \param inputs vector of runtime::Tensor used as inputs
+/// \param outputs vector of Tensor used as outputs
+/// \param inputs vector of Tensor used as inputs
/// \returns true if iteration is successful, false otherwise
bool call_with_validate(std::vector<ov::Tensor>& outputs, const std::vector<ov::Tensor>& inputs);

/// \brief Validates a Function.
-/// \param outputs vector of runtime::Tensor used as outputs
-/// \param inputs vector of runtime::Tensor used as inputs
+/// \param outputs vector of Tensor used as outputs
+/// \param inputs vector of Tensor used as inputs
void validate(const std::vector<ov::Tensor>& outputs, const std::vector<ov::Tensor>& inputs);

/// \brief Query the input Parameters
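A short usage sketch for the entry points documented above (hypothetical helper; Executable is abstract, so a concrete backend instance is assumed and the enclosing namespace is omitted):

// Drives one validated iteration with plain ov::Tensor vectors.
bool run_once(Executable& exec, const std::vector<ov::Tensor>& inputs) {
    std::vector<ov::Tensor> outputs;
    return exec.call_with_validate(outputs, inputs);
}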
@@ -50,8 +50,8 @@ struct InterpolateV1Params {
Shape outShape;
element::Type inType;
element::Type outType;
-runtime::Tensor inData;
-runtime::Tensor outData;
+ov::Tensor inData;
+ov::Tensor outData;
std::shared_ptr<op::v0::Constant> outShapeInput;
op::v0::Interpolate::Attributes attrs;
};
@@ -82,8 +82,8 @@ struct InterpolateV4Params {
Shape outShape;
element::Type inType;
element::Type outType;
-runtime::Tensor inData;
-runtime::Tensor outData;
+ov::Tensor inData;
+ov::Tensor outData;
std::vector<size_t> outShapeInput;
element::Type outShapeInputType;
std::vector<float> scales;
@@ -36,8 +36,8 @@ struct ReadValueAssignParams {
Shape m_output_shape;
element::Type m_input_type;
element::Type m_output_type;
-runtime::Tensor m_input_data;
-runtime::Tensor m_expected_data;
+ov::Tensor m_input_data;
+ov::Tensor m_expected_data;
std::string m_variable_id;
};

@@ -245,8 +245,8 @@ struct MemoryTestParams {
ov::Shape m_output_shape;
ov::element::Type m_input_type;
ov::element::Type m_output_type;
-ov::runtime::Tensor m_input_data;
-ov::runtime::Tensor m_expected_data;
+ov::Tensor m_input_data;
+ov::Tensor m_expected_data;
std::vector<std::string> m_variable_id;
size_t m_count_runs;
size_t m_reset_on_run;
@@ -67,8 +67,8 @@ struct RegionYoloParams {
ov::PartialShape inputShape;
ov::element::Type inType;
ov::element::Type outType;
-ov::runtime::Tensor inputData;
-ov::runtime::Tensor refData;
+ov::Tensor inputData;
+ov::Tensor refData;
std::string testcaseName;
};

@@ -312,7 +312,7 @@ std::vector<ov::Tensor> ReadIRTest::calculate_refs() {

size_t pos = 0;
for (const auto& output : functionRefs->outputs()) {
-auto out_tensor = ov::runtime::Tensor(output.get_element_type(), output.get_shape(), &ref_buffer[pos]);
+auto out_tensor = ov::Tensor(output.get_element_type(), output.get_shape(), &ref_buffer[pos]);
pos += out_tensor.get_byte_size();
}
}
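The loop above relies on the host-pointer ov::Tensor constructor, which wraps caller-owned memory without taking ownership; a minimal sketch of that pattern:

float buffer[4] = {0.f, 1.f, 2.f, 3.f};  // caller-owned storage
ov::Tensor view{ov::element::f32, ov::Shape{2, 2}, buffer};  // non-owning view over buffer
// view.get_byte_size() == sizeof(buffer)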
@@ -41,7 +41,7 @@ class OVInferRequestDynamicTests : public testing::WithParamInterface<OVInferReq

protected:
void SetUp() override;
-bool checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual);
+bool checkOutput(const ov::Tensor& in, const ov::Tensor& actual);

std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
std::shared_ptr<Model> function;
@@ -39,7 +39,7 @@ struct OVIterationChaining : public OVInferRequestTests {

private:
static std::shared_ptr<ov::Model> getIterativeFunction();
-bool checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual);
+bool checkOutput(const ov::Tensor& in, const ov::Tensor& actual);
};

} // namespace behavior
@@ -44,7 +44,7 @@ void OVInferRequestDynamicTests::SetUp() {
APIBaseTest::SetUp();
}

-bool OVInferRequestDynamicTests::checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual) {
+bool OVInferRequestDynamicTests::checkOutput(const ov::Tensor& in, const ov::Tensor& actual) {
bool result = true;
auto net = ie->compile_model(function, ov::test::utils::DEVICE_TEMPLATE);
ov::InferRequest req;
@@ -96,7 +96,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetwork) {
ov::test::utils::InputGenerateData in_data;
in_data.start_from = -50;
in_data.range = 100;
-ov::runtime::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, in_data);
+ov::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, in_data);
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", inTensor));
OV_ASSERT_NO_THROW(req.infer());
@@ -115,7 +115,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetUnexpectedOutputTensorB
auto execNet = ie->compile_model(function, target_device, configuration);
// Create InferRequest
ov::InferRequest req;
-ov::runtime::Tensor tensor, otensor;
+ov::Tensor tensor, otensor;
const std::string outputname = function->outputs().back().get_any_name();
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
ov::test::utils::InputGenerateData in_data;
@@ -144,7 +144,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocate
auto execNet = ie->compile_model(function, target_device, configuration);
// Create InferRequest
ov::InferRequest req;
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
const std::string outputname = function->outputs().back().get_any_name();
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
ov::test::utils::InputGenerateData in_data;
@@ -153,7 +153,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocate
tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, in_data);
OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
float ptr[5000];
-ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
+ov::Tensor otensor(element::f32, refOutShape, ptr);
OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
OV_ASSERT_NO_THROW(req.infer());
ASSERT_EQ(req.get_tensor(outputname).data<float>(), ptr);
@@ -172,7 +172,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputShapeBeforeInfer)
auto execNet = ie->compile_model(function, target_device, configuration);
// Create InferRequest
ov::InferRequest req;
-ov::runtime::Tensor tensor, otensor;
+ov::Tensor tensor, otensor;
const std::string outputname = function->outputs().back().get_any_name();
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
ov::test::utils::InputGenerateData in_data;
@@ -199,7 +199,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkGetOutputThenSetOutputTens
auto execNet = ie->compile_model(function, target_device, configuration);
// Create InferRequest
ov::InferRequest req;
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
const std::string outputname = function->outputs().back().get_any_name();
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
ov::test::utils::InputGenerateData in_data;
@@ -213,7 +213,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkGetOutputThenSetOutputTens
ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
// then, set output tensor
float ptr[5000];
-ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
+ov::Tensor otensor(element::f32, refOutShape, ptr);
OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
OV_ASSERT_NO_THROW(req.infer());
ASSERT_EQ(req.get_tensor(outputname).data<float>(), ptr);
@@ -235,7 +235,7 @@ TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetChangedInputTensorThrow)
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
OV_ASSERT_NO_THROW(req.infer());
// Get input_tensor
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
// Set shape
OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
@@ -258,7 +258,7 @@ TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetChangedOutputTensorThrow
OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
OV_ASSERT_NO_THROW(req.infer());
// Get output_tensor
-ov::runtime::Tensor tensor;
+ov::Tensor tensor;
OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->outputs().back().get_any_name()););
// Set shape
OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
@@ -78,7 +78,7 @@ void OVIterationChaining::TearDown() {
OVInferRequestTests::TearDown();
}

-bool OVIterationChaining::checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual) {
+bool OVIterationChaining::checkOutput(const ov::Tensor& in, const ov::Tensor& actual) {
bool result = true;
auto net = core->compile_model(function, ov::test::utils::DEVICE_TEMPLATE);
ov::InferRequest req;
@@ -13,8 +13,8 @@ namespace utils {
using CompareMap = std::map<ov::NodeTypeInfo, std::function<void(
const std::shared_ptr<ov::Node> &node,
size_t port,
-const ov::runtime::Tensor &expected,
-const ov::runtime::Tensor &actual,
+const ov::Tensor &expected,
+const ov::Tensor &actual,
double absThreshold,
double relThreshold)>>;
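A hypothetical comparator matching the post-rename CompareMap signature; the byte-size check is a trivial stand-in for a real element-wise comparison, and the map key and gtest macro are assumptions, not taken from this header:

CompareMap cmp_map;
cmp_map[ov::op::v0::Result::get_type_info_static()] =
    [](const std::shared_ptr<ov::Node>& node, size_t port,
       const ov::Tensor& expected, const ov::Tensor& actual,
       double absThreshold, double relThreshold) {
        ASSERT_EQ(expected.get_byte_size(), actual.get_byte_size());
    };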

@@ -17,7 +17,7 @@ void reset_const_ranges();

std::vector<uint8_t> color_test_image(size_t height, size_t width, int b_step, ov::preprocess::ColorFormat format);

-using InputsMap = std::map<ov::NodeTypeInfo, std::function<ov::runtime::Tensor(
+using InputsMap = std::map<ov::NodeTypeInfo, std::function<ov::Tensor(
const std::shared_ptr<ov::Node>& node,
size_t port,
const ov::element::Type& elemType,