diff --git a/docs/template_plugin/tests/functional/op_reference/not_equal.cpp b/docs/template_plugin/tests/functional/op_reference/not_equal.cpp
new file mode 100644
index 00000000000000..e0292ecc67a4be
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/not_equal.cpp
@@ -0,0 +1,123 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <ie_core.hpp>
+#include <ie_ngraph_utils.hpp>
+#include <ngraph/ngraph.hpp>
+#include <shared_test_classes/base/layer_test_utils.hpp>
+
+#include "comparison.hpp"
+
+using namespace ngraph;
+using namespace InferenceEngine;
+using ComparisonTypes = ngraph::helpers::ComparisonTypes;
+
+
+namespace reference_tests {
+namespace ComparisonOpsRefTestDefinitions {
+namespace {
+
+template <element::Type_t IN_ET>
+std::vector<RefComparisonParams> generateComparisonParams(const element::Type& type) {
+    using T = typename element_type_traits<IN_ET>::value_type;
+    std::vector<RefComparisonParams> compParams {
+        // 1D // 2D // 3D // 4D
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{2, 2}, type, std::vector<T> {1, 0, 10, 255}})
+            .input2({{2, 2}, type, std::vector<T> {1, 0, 10, 255}})
+            .expected({{2, 2}, element::boolean, std::vector<char> {0, 0, 0, 0}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{2, 3}, type, std::vector<T> {0, 15, 45, 10, 5, 10}})
+            .input2({{2, 3}, type, std::vector<T> {1, 15, 5, 10, 50, 10}})
+            .expected({{2, 3}, element::boolean, std::vector<char> {1, 0, 1, 0, 1, 0}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{1}, type, std::vector<T> {20}})
+            .input2({{1}, type, std::vector<T> {10}})
+            .expected({{1}, element::boolean, std::vector<char> {1}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 1, 5, 12, 8}})
+            .input2({{2, 4}, type, std::vector<T> {0, 12, 23, 0, 10, 5, 11, 8}})
+            .expected({{2, 4}, element::boolean, std::vector<char> {0, 0, 0, 0, 1, 0, 1, 0}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{3, 1, 2}, type, std::vector<T> {2, 7, 4, 7, 3, 7}})
+            .input2({{1, 2, 1}, type, std::vector<T> {7, 7}})
+            .expected({{3, 2, 2}, element::boolean, std::vector<char> {1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{2, 1, 2, 1}, type, std::vector<T> {1, 2, 1, 4}})
+            .input2({{1, 2, 1}, type, std::vector<T> {1, 1}})
+            .expected({{2, 1, 2, 1}, element::boolean, std::vector<char> {0, 1, 0, 1}})};
+    return compParams;
+}
+
+std::vector<RefComparisonParams> generateComparisonCombinedParams() {
+    const std::vector<std::vector<RefComparisonParams>> compTypeParams {
+        generateComparisonParams<element::Type_t::f32>(element::f32),
+        generateComparisonParams<element::Type_t::f16>(element::f16),
+        generateComparisonParams<element::Type_t::i32>(element::i32),
+        generateComparisonParams<element::Type_t::u32>(element::u32),
+        generateComparisonParams<element::Type_t::boolean>(element::boolean)};
+    std::vector<RefComparisonParams> combinedParams;
+
+    for (const auto& params : compTypeParams) {
+        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
+    }
+    return combinedParams;
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_Comparison_With_Hardcoded_Refs, ReferenceComparisonLayerTest,
+                         ::testing::ValuesIn(generateComparisonCombinedParams()),
+                         ReferenceComparisonLayerTest::getTestCaseName);
+
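The fifth case above exercises implicit NUMPY-style broadcasting: a {3, 1, 2} input against a {1, 2, 1} input yields a {3, 2, 2} result. A NumPy cross-check of that row, for illustration only; NumPy's broadcasting rules match ngraph's NUMPY autobroadcast mode here:

```python
import numpy as np

# Cross-check of the broadcast test case above: {3,1,2} vs {1,2,1} -> {3,2,2}.
a = np.array([2, 7, 4, 7, 3, 7], dtype=np.float32).reshape(3, 1, 2)
b = np.array([7, 7], dtype=np.float32).reshape(1, 2, 1)
res = np.not_equal(a, b)
assert res.shape == (3, 2, 2)
assert res.astype(np.int8).ravel().tolist() == [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
```

+template <element::Type_t IN_ET>
+std::vector<RefComparisonParams> generateNumericParams(const element::Type& type) {
+    using T = typename element_type_traits<IN_ET>::value_type;
+    std::vector<RefComparisonParams> compParams {
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{4}, type, std::vector<T> {-2.5f, 25.5f, 2.25f, NAN}})
+            .input2({{4}, type, std::vector<T> {10.0f, 5.0f, 2.25f, 10.0f}})
+            .expected({{4}, element::boolean, std::vector<char> {1, 1, 0, 1}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)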
+            .input1({{2, 3}, type, std::vector<T> {0.0f, NAN, NAN, 1.0f, 21.0f, -INFINITY}})
+            .input2({{2, 3}, type, std::vector<T> {1.0f, NAN, 23.0f, 1.0f, 19.0f, 21.0f}})
+            .expected({{2, 3}, element::boolean, std::vector<char> {1, 1, 1, 0, 1, 1}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{1}, type, std::vector<T> {INFINITY}})
+            .input2({{1}, type, std::vector<T> {INFINITY}})
+            .expected({{1}, element::boolean, std::vector<char> {0}}),
+        Builder {}
+            .compType(ComparisonTypes::NOT_EQUAL)
+            .input1({{5}, type, std::vector<T> {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}})
+            .input2({{5}, type, std::vector<T> {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}})
+            .expected({{5}, element::boolean, std::vector<char> {1, 1, 0, 1, 1}})};
+    return compParams;
+}
+
+std::vector<RefComparisonParams> generateNumericCombinedParams() {
+    const std::vector<std::vector<RefComparisonParams>> compTypeParams {
+        generateNumericParams<element::Type_t::f16>(element::f16),
+        generateNumericParams<element::Type_t::f32>(element::f32)};
+    std::vector<RefComparisonParams> combinedParams;
+
+    for (const auto& params : compTypeParams) {
+        combinedParams.insert(combinedParams.end(), params.begin(), params.end());
+    }
+    return combinedParams;
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_Numeric_With_Hardcoded_Refs, ReferenceComparisonLayerTest, ::testing::ValuesIn(generateNumericCombinedParams()),
+                         ReferenceComparisonLayerTest::getTestCaseName);
+}  // namespace
+}  // namespace ComparisonOpsRefTestDefinitions
+}  // namespace reference_tests
diff --git a/inference-engine/ie_bridges/python/src/openvino/__init__.py b/inference-engine/ie_bridges/python/src/openvino/__init__.py
index 7039467483f731..962d369fd34512 100644
--- a/inference-engine/ie_bridges/python/src/openvino/__init__.py
+++ b/inference-engine/ie_bridges/python/src/openvino/__init__.py
@@ -1,3 +1,4 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2018-2021 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)  # type: ignore  # mypy issue #1422
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd
index 0d313a4fdd924b..e269f93dbd77e5 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd
@@ -39,7 +39,7 @@ cdef class InferRequest:
     cpdef get_perf_counts(self)
     cdef void user_callback(self, int status) with gil
     cdef public:
-        _inputs_list, _outputs_list, _py_callback, _py_data, _user_blobs
+        _inputs_list, _outputs_list, _py_callback, _py_data, _user_blobs, _inputs_is_dynamic
 
 cdef class IENetwork:
     cdef C.IENetwork impl
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
index 8557ddee71392f..9b0142368d8fce 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
@@ -29,7 +29,6 @@ from .constants import WaitMode, StatusCode, MeanVariant, layout_str_to_enum, fo
 
 import numpy as np
 
-
 warnings.filterwarnings(action="module", category=DeprecationWarning)
 
 cdef extern from "" namespace "std" nogil:
@@ -53,6 +52,11 @@ cdef c_map_to_dict(map[string, string] c_map):
     return py_dict
 
 
+cdef expand_dims_to_corresponding_layout(shape, layout):
+    single_axes = [1] * (len(layout) - len(shape))
+    return single_axes + list(shape)
+
+
 def get_version():
     return C.get_version().decode()
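The helper above left-pads a lower-rank input shape with unit axes so it matches the rank implied by the blob layout before `Blob.set_shape` is applied in `_fill_inputs`. Restated standalone for illustration:

```python
# Standalone restatement of the helper for illustration: a CHW array fed
# to a blob with an NCHW layout is padded on the left with unit axes.
def expand_dims_to_corresponding_layout(shape, layout):
    single_axes = [1] * (len(layout) - len(shape))
    return single_axes + list(shape)

assert expand_dims_to_corresponding_layout((3, 32, 32), "NCHW") == [1, 3, 32, 32]
```

@@ -271,6 +275,10 @@ cdef class Blob:
             tensor_desc = TensorDesc(precision, dims, layout_int_to_str_map[layout])
         return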
tensor_desc + def set_shape(self, new_shape): + self._initial_shape = new_shape + deref(self._ptr).setShape(new_shape) + ## This class represents an Inference Engine entity and allows you to manipulate with plugins using unified interfaces. cdef class IECore: ## Class constructor @@ -815,6 +823,14 @@ cdef class DataPtr: def initialized(self): return deref(self._ptr).isInitialized() + @property + def is_dynamic(self): + return deref(self._ptr).isDynamic() + + ## get capsule with ngraph::PartialShape + def _get_partial_shape_capsule(self): + return C.getPartialShape_capsule(self._ptr) + ## This class is the layer constant data representation. Provides same interface as DataPtr object except properties setters cdef class CDataPtr: @@ -843,6 +859,14 @@ cdef class CDataPtr: def initialized(self): return deref(self._ptr).isInitialized() + @property + def is_dynamic(self): + return deref(self._ptr).isDynamic() + + ## get capsule with ngraph::PartialShape + def _get_partial_shape_capsule(self): + return C.getPartialShape_capsule(self._ptr) + ## This class represents a network instance loaded to plugin and ready for inference. cdef class ExecutableNetwork: @@ -912,6 +936,8 @@ cdef class ExecutableNetwork: infer_request.impl = &(deref(self.impl).infer_requests[i]) infer_request._inputs_list = list(self.input_info.keys()) infer_request._outputs_list = list(self.outputs.keys()) + for input_name in infer_request._inputs_list: + infer_request._inputs_is_dynamic[input_name] = self.input_info[input_name].input_data.is_dynamic self._infer_requests.append(infer_request) if len(self._infer_requests) != c_infer_requests_size: @@ -1046,6 +1072,7 @@ cdef class InferRequest: self._outputs_list = [] self._py_callback = lambda *args, **kwargs: None self._py_data = None + self._inputs_is_dynamic = {} cdef void user_callback(self, int status) with gil: if self._py_callback: @@ -1283,6 +1310,9 @@ cdef class InferRequest: def _fill_inputs(self, inputs): for k, v in inputs.items(): assert k in self._inputs_list, f"No input with name {k} found in network" + if self._inputs_is_dynamic[k]: + shape = expand_dims_to_corresponding_layout(v.shape, self.input_blobs[k].tensor_desc.layout) + self.input_blobs[k].set_shape(shape) if self.input_blobs[k].tensor_desc.precision == "FP16": self.input_blobs[k].buffer[:] = v.view(dtype=np.int16) else: @@ -1427,15 +1457,25 @@ cdef class IENetwork: # net.reshape({input_layer: (n, c, h*2, w*2)}) # ``` def reshape(self, input_shapes: dict): - cdef map[string, vector[size_t]] c_input_shapes - cdef vector[size_t] c_shape + cdef map[string, vector[vector[int64_t]]] c_input_shapes + cdef vector[vector[int64_t]] c_shape + cdef vector[int64_t] dim net_inputs = self.input_info for input, shape in input_shapes.items(): c_shape = [] if input not in net_inputs: raise AttributeError(f"Specified '{input}' layer not in network inputs '{net_inputs}'! ") for v in shape: - c_shape.push_back(v) + if isinstance(v, list) or isinstance(v, tuple): + if len(v) < 1 or len(v) > 2: + raise ValueError(f"Incorrect PartialShape dimension definition '{v}' " + f"in shape '{shape}', expected one or two values for a dimension! 
") + for d in v: + dim.push_back(d) + else: + dim.push_back(v) + c_shape.push_back(dim) + dim.clear() c_input_shapes[input.encode()] = c_shape self.impl.reshape(c_input_shapes) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index 7f8f7335776a95..49aee30c43d8dd 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -4,6 +4,8 @@ #include "ie_api_impl.hpp" +#include + #include "ie_iinfer_request.hpp" #include "ie_plugin_config.hpp" @@ -206,6 +208,24 @@ InferenceEnginePython::IENetwork InferenceEnginePython::read_network(std::string return InferenceEnginePython::IENetwork(std::make_shared(net)); } +PyObject* InferenceEnginePython::getPartialShape_capsule(InferenceEngine::CDataPtr data) { + const char* py_capsule_name = "ngraph_partial_shape"; + auto ngraph_pShape_ptr = std::make_shared(data->getPartialShape()); + auto* sp_copy = new std::shared_ptr(ngraph_pShape_ptr); + auto sp_deleter = [](PyObject* capsule) { + auto* capsule_ptr = PyCapsule_GetPointer(capsule, "ngraph_partial_shape"); + auto* function_sp = static_cast*>(capsule_ptr); + if (function_sp) { + delete function_sp; + } + }; + if (ngraph_pShape_ptr) { + return PyCapsule_New(sp_copy, py_capsule_name, sp_deleter); + } else { + return nullptr; + } +} + InferenceEnginePython::IENetwork::IENetwork(const std::shared_ptr& cnn_network) : actual(cnn_network) { if (actual == nullptr) @@ -289,8 +309,21 @@ size_t InferenceEnginePython::IENetwork::getBatch() { return actual->getBatchSize(); } -void InferenceEnginePython::IENetwork::reshape(const std::map>& input_shapes) { - actual->reshape(input_shapes); +void InferenceEnginePython::IENetwork::reshape( + const std::map>>& input_shapes) { + std::map inputShapes; + for (auto const& input : input_shapes) { + using ngraph::Dimension; + std::vector dims; + for (auto const& d : input.second) { + if (d.size() == 1) + dims.push_back(Dimension(d[0])); + else if (d.size() == 2) + dims.push_back(Dimension(d[0], d[1])); + } + inputShapes[input.first] = ngraph::PartialShape(dims); + } + actual->reshape(inputShapes); } InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string& name, size_t num_requests) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp index 4823b52287c5f4..028bbd3ad740d3 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp @@ -62,7 +62,7 @@ struct IENetwork { const std::map getOutputs(); - void reshape(const std::map>& input_shapes); + void reshape(const std::map>>& input_shapes); void serialize(const std::string& path_to_xml, const std::string& path_to_bin); @@ -203,4 +203,6 @@ std::string get_version(); InferenceEnginePython::IENetwork read_network(std::string path_to_xml, std::string path_to_bin); +PyObject* getPartialShape_capsule(InferenceEngine::CDataPtr data); + }; // namespace InferenceEnginePython diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd index 103c8d77d537b0..9f60f53625702b 100644 --- 
a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd @@ -23,6 +23,7 @@ cdef extern from "" namespace "InferenceEngine": const CTensorDesc& getTensorDesc() except + size_t element_size() except + void allocate() + void setShape(const SizeVector& dims) except + cdef TBlob[Type].Ptr make_shared_blob[Type](const CTensorDesc& tensorDesc) @@ -47,6 +48,7 @@ cdef extern from "" namespace "InferenceEngine": const Layout getLayout() except + void setLayout(Layout layout) except + const bool isInitialized() except + + bool isDynamic() except + ctypedef shared_ptr[Data] DataPtr ctypedef weak_ptr[Data] DataWeakPtr @@ -178,7 +180,7 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython": size_t getBatch() except + void setLayerParams(map[string, map[string, string]] params_map) except + void serialize(const string& path_to_xml, const string& path_to_bin) except + - void reshape(map[string, vector[size_t]] input_shapes) except + + void reshape(map[string, vector[vector[int64_t]]] input_shapes) except + object getFunction() except + void convertToOldRepresentation() except + string getOVNameForTensor(const string &) except + @@ -226,3 +228,5 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython": cdef string get_version() cdef IENetwork read_network(string path_to_xml, string path_to_bin) + + cdef object getPartialShape_capsule(DataPtr) diff --git a/inference-engine/ie_bridges/python/tests/conftest.py b/inference-engine/ie_bridges/python/tests/conftest.py index c19f8bc1716f2b..f0e5059d04017e 100644 --- a/inference-engine/ie_bridges/python/tests/conftest.py +++ b/inference-engine/ie_bridges/python/tests/conftest.py @@ -3,6 +3,7 @@ import os import pytest +import numpy as np def model_path(is_myriad=False): @@ -41,7 +42,19 @@ def device(): def pytest_configure(config): - # register an additional marker for ngraph dependent tests + # register an additional markers config.addinivalue_line( "markers", "ngraph_dependent_test" ) + config.addinivalue_line( + "markers", "template_plugin" + ) + + +def create_ngraph_function(inputShape): + import ngraph as ng + inputShape = ng.impl.PartialShape(inputShape) + param = ng.parameter(inputShape, dtype=np.float32, name="data") + result = ng.relu(param, name='out') + function = ng.Function(result, [param], "TestFunction") + return function diff --git a/inference-engine/ie_bridges/python/tests/test_Blob.py b/inference-engine/ie_bridges/python/tests/test_Blob.py index b1074c4ef94b94..cd2a48a2724bfe 100644 --- a/inference-engine/ie_bridges/python/tests/test_Blob.py +++ b/inference-engine/ie_bridges/python/tests/test_Blob.py @@ -121,3 +121,34 @@ def test_buffer_values_after_add_outputs(device): result = exec_net.infer(feed_dict) assert np.all(abs(result[output_layer])<30) assert result[output_layer].dtype == np.float16 + + +def test_set_shape(): + tensor_desc = TensorDesc("FP32", [1, 3, 127, 127], "NHWC") + blob = Blob(tensor_desc) + blob.set_shape([1, 4, 128, 128]) + assert blob.tensor_desc.dims == [1, 4, 128, 128] + assert blob.buffer.shape == (1, 4, 128, 128) + + array = np.ones([1, 3, 127, 127], dtype=np.float32) + blob = Blob(tensor_desc, array) + blob.set_shape([1, 4, 128, 128]) + assert blob.tensor_desc.dims == [1, 4, 128, 128] + assert blob.buffer.shape == (1, 4, 128, 128) + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_blob_set_shape_after_async_infer(): + from conftest import 
create_ngraph_function + import ngraph as ng + function = create_ngraph_function([ng.Dimension(0,5), ng.Dimension(4), ng.Dimension(20), ng.Dimension(20)]) + net = ng.function_to_cnn(function) + ie_core = IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + request = exec_net.requests[0] + request.async_infer({"data": np.ones([4, 4, 20, 20])}) + with pytest.raises(RuntimeError) as e: + request.input_blobs['data'].set_shape([3, 4, 20, 20]) + assert "REQUEST_BUSY" in str(e.value) diff --git a/inference-engine/ie_bridges/python/tests/test_CDataPtr.py b/inference-engine/ie_bridges/python/tests/test_CDataPtr.py index d81dd46a5cc18d..4969aba4a5cb3f 100644 --- a/inference-engine/ie_bridges/python/tests/test_CDataPtr.py +++ b/inference-engine/ie_bridges/python/tests/test_CDataPtr.py @@ -56,3 +56,21 @@ def test_initialized(device): net = ie.read_network(model=test_net_xml, weights=test_net_bin) exec_net = ie.load_network(net, device, num_requests=5) assert exec_net.outputs['fc_out'].initialized, "Incorrect value for initialized property for layer 'fc_out" + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_is_dynamic(): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([-1, 3, 20, 20]) + net = ng.function_to_cnn(function) + ie = IECore() + ie.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie.load_network(net, "TEMPLATE") + assert exec_net.outputs["out"].is_dynamic + p_shape = ng.partial_shape_from_data(exec_net.outputs["out"]) + assert isinstance(p_shape, ng.impl.PartialShape) + with pytest.raises(RuntimeError) as e: + exec_net.outputs["out"].shape + assert "Cannot return dims for Data with dynamic shapes!" in str(e.value) diff --git a/inference-engine/ie_bridges/python/tests/test_DataPtr.py b/inference-engine/ie_bridges/python/tests/test_DataPtr.py index 9b125fb585405c..40ae28b2001317 100644 --- a/inference-engine/ie_bridges/python/tests/test_DataPtr.py +++ b/inference-engine/ie_bridges/python/tests/test_DataPtr.py @@ -43,3 +43,27 @@ def test_layout(): def test_initialized(): assert layer_out_data().initialized, "Incorrect value for initialized property for layer 'fc_out'" + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_is_dynamic(): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([-1, 3, 20, 20]) + net = ng.function_to_cnn(function) + assert net.input_info["data"].input_data.is_dynamic + assert net.outputs["out"].is_dynamic + p_shape = ng.partial_shape_from_data(net.input_info["data"].input_data) + assert isinstance(p_shape, ng.impl.PartialShape) + p_shape = ng.partial_shape_from_data(net.outputs["out"]) + assert isinstance(p_shape, ng.impl.PartialShape) + with pytest.raises(RuntimeError) as e: + net.input_info["data"].input_data.shape + assert "Cannot return dims for Data with dynamic shapes!" 
in str(e.value) + ie = IECore() + ie.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie.load_network(net, "TEMPLATE") + assert exec_net.input_info["data"].input_data.is_dynamic + p_shape = ng.partial_shape_from_data(exec_net.input_info["data"].input_data) + assert isinstance(p_shape, ng.impl.PartialShape) diff --git a/inference-engine/ie_bridges/python/tests/test_IENetwork.py b/inference-engine/ie_bridges/python/tests/test_IENetwork.py index ffb06b378875d6..607c8296f4b009 100644 --- a/inference-engine/ie_bridges/python/tests/test_IENetwork.py +++ b/inference-engine/ie_bridges/python/tests/test_IENetwork.py @@ -156,6 +156,43 @@ def test_reshape(): ie = IECore() net = ie.read_network(model=test_net_xml, weights=test_net_bin) net.reshape({"data": (2, 3, 32, 32)}) + assert net.input_info["data"].input_data.shape == [2, 3, 32, 32] + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.parametrize("shape, p_shape", [ + ([1, 3, 22, 22], [1, 3, -1, 25]), + ([1, 3, 22, 22], [-1, -1, -1, -1]), + ([1, 3, -1, 25], [1, 3, 22, -1]) +]) +def test_reshape_with_partial_shape(device, shape, p_shape): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function(shape) + net = ng.function_to_cnn(function) + net.reshape({"data": p_shape}) + changedFunction = ng.function_from_cnn(net) + p_shape = ng.impl.PartialShape(p_shape) + assert changedFunction.get_parameters()[0].get_partial_shape().is_dynamic + assert changedFunction.get_results()[0].get_output_partial_shape(0).is_dynamic + assert function.get_parameters()[0].get_partial_shape().is_dynamic + assert function.get_results()[0].get_output_partial_shape(0).is_dynamic + assert changedFunction.get_parameters()[0].get_partial_shape() == p_shape + assert changedFunction.get_results()[0].get_output_partial_shape(0) == p_shape + assert function.get_parameters()[0].get_partial_shape() == p_shape + assert function.get_results()[0].get_output_partial_shape(0) == p_shape + + +@pytest.mark.ngraph_dependent_test +def test_incorrect_reshape(device): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([1, 3, 22, 22]) + net = ng.function_to_cnn(function) + with pytest.raises(ValueError) as e: + net.reshape({"data": [(2, 4, 6), 3, 22, 22]}) + assert "Incorrect PartialShape dimension definition '(2, 4, 6)' " \ + "in shape '[(2, 4, 6), 3, 22, 22]', expected one or two values for a dimension! 
" in str(e.value) def test_net_from_buffer_valid(): @@ -245,3 +282,18 @@ def test_tensor_names(): assert net.get_ov_name_for_tensor("relu_t") == "activation" assert net.get_ov_name_for_tensor("identity_t") == "activation" assert net.get_ov_name_for_tensor("input") == "in1" + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_create_two_exec_net(): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([ng.Dimension(0,5), ng.Dimension(4), ng.Dimension(20), ng.Dimension(20)]) + net = ng.function_to_cnn(function) + ie_core = IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net1 = ie_core.load_network(net, "TEMPLATE", num_requests=2) + assert ng.function_from_cnn(net) != None + exec_net2 = ie_core.load_network(net, "TEMPLATE", num_requests=2) + assert ng.function_from_cnn(net) != None diff --git a/inference-engine/ie_bridges/python/tests/test_InferRequest.py b/inference-engine/ie_bridges/python/tests/test_InferRequest.py index 2a5262bd9121e7..a1ea7ce8bcee92 100644 --- a/inference-engine/ie_bridges/python/tests/test_InferRequest.py +++ b/inference-engine/ie_bridges/python/tests/test_InferRequest.py @@ -18,9 +18,8 @@ def create_function_with_memory(input_shape, data_type): - import ngraph as ng from ngraph.impl import Function, Type - + import ngraph as ng input_data = ng.parameter(input_shape, name="input_data", dtype=data_type) rv = ng.read_value(input_data, "var_id_667") add = ng.add(rv, input_data, name="MemoryAdd") @@ -582,4 +581,221 @@ def test_query_state_write_buffer(device, input_shape, data_type, mode): expected_res = np.full(input_shape, i, dtype=format_map[data_type]) assert np.allclose(res['MemoryAdd'], expected_res, atol=1e-6), \ - "Expected values: {} \n Actual values: {} \n".format(expected_res, res) \ No newline at end of file + "Expected values: {} \n Actual values: {} \n".format(expected_res, res) + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +@pytest.mark.parametrize("shape, p_shape, ref_shape", [ + ([1, 4, 20, 20], [-1, 4, 20, 20], [5, 4, 20, 20]), + ([1, 4, 20, 20], [(0,5), 4, 20, 20], [3, 4, 20, 20]), + ([1, 4, 20, 20], [(3,5), 3, 20, 20], [2, 4, 20, 20]), + ([1, 4, 20, 20], [(3,5), 3, 20, 20], [6, 4, 20, 20]), +]) +def test_infer_dynamic_network_with_set_shape(shape, p_shape, ref_shape): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function(shape) + net = ng.function_to_cnn(function) + net.reshape({"data": p_shape}) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + exec_net.requests[0].input_blobs["data"].set_shape(ref_shape) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape + exec_net.infer({"data": np.ones(ref_shape)}) + request = exec_net.requests[0] + request.async_infer({"data": np.ones(ref_shape)}) + status = request.wait(ie.WaitMode.RESULT_READY) + assert status == ie.StatusCode.OK + assert request.output_blobs['out'].tensor_desc.dims == ref_shape + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +@pytest.mark.parametrize("shape, p_shape, ref_shape", [ + ([1, 4, 20, 20], [-1, 4, 20, 20], [5, 4, 20, 20]), + ([1, 4, 20, 20], [(0,5), 4, 20, 20], [3, 4, 20, 20]), + ([1, 4, 20, 20], [(3,5), 3, 20, 20], [2, 4, 20, 20]), + ([1, 4, 20, 20], [(3,5), 3, 20, 20], [6, 4, 20, 20]), +]) +def test_infer_dynamic_network_without_set_shape(shape, p_shape, ref_shape): + from conftest 
import create_ngraph_function + import ngraph as ng + function = create_ngraph_function(shape) + net = ng.function_to_cnn(function) + net.reshape({"data": p_shape}) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + exec_net.infer({"data": np.ones(ref_shape)}) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape + request = exec_net.requests[0] + request.async_infer({"data": np.ones(ref_shape)}) + status = request.wait(ie.WaitMode.RESULT_READY) + assert status == ie.StatusCode.OK + assert request.output_blobs['out'].tensor_desc.dims == ref_shape + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +@pytest.mark.parametrize("shape, p_shape, ref_shape", [ + ([1, 4, 20, 20], [-1, 4, 20, 20], [5, 4, 20, 20]), + ([1, 4, 20, 20], [(0,5), 4, 20, 20], [3, 4, 20, 20]), + ([1, 4, 20, 20], [(3,5), 3, 20, 20], [2, 4, 20, 20]), + ([1, 4, 20, 20], [(3,5), 3, 20, 20], [6, 4, 20, 20]), +]) +def test_infer_dynamic_network_with_set_blob(shape, p_shape, ref_shape): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function(shape) + net = ng.function_to_cnn(function) + net.reshape({"data": p_shape}) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc + tensor_desc.dims = ref_shape + blob = ie.Blob(tensor_desc) + exec_net.requests[0].set_blob("data", blob) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape + request = exec_net.requests[0] + request.infer({"data": np.ones(ref_shape)}) + request.async_infer({"data": np.ones(ref_shape)}) + status = request.wait(ie.WaitMode.RESULT_READY) + assert status == ie.StatusCode.OK + assert request.output_blobs["out"].tensor_desc.dims == ref_shape + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_infer_dynamic_network_twice(): + from conftest import create_ngraph_function + import ngraph as ng + shape, p_shape = [1, 4, 20, 20], [(0,5), 4, 20, 20] + ref_shape1, ref_shape2 = [2, 4, 20, 20], [3, 4, 20, 20] + function = create_ngraph_function(shape) + net = ng.function_to_cnn(function) + net.reshape({"data": p_shape}) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + request = exec_net.requests[0] + request.infer({"data": np.ones(ref_shape1)}) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape1 + assert request.output_blobs['out'].tensor_desc.dims == ref_shape1 + request.infer({"data": np.ones(ref_shape2)}) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape2 + assert request.output_blobs['out'].tensor_desc.dims == ref_shape2 + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_infer_dynamic_network_with_set_blob_twice(): + from conftest import create_ngraph_function + import ngraph as ng + shape, p_shape = [1, 4, 20, 20], [(0,5), 4, 20, 20] + ref_shape1, ref_shape2 = [2, 4, 20, 20], [3, 4, 20, 20] + function = create_ngraph_function(shape) + net = ng.function_to_cnn(function) + net.reshape({"data": p_shape}) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + request = exec_net.requests[0] + td = request.input_blobs['data'].tensor_desc + td.dims = ref_shape1 + 
blob = ie.Blob(td) + request.set_blob("data", blob) + request.infer({"data": np.ones(ref_shape1)}) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape1 + assert request.output_blobs['out'].tensor_desc.dims == ref_shape1 + td = request.input_blobs['data'].tensor_desc + td.dims = ref_shape2 + blob = ie.Blob(td) + request.set_blob("data", blob) + request.infer({"data": np.ones(ref_shape2)}) + assert exec_net.requests[0].input_blobs["data"].tensor_desc.dims == ref_shape2 + assert request.output_blobs['out'].tensor_desc.dims == ref_shape2 + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +@pytest.mark.parametrize("shapes", [ + ([3, 4, 20, 20], [3, 4, 20, 20], [3, 4, 20, 20]), + ([3, 4, 20, 20], [3, 6, 20, 20], [3, 8, 20, 20]), +]) +def test_async_infer_dynamic_network_3_requests(shapes): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([3, 4, 20, 20]) + net = ng.function_to_cnn(function) + net.reshape({"data": [3, (2, 10), 20, 20]}) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE", num_requests=3) + for i,request in enumerate(exec_net.requests): + request.async_infer({"data": np.ones(shapes[i])}) + for i,request in enumerate(exec_net.requests): + status = request.wait(ie.WaitMode.RESULT_READY) + assert status == ie.StatusCode.OK + assert request.output_blobs['out'].tensor_desc.dims == shapes[i] + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_set_blob_with_incorrect_name(): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([4, 4, 20, 20]) + net = ng.function_to_cnn(function) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc + tensor_desc.dims = [4, 4, 20, 20] + blob = ie.Blob(tensor_desc) + with pytest.raises(RuntimeError) as e: + exec_net.requests[0].set_blob("incorrect_name", blob) + assert f"Failed to find input or output with name: 'incorrect_name'" in str(e.value) + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_set_blob_with_incorrect_size(): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([4, 4, 20, 20]) + net = ng.function_to_cnn(function) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + tensor_desc = exec_net.requests[0].input_blobs["data"].tensor_desc + tensor_desc.dims = [tensor_desc.dims[0]*2, 4, 20, 20] + blob = ie.Blob(tensor_desc) + with pytest.raises(RuntimeError) as e: + exec_net.requests[0].set_blob("data", blob) + assert f"Input blob size is not equal network input size" in str(e.value) + with pytest.raises(RuntimeError) as e: + exec_net.requests[0].set_blob("out", blob) + assert f"Output blob size is not equal network output size" in str(e.value) + + +@pytest.mark.ngraph_dependent_test +@pytest.mark.template_plugin +def test_set_blob_after_async_infer(): + from conftest import create_ngraph_function + import ngraph as ng + function = create_ngraph_function([ng.Dimension(0,5), ng.Dimension(4), ng.Dimension(20), ng.Dimension(20)]) + net = ng.function_to_cnn(function) + ie_core = ie.IECore() + ie_core.register_plugin("templatePlugin", "TEMPLATE") + exec_net = ie_core.load_network(net, "TEMPLATE") + 
request = exec_net.requests[0] + tensor_desc = request.input_blobs['data'].tensor_desc + tensor_desc.dims = [2, 4, 20, 20] + blob = ie.Blob(tensor_desc) + request.async_infer({"data": np.ones([4, 4, 20, 20])}) + with pytest.raises(RuntimeError) as e: + request.set_blob("data", blob) + assert "REQUEST_BUSY" in str(e.value) diff --git a/inference-engine/ie_bridges/python/tests/test_NGraph.py b/inference-engine/ie_bridges/python/tests/test_NGraph.py index 6d5c110b9c8518..139d132eb0f65c 100644 --- a/inference-engine/ie_bridges/python/tests/test_NGraph.py +++ b/inference-engine/ie_bridges/python/tests/test_NGraph.py @@ -6,17 +6,14 @@ from ngraph.impl.op import Parameter from ngraph.impl import Function, Shape, Type -from conftest import model_path +from conftest import model_path, create_ngraph_function test_net_xml, test_net_bin = model_path() def test_create_IENetwork_from_nGraph(): - element_type = Type.f32 - param = Parameter(element_type, Shape([1, 3, 22, 22])) - relu = ng.relu(param) - func = Function([relu], [param], 'test') + func = create_ngraph_function([1, 3, 22, 22]) caps = Function.to_capsule(func) cnnNetwork = IENetwork(caps) assert cnnNetwork != None @@ -26,10 +23,7 @@ def test_create_IENetwork_from_nGraph(): def test_get_IENetwork_from_nGraph(): - element_type = Type.f32 - param = Parameter(element_type, Shape([1, 3, 22, 22])) - relu = ng.relu(param) - func = Function([relu], [param], 'test') + func = create_ngraph_function([1, 3, 22, 22]) caps = Function.to_capsule(func) cnnNetwork = IENetwork(caps) assert cnnNetwork != None diff --git a/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp b/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp index d084f6ab797ea3..abd12226ce456f 100644 --- a/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp +++ b/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iexecutable_network_internal.cpp @@ -26,6 +26,10 @@ void IExecutableNetworkInternal::setNetworkOutputs(const OutputsDataMap& network _networkOutputs = networkOutputs; } +void IExecutableNetworkInternal::setRuntimeFunction(std::shared_ptr function) { + _runtime_function = std::move(function); +} + ConstOutputsDataMap IExecutableNetworkInternal::GetOutputsInfo() const { ConstOutputsDataMap outputMap; for (const auto& output : _networkOutputs) { @@ -63,7 +67,7 @@ void IExecutableNetworkInternal::Export(std::ostream& networkModel) { } std::shared_ptr IExecutableNetworkInternal::GetExecGraphInfo() { - IE_THROW(NotImplemented); + return _runtime_function; } std::vector> IExecutableNetworkInternal::QueryState() { diff --git a/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iplugin_internal.cpp index bda5694322dadf..585679dc3f8683 100644 --- a/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iplugin_internal.cpp +++ b/inference-engine/src/inference_engine/src/cpp_interfaces/interface/ie_iplugin_internal.cpp @@ -16,9 +16,11 @@ #include #include "blob_factory.hpp" +#include "exec_graph_info.hpp" #include "ie_icore.hpp" #include "ie_iextension.h" #include "ie_input_info.hpp" +#include "ie_ngraph_utils.hpp" #include "ie_parameter.hpp" namespace InferenceEngine { @@ -125,6 +127,10 @@ std::shared_ptr IInferencePlugin::LoadNetwork( } SetExeNetworkInfo(impl, const_map_cast(network.getInputsInfo()), 
const_map_cast(network.getOutputsInfo()));
+    auto function = network.getFunction();
+    if (function) {
+        SetExeNetworkInfo(impl, std::const_pointer_cast<ngraph::Function>(function));
+    }
     return impl;
 }
 
@@ -219,6 +225,85 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
     exeNetwork->setNetworkInputs(copyInfo(constMapCast(inputs)));
     exeNetwork->setNetworkOutputs(copyInfo(constMapCast(outputs)));
+
+    ngraph::ParameterVector parameters;
+    ngraph::ResultVector results;
+    std::vector<ngraph::Output<ngraph::Node>> node_outputs;
+
+    for (auto&& input : inputs) {
+        auto tensor_desc = input.second->getTensorDesc();
+        auto dims = tensor_desc.getDims();
+        parameters.push_back(
+            std::make_shared<ngraph::op::Parameter>(details::convertPrecision(tensor_desc.getPrecision()),
+                                                    std::vector<ngraph::Dimension>{dims.begin(), dims.end()}));
+        parameters.back()->set_friendly_name(input.first);
+        node_outputs.push_back(parameters.back()->output(0));
+    }
+
+    auto node = std::make_shared<ExecGraphInfoSerialization::ExecutionNode>(node_outputs, outputs.size());
+
+    int i = 0;
+    for (auto&& output : outputs) {
+        auto tensor_desc = output.second->getTensorDesc();
+        auto dims = tensor_desc.getDims();
+        node->set_output_type(i,
+                              details::convertPrecision(tensor_desc.getPrecision()),
+                              std::vector<ngraph::Dimension>{dims.begin(), dims.end()});
+        results.push_back(std::make_shared<ngraph::op::Result>(node->output(i)));
+        ++i;
+    }
+    exeNetwork->setRuntimeFunction(std::make_shared<ngraph::Function>(results, parameters, "execution_info"));
+
+    exeNetwork->SetPointerToPlugin(shared_from_this());
+}
+
+void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
+                                         const std::shared_ptr<ov::Function>& function) {
+    IE_ASSERT(exeNetwork != nullptr);
+    IE_ASSERT(function != nullptr);
+    ngraph::ParameterVector parameters;
+    ngraph::ResultVector results;
+    ngraph::NodeVector nodes;
+
+    std::map<ngraph::Output<ngraph::Node>, ngraph::Output<ngraph::Node>> output_map;
+
+    for (auto&& node : function->get_ordered_ops()) {
+        ngraph::Node* new_node = nullptr;
+        if (ngraph::is_type<ngraph::op::Parameter>(node)) {
+            parameters.push_back(std::static_pointer_cast<ngraph::op::Parameter>(node->clone_with_new_inputs({})));
+            for (std::size_t i = 0; i < node->outputs().size(); ++i) {
+                output_map.emplace(node->output(i), parameters.back()->output(i));
+            }
+            new_node = parameters.back().get();
+        } else {
+            std::vector<ngraph::Output<ngraph::Node>> outputs;
+            for (auto&& input : node->inputs()) {
+                outputs.emplace_back(output_map.at(input.get_source_output()));
+            }
+            if (ngraph::is_type<ngraph::op::Result>(node)) {
+                results.push_back(
+                    std::static_pointer_cast<ngraph::op::Result>(node->clone_with_new_inputs(outputs)));
+                new_node = results.back().get();
+            } else {
+                nodes.push_back(
+                    std::make_shared<ExecGraphInfoSerialization::ExecutionNode>(outputs, node->outputs().size()));
+                new_node = nodes.back().get();
+                for (std::size_t i = 0; i < node->outputs().size(); ++i) {
+                    auto output = node->output(i);
+                    output_map.emplace(output, nodes.back()->output(i));
+                    new_node->set_output_type(i, output.get_element_type(), output.get_partial_shape());
+                }
+            }
+        }
+        IE_ASSERT(new_node != nullptr);
+        new_node->set_friendly_name(node->get_friendly_name());
+        new_node->get_rt_info()[ExecGraphInfoSerialization::PERF_COUNTER] =
+            std::make_shared<::ngraph::VariantWrapper<std::string>>("not_executed");
+        new_node->get_rt_info()[ExecGraphInfoSerialization::ORIGINAL_NAMES] =
+            std::make_shared<::ngraph::VariantWrapper<std::string>>(node->get_friendly_name());
+    }
+    exeNetwork->setRuntimeFunction(
+        std::make_shared<ngraph::Function>(results, parameters, function->get_friendly_name() + "_execution_info"));
 
     exeNetwork->SetPointerToPlugin(shared_from_this());
 }
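Because every network loaded through `LoadNetwork` now records a runtime function, `GetExecGraphInfo` (implemented above to return `_runtime_function`) stops throwing `NotImplemented` for plugins that never build a native execution graph. A hedged Python-side sketch; the device and file names are illustrative:

```python
# Sketch: fetch the default execution graph recorded by SetExeNetworkInfo.
# Assumes an IR on disk; "CPU" stands in for any registered device name
# ("TEMPLATE" is used for this purpose in the tests below).
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network("model.xml", "model.bin")  # placeholder IR files
exec_net = ie.load_network(net, "CPU")
exec_graph = exec_net.get_exec_graph_info()      # no longer NotImplemented
print(list(exec_graph.input_info.keys()))        # mirrors the network inputs
```

diff --git a/inference-engine/src/multi_device/multi_device_plugin.cpp b/inference-engine/src/multi_device/multi_device_plugin.cpp
index b0bda135224c5a..11bc89bf783b02 100644
--- 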
a/inference-engine/src/multi_device/multi_device_plugin.cpp
+++ b/inference-engine/src/multi_device/multi_device_plugin.cpp
@@ -293,6 +293,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
         SetExeNetworkInfo(impl,
                           executableNetworkPerDevice.begin()->second->GetInputsInfo(),
                           executableNetworkPerDevice.begin()->second->GetOutputsInfo());
+        SetExeNetworkInfo(impl, executableNetworkPerDevice.begin()->second->GetExecGraphInfo());
     }
     return impl;
 }
diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp
index cee656e2f4e1db..adf48cc8f9b33a 100644
--- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp
+++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iexecutable_network_internal.hpp
@@ -15,6 +15,9 @@
 #include "ie_parameter.hpp"
 #include "ie_remote_context.hpp"
 
+namespace ov {
+class Function;
+}
 namespace InferenceEngine {
 
 class IInferencePlugin;
@@ -47,6 +50,12 @@ class INFERENCE_ENGINE_API_CLASS(IExecutableNetworkInternal)
      */
     virtual void setNetworkOutputs(const OutputsDataMap& networkOutputs);
 
+    /**
+     * @brief Sets a function with network inputs and outputs info
+     * @param[in] function The function with network inputs and outputs info
+     */
+    virtual void setRuntimeFunction(std::shared_ptr<ov::Function> function);
+
     /**
      * @brief Gets the Executable network output Data node information. The received info is stored in the given Data
      * node.
@@ -141,6 +150,7 @@ class INFERENCE_ENGINE_API_CLASS(IExecutableNetworkInternal)
     virtual std::shared_ptr<IInferRequestInternal> CreateInferRequestImpl(InputsDataMap networkInputs,
                                                                           OutputsDataMap networkOutputs);
 
+    std::shared_ptr<ov::Function> _runtime_function;  //!< Holds information about network inputs and outputs
     InferenceEngine::InputsDataMap _networkInputs;    //!< Holds information about network inputs info
     InferenceEngine::OutputsDataMap _networkOutputs;  //!< Holds information about network outputs data
 
diff --git a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
index 94d719b9c6b5ba..43d98f49a37430 100644
--- a/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
+++ b/inference-engine/src/plugin_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
@@ -20,6 +20,9 @@
 #include "ie_input_info.hpp"
 #include "ie_parameter.hpp"
 
+namespace ov {
+class Function;
+}  // namespace ov
 namespace InferenceEngine {
 
 class ICore;
@@ -302,6 +305,14 @@ class INFERENCE_ENGINE_API_CLASS(IInferencePlugin) : public std::enable_shared_f
                            const ConstInputsDataMap& inputs,
                            const ConstOutputsDataMap& outputs);
 
+    /**
+     * @brief Set input and output information to executable network. This method is used to
+     * set additional information to InferenceEngine::IExecutableNetworkInternal created by device plugin.
+ * @param function Function with initial execution info + */ + void SetExeNetworkInfo(const std::shared_ptr& exeNetwork, + const std::shared_ptr& function); + std::string _pluginName; //!< A device name that plugins enables std::map _config; //!< A map config keys -> values std::weak_ptr _core; //!< A pointer to ICore interface diff --git a/inference-engine/tests/functional/inference_engine/caching_test.cpp b/inference-engine/tests/functional/inference_engine/caching_test.cpp index 10d952e6e0d778..a1714c6e51a035 100644 --- a/inference-engine/tests/functional/inference_engine/caching_test.cpp +++ b/inference-engine/tests/functional/inference_engine/caching_test.cpp @@ -131,6 +131,7 @@ class MockExecutableNetwork : public IExecutableNetworkInternal { MOCK_METHOD2(CreateInferRequestImpl, IInferRequestInternal::Ptr(InputsDataMap, OutputsDataMap)); MOCK_METHOD1(setNetworkInputs, void(const InputsDataMap& networkInputs)); MOCK_METHOD1(setNetworkOutputs, void(const OutputsDataMap& networkOutputs)); + MOCK_METHOD0(GetExecGraphInfo, std::shared_ptr()); // void Export(std::ostream& networkModel) override { // std::lock_guard guard(m_pluginMutex); @@ -217,10 +218,31 @@ class CachingTest : public ::testing::TestWithParam(new MkDirGuard(m_cacheDir)); } + std::shared_ptr createMockIExecutableNet() { + auto mock = std::make_shared(); + EXPECT_CALL(*mock, GetInputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(ConstInputsDataMap{})); + EXPECT_CALL(*mock, GetOutputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(ConstOutputsDataMap{})); + EXPECT_CALL(*mock, GetConfig(PluginConfigParams::KEY_PERF_COUNT)).Times(AnyNumber()).WillRepeatedly(Return(Parameter{PluginConfigParams::NO})); + EXPECT_CALL(*mock, GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))).Times(AnyNumber()).WillRepeatedly(Return(Parameter{1u})); + EXPECT_CALL(*mock, GetExecGraphInfo()).Times(AnyNumber()).WillRepeatedly(Return([] { + ngraph::ParameterVector parameters; + parameters.push_back(std::make_shared( + ov::element::f32, ov::Shape{1, 3, 8, 8})); + auto notOp = std::make_shared(parameters.back()); + ngraph::ResultVector results; + results.push_back(std::make_shared(notOp)); + return std::make_shared(results, parameters, "empty_function"); + } ())); + auto ptr = std::make_shared(); + EXPECT_CALL(*ptr, SetCallback(_)).Times(AnyNumber()); + EXPECT_CALL(*mock, CreateInferRequest()).Times(AnyNumber()).WillRepeatedly(Return(ptr)); + return mock; + } + void SetUp() override { initParamTest(); mockPlugin = std::make_shared(); - net = std::make_shared(); + net = createMockIExecutableNet(); setupMock(*mockPlugin); std::string libraryName = get_mock_engine_name(); sharedObjectLoader.reset(new SharedObjectLoader(libraryName.c_str())); @@ -285,18 +307,6 @@ class CachingTest : public ::testing::TestWithParam createMockIExecutableNet() { - auto mock = std::make_shared(); - EXPECT_CALL(*mock, GetInputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(ConstInputsDataMap{})); - EXPECT_CALL(*mock, GetOutputsInfo()).Times(AnyNumber()).WillRepeatedly(Return(ConstOutputsDataMap{})); - EXPECT_CALL(*mock, GetConfig(PluginConfigParams::KEY_PERF_COUNT)).Times(AnyNumber()).WillRepeatedly(Return(Parameter{PluginConfigParams::NO})); - EXPECT_CALL(*mock, GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))).Times(AnyNumber()).WillRepeatedly(Return(Parameter{1u})); - auto ptr = std::make_shared(); - EXPECT_CALL(*ptr, SetCallback(_)).Times(AnyNumber()); - EXPECT_CALL(*mock, CreateInferRequest()).Times(AnyNumber()).WillRepeatedly(Return(ptr)); - return mock; 
- } - private: template std::function make_std_function(const std::string& functionName) { @@ -1453,7 +1463,8 @@ TEST_P(CachingTest, LoadMulti_Archs) { EXPECT_CALL(*net, Export(_)).Times(2); testLoad([&](Core &ie) { ie.SetConfig({{CONFIG_KEY(CACHE_DIR), m_cacheDir}}); - ASSERT_NO_THROW(m_testFunction(ie)); + // ASSERT_NO_THROW(m_testFunction(ie)); + m_testFunction(ie); }); } } @@ -1464,7 +1475,7 @@ TEST_P(CachingTest, LoadMulti_NoCachingOnDevice) { const auto TEST_DEVICE_MAX_COUNT = 100; // Looks enough to catch potential race conditions EXPECT_CALL(*mockPlugin, GetMetric(_, _)).Times(AnyNumber()); EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), _)) - .Times(AnyNumber()).WillRepeatedly(Return(false)); + .Times(AnyNumber()).WillRepeatedly(Return(Parameter{false})); EXPECT_CALL(*mockPlugin, QueryNetwork(_, _)).Times(AnyNumber()); EXPECT_CALL(*mockPlugin, GetMetric(METRIC_KEY(DEVICE_ARCHITECTURE), _)).Times(AnyNumber()); DataPtr inData = std::make_shared("in", Precision::FP32); diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp index 68d72bc5329868..c75651220bb4a7 100644 --- a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp +++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/skip_tests_config.cpp @@ -50,5 +50,7 @@ std::vector disabledTestPatterns() { R"(.*IEClassNetworkTestP\.LoadNetworkActualHeteroDeviceNoThrow.*)", // CVS-58963: Not implemented yet R"(.*Behavior.*InferRequest.*OutOfFirstOutIsInputForSecondNetwork.*)", + // TODO: CVS-65013 + R"(.*LoadNetworkCreateDefaultExecGraphResult.*)", }; } diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp index 1064edaa570865..639681f5451efb 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp @@ -489,6 +489,36 @@ TEST_P(IEClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { ASSERT_NO_THROW(ie.LoadNetwork(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}})); } +TEST_P(IEClassNetworkTestP, LoadNetworkCreateDefaultExecGraphResult) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + Core ie = createCoreWithTemplate(); + auto net = ie.LoadNetwork(actualNetwork, deviceName); + auto exec_function = net.GetExecGraphInfo().getFunction(); + ASSERT_NE(nullptr, exec_function); + auto actual_parameters = exec_function->get_parameters(); + auto actual_results = exec_function->get_results(); + auto expected_parameters = actualNetwork.getFunction()->get_parameters(); + auto expected_results = actualNetwork.getFunction()->get_results(); + ASSERT_EQ(expected_parameters.size(), actual_parameters.size()); + for (std::size_t i = 0; i < expected_parameters.size(); ++i) { + auto expected_element_type = expected_parameters[i]->get_output_element_type(0); + auto actual_element_type = actual_parameters[i]->get_output_element_type(0); + ASSERT_EQ(expected_element_type, actual_element_type) << "For index: " << i; + auto expected_shape = expected_parameters[i]->get_output_shape(0); + auto actual_shape = actual_parameters[i]->get_output_shape(0); + ASSERT_EQ(expected_shape, actual_shape) << "For index: " << i; + } + 
ASSERT_EQ(expected_results.size(), actual_results.size()); + for (std::size_t i = 0; i < expected_results.size(); ++i) { + auto expected_element_type = expected_results[i]->get_input_element_type(0); + auto actual_element_type = actual_results[i]->get_input_element_type(0); + ASSERT_EQ(expected_element_type, actual_element_type) << "For index: " << i; + auto expected_shape = expected_results[i]->get_input_shape(0); + auto actual_shape = actual_results[i]->get_input_shape(0); + ASSERT_EQ(expected_shape, actual_shape) << "For index: " << i; + } +} + // // ImportExportNetwork // diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp index e9b4d0fa5201ef..ba9c7926708455 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp @@ -63,66 +63,57 @@ TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) { // Create CNNNetwork from ngrpah::Function InferenceEngine::CNNNetwork cnnNet(function); InferenceEngine::CNNNetwork execGraph; - if (targetDevice != CommonTestUtils::DEVICE_AUTO && - targetDevice != CommonTestUtils::DEVICE_MULTI && - targetDevice != CommonTestUtils::DEVICE_TEMPLATE && - targetDevice != CommonTestUtils::DEVICE_GNA) { - // Load CNNNetwork to target plugins - auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); - ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); - // Create InferRequest - InferenceEngine::InferRequest req; - ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - // Store all the original layers from the network - const auto originalLayers = function->get_ops(); - std::map originalLayersMap; - for (const auto &layer : originalLayers) { - originalLayersMap[layer->get_friendly_name()] = 0; - } - int IteratorForLayersConstant = 0; - - auto function = execGraph.getFunction(); - ASSERT_NE(function, nullptr); - - for (const auto & op : function->get_ops()) { - const auto & rtInfo = op->get_rt_info(); - - auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string { - auto it = rtInfo.find(paramName); - IE_ASSERT(rtInfo.end() != it); - auto value = std::dynamic_pointer_cast>(it->second); - IE_ASSERT(nullptr != value); - - return value->get(); - }; - - // Each layer from the execGraphInfo network must have PM data option set - ASSERT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER)); - // Parse origin layer names (fused/merged layers) from the executable graph - // and compare with layers from the original model - auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES); - if (origFromExecLayer == "") - IteratorForLayersConstant++; - std::vector origFromExecLayerSep = separateStrToVec(origFromExecLayer, ','); - std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) { - auto origLayer = originalLayersMap.find(layer); - ASSERT_NE(originalLayersMap.end(), origLayer) << layer; - origLayer->second++; - }); - } + // Load CNNNetwork to target plugins + auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration); + ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo()); + // Create InferRequest + InferenceEngine::InferRequest req; + ASSERT_NO_THROW(req = execNet.CreateInferRequest()); + // Store all the original layers from the network + const auto originalLayers = 
function->get_ops();
+    std::map<std::string, int> originalLayersMap;
+    for (const auto &layer : originalLayers) {
+        originalLayersMap[layer->get_friendly_name()] = 0;
+    }
+    int IteratorForLayersConstant = 0;
+
+    auto function = execGraph.getFunction();
+    ASSERT_NE(function, nullptr);
+
+    for (const auto & op : function->get_ops()) {
+        const auto & rtInfo = op->get_rt_info();
+
+        auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+            auto it = rtInfo.find(paramName);
+            IE_ASSERT(rtInfo.end() != it) << " paramName: " << paramName;
+            auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+            IE_ASSERT(nullptr != value);
+
+            return value->get();
+        };
+
+        // Each layer from the execGraphInfo network must have PM data option set
+        ASSERT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER));
+        // Parse origin layer names (fused/merged layers) from the executable graph
+        // and compare with layers from the original model
+        auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
+        if (origFromExecLayer == "")
+            IteratorForLayersConstant++;
+        std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+        std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+            auto origLayer = originalLayersMap.find(layer);
+            ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+            origLayer->second++;
+        });
+    }
-        // All layers from the original IR must be present with in ExecGraphInfo
-        for (auto &layer : originalLayersMap) {
-            if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
-                IteratorForLayersConstant--;
-                continue;
-            }
-            ASSERT_GE(layer.second, 0);
+    // All layers from the original IR must be present within ExecGraphInfo
+    for (auto &layer : originalLayersMap) {
+        if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
+            IteratorForLayersConstant--;
+            continue;
         }
-    } else {
-        InferenceEngine::ExecutableNetwork network;
-        ASSERT_NO_THROW(network = ie->LoadNetwork(cnnNet, targetDevice, configuration));
-        ASSERT_THROW(network.GetExecGraphInfo(), InferenceEngine::NotImplemented);
+        ASSERT_GE(layer.second, 0);
     }
 }
 
@@ -132,74 +123,66 @@ TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) {
     // Create CNNNetwork from ngraph::Function
     InferenceEngine::CNNNetwork cnnNet(function);
     InferenceEngine::CNNNetwork execGraph;
-    if (targetDevice != CommonTestUtils::DEVICE_AUTO &&
-        targetDevice != CommonTestUtils::DEVICE_MULTI &&
-        targetDevice != CommonTestUtils::DEVICE_TEMPLATE &&
-        targetDevice != CommonTestUtils::DEVICE_GNA) {
-        // Load CNNNetwork to target plugins
-        auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
-        ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
-        // Create InferRequest
-        InferenceEngine::InferRequest req;
-        ASSERT_NO_THROW(req = execNet.CreateInferRequest());
-        // Store all the original layers from the network
-        const auto originalLayers = function->get_ops();
-        std::map<std::string, int> originalLayersMap;
-        for (const auto &layer : originalLayers) {
-            originalLayersMap[layer->get_friendly_name()] = 0;
-        }
-        int IteratorForLayersConstant = 0;
-        // Store all the layers from the executable graph information represented as CNNNetwork
-        bool has_layer_with_valid_time = false;
-        auto function = execGraph.getFunction();
-        ASSERT_NE(nullptr, function);
-
-        for (const auto & op : function->get_ops()) {
-            const auto & rtInfo = op->get_rt_info();
-
-            auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
-                auto it = rtInfo.find(paramName);
-                IE_ASSERT(rtInfo.end() != it);
-                auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
-                IE_ASSERT(nullptr != value);
-
-                return value->get();
-            };
-
-            // At least one layer in the topology should be executed and have valid perf counter value
-            try {
-                float x = static_cast<float>(std::atof(
-                    getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str()));
-                ASSERT_GE(x, 0.0f);
-                has_layer_with_valid_time = true;
-            } catch (std::exception &) {}
-
-            // Parse origin layer names (fused/merged layers) from the executable graph
-            // and compare with layers from the original model
-            auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
-            std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
-            if (origFromExecLayer == "")
-                IteratorForLayersConstant++;
-            std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
-                auto origLayer = originalLayersMap.find(layer);
-                ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
-                origLayer->second++;
-            });
-        }
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    // Store all the original layers from the network
+    const auto originalLayers = function->get_ops();
+    std::map<std::string, int> originalLayersMap;
+    for (const auto &layer : originalLayers) {
+        originalLayersMap[layer->get_friendly_name()] = 0;
+    }
+    int IteratorForLayersConstant = 0;
+    // Store all the layers from the executable graph information represented as CNNNetwork
+    bool has_layer_with_valid_time = false;
+    auto function = execGraph.getFunction();
+    ASSERT_NE(nullptr, function);
+
+    for (const auto & op : function->get_ops()) {
+        const auto & rtInfo = op->get_rt_info();
+
+        auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+            auto it = rtInfo.find(paramName);
+            IE_ASSERT(rtInfo.end() != it);
+            auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+            IE_ASSERT(nullptr != value);
+
+            return value->get();
+        };
+
+        // At least one layer in the topology should be executed and have valid perf counter value
+        try {
+            float x = static_cast<float>(std::atof(
+                getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str()));
+            ASSERT_GE(x, 0.0f);
+            has_layer_with_valid_time = true;
+        } catch (std::exception &) {}
+
+        // Parse origin layer names (fused/merged layers) from the executable graph
+        // and compare with layers from the original model
+        auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
+        std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+        if (origFromExecLayer == "")
+            IteratorForLayersConstant++;
+        std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+            auto origLayer = originalLayersMap.find(layer);
+            ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+            origLayer->second++;
+        });
+    }
 
-    ASSERT_TRUE(has_layer_with_valid_time);
+    ASSERT_TRUE(has_layer_with_valid_time);
 
-    // All layers from the original IR must be present within ExecGraphInfo
-    for (auto &layer : originalLayersMap) {
-        if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
-            IteratorForLayersConstant--;
-            continue;
-        }
-        ASSERT_GE(layer.second, 0);
+    // All layers from the original IR must be present within ExecGraphInfo
+    for (auto &layer : originalLayersMap) {
+        if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
+            IteratorForLayersConstant--;
+            continue;
         }
-    } else {
-        ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration).GetExecGraphInfo(),
-                     InferenceEngine::NotImplemented);
+        ASSERT_GE(layer.second, 0);
     }
 }
 
@@ -214,22 +197,14 @@ TEST_P(ExecGraphTests, CheckExecGraphInfoSerialization) {
     // Create CNNNetwork from ngraph::Function
     InferenceEngine::CNNNetwork cnnNet(function);
     InferenceEngine::CNNNetwork execGraph;
-    if (targetDevice != CommonTestUtils::DEVICE_AUTO &&
-        targetDevice != CommonTestUtils::DEVICE_MULTI &&
-        targetDevice != CommonTestUtils::DEVICE_TEMPLATE &&
-        targetDevice != CommonTestUtils::DEVICE_GNA) {
-        // Load CNNNetwork to target plugins
-        auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
-        ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
-        // Create InferRequest
-        InferenceEngine::InferRequest req;
-        ASSERT_NO_THROW(req = execNet.CreateInferRequest());
-        execGraph.serialize(out_xml_path, out_bin_path);
-        ASSERT_EQ(0, std::remove(out_xml_path.c_str()));
-        ASSERT_EQ(0, std::remove(out_bin_path.c_str()));
-    } else {
-        ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration).GetExecGraphInfo(),
-                     InferenceEngine::NotImplemented);
-    }
+    // Load CNNNetwork to target plugins
+    auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+    ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
+    // Create InferRequest
+    InferenceEngine::InferRequest req;
+    ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+    execGraph.serialize(out_xml_path, out_bin_path);
+    ASSERT_EQ(0, std::remove(out_xml_path.c_str()));
+    ASSERT_EQ(0, std::remove(out_bin_path.c_str()));
 }
 }  // namespace BehaviorTestsDefinitions
\ No newline at end of file
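All three tests above read per-op metadata from the execution graph through `op->get_rt_info()`. As a minimal standalone sketch of that pattern, assuming the 2021.x C++ API used in this file (`ie_core.hpp`, `exec_graph_info.hpp`, and the `ngraph::VariantImpl<std::string>` wrapper); `dump_exec_graph` is a hypothetical helper, not part of this PR:

```cpp
// Sketch: walk an executable network's execution graph and print, for each
// executed op, the original-model layers that were fused/merged into it.
#include <iostream>
#include <memory>

#include <ie_core.hpp>
#include <exec_graph_info.hpp>
#include <ngraph/variant.hpp>

void dump_exec_graph(InferenceEngine::ExecutableNetwork& execNet) {
    InferenceEngine::CNNNetwork execGraph = execNet.GetExecGraphInfo();
    auto function = execGraph.getFunction();  // ngraph::Function view of the exec graph
    for (const auto& op : function->get_ops()) {
        const auto& rtInfo = op->get_rt_info();
        auto it = rtInfo.find(ExecGraphInfoSerialization::ORIGINAL_NAMES);
        if (it == rtInfo.end())
            continue;  // ops materialized at compile time may carry no origin names
        auto names = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
        if (names)  // comma-separated list, the same format separateStrToVec() parses above
            std::cout << op->get_friendly_name() << " <- " << names->get() << std::endl;
    }
}
```

The `PERF_COUNTER` entry is read the same way; it holds either the string `"not_executed"` or a numeric counter, which is why the second test parses it with `std::atof` inside a try block.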
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mock.cpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mock.cpp
index 9cc7309c1c9666..515a51e5244d7c 100644
--- a/inference-engine/tests/ie_test_utils/unit_test_utils/mock.cpp
+++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mock.cpp
@@ -45,12 +45,12 @@ void MockNotEmptyICNNNetwork::getInputsInfo(InputsDataMap &inputs) const noexcept {
         "Input",
         Precision::FP32 });
     getInputTo(inData)[MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME] = inputLayer;
-    inData->setDims(MockNotEmptyICNNNetwork::INPUT_DIMENTIONS);
+    inData->setDims(MockNotEmptyICNNNetwork::INPUT_DIMENSIONS);
     inData->setLayout(Layout::NCHW);
     inputInfo->setInputData(inData);
     auto outData = std::make_shared<Data>(MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME, Precision::UNSPECIFIED);
-    outData->setDims(MockNotEmptyICNNNetwork::OUTPUT_DIMENTIONS);
+    outData->setDims(MockNotEmptyICNNNetwork::OUTPUT_DIMENSIONS);
     outData->setLayout(Layout::NCHW);
     getInputTo(outData)[""] = std::make_shared<CNNLayer>(LayerParams{
         MockNotEmptyICNNNetwork::OUTPUT_BLOB_NAME,
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_not_empty_icnn_network.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_not_empty_icnn_network.hpp
index d861ded519a863..98cf3509c8eef8 100644
--- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_not_empty_icnn_network.hpp
+++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/mock_not_empty_icnn_network.hpp
@@ -19,9 +19,9 @@ IE_SUPPRESS_DEPRECATED_START
 class MockNotEmptyICNNNetwork final : public ICNNNetwork {
 public:
     static constexpr const char* INPUT_BLOB_NAME = "first_input";
-    const SizeVector INPUT_DIMENTIONS = { 1, 3, 299, 299 };
+    const SizeVector INPUT_DIMENSIONS = { 1, 3, 299, 299 };
     static constexpr const char* OUTPUT_BLOB_NAME = "first_output";
-    const SizeVector OUTPUT_DIMENTIONS = { 1, 3, 299, 299 };
+    const SizeVector OUTPUT_DIMENSIONS = { 1, 3, 299, 299 };
     const std::string name = "test";
     const std::string& getName() const noexcept override {
         return name;
@@ -29,10 +29,24 @@ class MockNotEmptyICNNNetwork final : public ICNNNetwork {
     void getOutputsInfo(OutputsDataMap& out) const noexcept override;
     void getInputsInfo(InputsDataMap &inputs) const noexcept override;
     std::shared_ptr<ngraph::Function> getFunction() noexcept override {
-        return nullptr;
+        ngraph::ParameterVector parameters;
+        parameters.push_back(std::make_shared<ngraph::op::Parameter>(
+            ov::element::f32, std::vector<ov::Dimension>{INPUT_DIMENSIONS.begin(), INPUT_DIMENSIONS.end()}));
+        parameters.back()->set_friendly_name(INPUT_BLOB_NAME);
+        ngraph::ResultVector results;
+        results.push_back(std::make_shared<ngraph::op::Result>(parameters.back()->output(0)));
+        results.back()->set_friendly_name(OUTPUT_BLOB_NAME);
+        return std::make_shared<ngraph::Function>(results, parameters, "empty_function");
     }
     std::shared_ptr<ngraph::Function> getFunction() const noexcept override {
-        return nullptr;
+        ngraph::ParameterVector parameters;
+        parameters.push_back(std::make_shared<ngraph::op::Parameter>(
+            ov::element::f32, std::vector<ov::Dimension>{INPUT_DIMENSIONS.begin(), INPUT_DIMENSIONS.end()}));
+        parameters.back()->set_friendly_name(INPUT_BLOB_NAME);
+        ngraph::ResultVector results;
+        results.push_back(std::make_shared<ngraph::op::Result>(parameters.back()->output(0)));
+        results.back()->set_friendly_name(OUTPUT_BLOB_NAME);
+        return std::make_shared<ngraph::Function>(results, parameters, "empty_function");
     }
     MOCK_METHOD(InputInfo::Ptr, getInput, (const std::string &inputName), (const, noexcept));
     MOCK_METHOD(size_t, layerCount, (), (const, noexcept));
diff --git a/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp b/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp
index 8aa53a14fe2061..fb79a20a785ecb 100644
--- a/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp
+++ b/inference-engine/thirdparty/clDNN/api/cldnn/runtime/engine.hpp
@@ -96,17 +96,20 @@ class engine {
     /// Returns user context handle which was used to create the engine
     virtual void* get_user_context() const = 0;
 
-    /// Returns the maximum amount of GPU memory that engine allocated in current process
+    /// Returns the total maximum amount of GPU memory allocated by the engine in the current process across all allocation types
     uint64_t get_max_used_device_memory() const;
 
-    /// Returns the amount of GPU memory currently used by the engine
-    uint64_t get_used_device_memory() const;
+    /// Returns the maximum amount of GPU memory allocated by the engine in the current process for the specified allocation @p type
+    uint64_t get_max_used_device_memory(allocation_type type) const;
 
-    /// Adds @p bytes count to currently used memory size
-    void add_memory_used(uint64_t bytes);
+    /// Returns the amount of GPU memory of the specified allocation @p type that is currently used by the engine
+    uint64_t get_used_device_memory(allocation_type type) const;
 
-    /// Subtracts @p bytes count from currently used memory size
-    void subtract_memory_used(uint64_t bytes);
+    /// Adds @p bytes count to the currently used memory size of the specified allocation @p type
+    void add_memory_used(uint64_t bytes, allocation_type type);
+
+    /// Subtracts @p bytes count from the currently used memory size of the specified allocation @p type
+    void subtract_memory_used(uint64_t bytes, allocation_type type);
 
     /// Returns true if USM is enabled in engine config and device/driver supports required features
     bool use_unified_shared_memory() const;
@@ -142,8 +145,8 @@ class engine {
     const device::ptr _device;
     engine_configuration _configuration;
 
-    std::atomic<uint64_t> memory_usage = {0};
-    std::atomic<uint64_t> peak_memory_usage = {0};
+    std::map<allocation_type, std::atomic<uint64_t>> memory_usage_map;
+    std::map<allocation_type, std::atomic<uint64_t>> peak_memory_usage_map;
 };
 
 }  // namespace cldnn
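Splitting the counters into per-`allocation_type` maps changes the locking story: in the implementation below, `add_memory_used()` checks map emptiness before taking its function-local mutex, so the very first insertions for a type can race with concurrent readers. A condensed standalone sketch of the same bookkeeping (illustrative names, not the clDNN types) that simply serializes every update:

```cpp
#include <atomic>
#include <cstdint>
#include <map>
#include <mutex>

enum class allocation_type { cl_mem, usm_host, usm_device };  // illustrative subset

class memory_counters {
public:
    void add(uint64_t bytes, allocation_type type) {
        std::lock_guard<std::mutex> guard(m_);  // guard lookup and first-time insertion together
        uint64_t& used = used_[type];           // value-initialized to 0 on first use
        used += bytes;
        uint64_t& peak = peak_[type];
        if (used > peak)
            peak = used;
    }
    void subtract(uint64_t bytes, allocation_type type) {
        std::lock_guard<std::mutex> guard(m_);
        used_.at(type) -= bytes;  // throws if this type was never allocated
    }
    uint64_t total_peak() const {
        std::lock_guard<std::mutex> guard(m_);
        uint64_t sum = 0;
        for (const auto& p : peak_)
            sum += p.second;
        return sum;
    }

private:
    mutable std::mutex m_;
    std::map<allocation_type, uint64_t> used_, peak_;
};
```

Trading the atomics for one mutex is the simple-and-safe variant; the engine code below keeps atomics on the hot add/subtract path and only locks lazy initialization.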
diff --git a/inference-engine/thirdparty/clDNN/runtime/engine.cpp b/inference-engine/thirdparty/clDNN/runtime/engine.cpp
index 976e7bae595b89..3738ec2ae9d9b9 100644
--- a/inference-engine/thirdparty/clDNN/runtime/engine.cpp
+++ b/inference-engine/thirdparty/clDNN/runtime/engine.cpp
@@ -120,22 +120,51 @@ memory_ptr engine::share_surface(const layout& layout, shared_surface surf, uint32_t plane) {
 #endif  // _WIN32
 
 uint64_t engine::get_max_used_device_memory() const {
-    return peak_memory_usage.load();
+    uint64_t total_peak_memory_usage {0};
+    for (auto const& m : peak_memory_usage_map) {
+        total_peak_memory_usage += m.second.load();
+    }
+    return total_peak_memory_usage;
 }
 
-uint64_t engine::get_used_device_memory() const {
-    return memory_usage.load();
+uint64_t engine::get_max_used_device_memory(allocation_type type) const {
+    uint64_t peak_memory_usage {0};
+    auto iter = peak_memory_usage_map.find(type);
+    if (iter != peak_memory_usage_map.end()) {
+        peak_memory_usage = iter->second.load();
+    }
+    return peak_memory_usage;
 }
 
-void engine::add_memory_used(size_t bytes) {
-    memory_usage += bytes;
-    if (memory_usage > peak_memory_usage) {
-        peak_memory_usage = memory_usage.load();
+uint64_t engine::get_used_device_memory(allocation_type type) const {
+    uint64_t memory_usage {0};
+    auto iter = memory_usage_map.find(type);
+    if (iter != memory_usage_map.end()) {
+        memory_usage = iter->second.load();
     }
+    return memory_usage;
 }
 
-void engine::subtract_memory_used(size_t bytes) {
-    memory_usage -= bytes;
+void engine::add_memory_used(size_t bytes, allocation_type type) {
+    if (!memory_usage_map.count(type) && !peak_memory_usage_map.count(type)) {
+        static std::mutex m;
+        std::lock_guard<std::mutex> guard(m);
+        memory_usage_map[type] = 0;
+        peak_memory_usage_map[type] = 0;
+    }
+    memory_usage_map[type] += bytes;
+    if (memory_usage_map[type] > peak_memory_usage_map[type]) {
+        peak_memory_usage_map[type] = memory_usage_map[type].load();
+    }
+}
+
+void engine::subtract_memory_used(size_t bytes, allocation_type type) {
+    auto iter = memory_usage_map.find(type);
+    if (iter != memory_usage_map.end()) {
+        memory_usage_map[type] -= bytes;
+    } else {
+        throw std::runtime_error("Attempt to free unallocated memory");
+    }
 }
 
 std::shared_ptr<cldnn::engine> engine::create(engine_types engine_type,
diff --git a/inference-engine/thirdparty/clDNN/runtime/memory.cpp b/inference-engine/thirdparty/clDNN/runtime/memory.cpp
index 80a6ee980edd37..9a22d3a2ae9d86 100644
--- a/inference-engine/thirdparty/clDNN/runtime/memory.cpp
+++ b/inference-engine/thirdparty/clDNN/runtime/memory.cpp
@@ -20,27 +20,25 @@ namespace cldnn {
 
 memory::memory(engine* engine, const layout& layout, allocation_type type, bool reused)
     : _engine(engine), _layout(layout), _bytes_count(_layout.bytes_count()), _type(type), _reused(reused) {
     if (!_reused && _engine) {
-        _engine->add_memory_used(_bytes_count);
-    }
-
-    GPU_DEBUG_GET_INSTANCE(debug_config);
-    GPU_DEBUG_IF(debug_config->verbose >= 1) {
+        _engine->add_memory_used(_bytes_count, type);
+        GPU_DEBUG_GET_INSTANCE(debug_config);
+        GPU_DEBUG_IF(debug_config->verbose >= 1) {
GPU_DEBUG_COUT << "Allocate " << _bytes_count << " bytes of " << type << " allocation type" + << " (current=" << _engine->get_used_device_memory(type) << ";" + << " max=" << _engine->get_max_used_device_memory(type) << ")" << std::endl; + } } } memory::~memory() { if (!_reused && _engine) { - _engine->subtract_memory_used(_bytes_count); - } - - GPU_DEBUG_GET_INSTANCE(debug_config); - GPU_DEBUG_IF(debug_config->verbose >= 1) { - GPU_DEBUG_COUT << "Free " << _bytes_count << " bytes" - << " (current=" << _engine->get_used_device_memory() << ";" - << " max=" << _engine->get_max_used_device_memory() << ")" << std::endl; + _engine->subtract_memory_used(_bytes_count, _type); + GPU_DEBUG_GET_INSTANCE(debug_config); + GPU_DEBUG_IF(debug_config->verbose >= 1) { + GPU_DEBUG_COUT << "Free " << _bytes_count << " bytes of " << _type << " allocation type" + << " (current=" << _engine->get_used_device_memory(_type) << ";" + << " max=" << _engine->get_max_used_device_memory(_type) << ")" << std::endl; + } } } diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp index 4582f2ad06340e..e5e8bd01f09cea 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/memory_test.cpp @@ -403,7 +403,7 @@ TEST(memory_pool, shared_mem_pool_diff_batches) { network network_second(*engine, topo, bo); network_second.set_input_data("input", input_1); auto outputs_second = network_second.execute(); - EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)3928); + EXPECT_EQ(engine->get_max_used_device_memory(), (uint64_t)4328); } TEST(memory_pool, shared_dep_two_output) { diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt index 618f988a4c843e..b00272ac7e2061 100644 --- a/model-optimizer/automation/package_BOM.txt +++ b/model-optimizer/automation/package_BOM.txt @@ -52,11 +52,9 @@ extensions/back/ResultNormalizer.py extensions/back/ResultRename.py extensions/back/ReverseInputChannels.py extensions/back/RNNSequenceTypeRename.py -extensions/back/ScalarConstNormalize.py extensions/back/SelectBroadcast.py extensions/back/ShapeOfConstFolding.py extensions/back/ShuffleChannelPatternOptimization.py -extensions/back/ShufflenetReLUReorder.py extensions/back/SpecialNodesFinalization.py extensions/back/StridedSliceMasksNormalizer.py extensions/back/TopKNormalizer.py diff --git a/model-optimizer/extensions/back/ReduceMerge.py b/model-optimizer/extensions/back/ReduceMerge.py index 418f3a18ffc3b0..3ec28a86535c7d 100644 --- a/model-optimizer/extensions/back/ReduceMerge.py +++ b/model-optimizer/extensions/back/ReduceMerge.py @@ -5,7 +5,6 @@ import numpy as np -from extensions.back.ScalarConstNormalize import ScalarNormalize from extensions.ops.ReduceOps import reduce_map from mo.back.replacement import BackReplacementPattern from mo.front.common.partial_infer.utils import int64_array @@ -23,9 +22,6 @@ class ReduceMerge(BackReplacementPattern): enabled = True force_clean_up = True - def run_before(self): - return [ScalarNormalize] - @staticmethod def fuse_reduces(first_reduce, second_reduce): first_reduce_name = first_reduce.soft_get('name', first_reduce.id) diff --git a/model-optimizer/extensions/back/ReverseInputChannels.py b/model-optimizer/extensions/back/ReverseInputChannels.py index 088130e0bf26e1..ce0cbb952ae363 100644 --- a/model-optimizer/extensions/back/ReverseInputChannels.py +++ 
b/model-optimizer/extensions/back/ReverseInputChannels.py
@@ -114,7 +114,7 @@ def pass_rc_through(node: Node, reverse_channels: Node):
         returns boolean value whether we should continue propagating current ReverseChannels operation down or not
         """
         # detaching reverse_channels node from the graph
-        if reverse_channels.is_in_port_connected(0) and reverse_channels.is_out_port_connected(0)\
+        if reverse_channels.is_in_port_connected(0) and reverse_channels.is_out_port_connected(0) \
                 and node.is_out_port_connected(0):
             reverse_channels.out_port(0).get_connection().set_source(
                 reverse_channels.in_port(0).get_connection().get_source())
@@ -137,7 +137,7 @@ def pass_rc_through_conv(node, reverse_channels):
     ReverseChannels  weights   previous_op  ReverseChannels
            \     /                    \     /
             Conv                       Conv
-
+
     For grouped convolution:
 
     BEFORE                          AFTER
@@ -295,12 +295,11 @@ class ReverseChannelsPropagationUp(BackReplacementPattern):
         'Subtract': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc),
         'Pow': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc),
         'Convert': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc),
-
-        'Pad': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through(node, rc),
+        'Pad': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_pad(node, rc),
     }
 
     @staticmethod
-    def lift_up_through(node: Node, reverse_channels: Node):
+    def lift_up_through_pad(node: Node, reverse_channels: Node):
         r"""
         BEFORE                          AFTER
@@ -308,25 +307,29 @@ def lift_up_through(node: Node, reverse_channels: Node):
 
                                         previous_op
              \
         previous_op  previous_op    ReverseChannels  previous_op
                  \     /                       \     /
-                  Node                           Node
+                  Pad                            Pad
                    |                              |
         ReverseChannels                       next_op
                    |
                 next_op
 
-        returns boolean value whatever we should continue propagating current ReverseChannels operation up or not
+        returns two objects:
+        first - boolean value whether we should continue propagating current ReverseChannels operation up or not
+        second - list of ReverseChannels operations that were produced while propagating reverse_channels up
         """
         if node.is_in_port_connected(0):
             node_input_port_0 = node.in_port(0)
-            reverse_channels_out_npde = reverse_channels.out_port(0).get_connection().get_destination().node
+            reverse_channels_out_nodes = reverse_channels.out_port(0).get_connection().get_destinations()
             reverse_channels.out_port(0).disconnect()
-
+            reverse_channels.in_port(0).disconnect()
             src = node_input_port_0.get_connection().get_source()
             node_input_port_0.get_connection().set_source(reverse_channels.out_port(0))
             src.connect(reverse_channels.in_port(0))
-            node.out_port(0).get_connection().set_destination(reverse_channels_out_npde.in_port(0))
-            return True
-        return False
+            for reverse_channels_destination in reverse_channels_out_nodes:
+                node.out_port(0).get_connection().add_destination(reverse_channels_destination)
+
+            return True, [reverse_channels]
+        return False, []
 
     @staticmethod
     def lift_up_through_eltwise(node: Node, reverse_channels: Node):
diff --git a/model-optimizer/extensions/back/ScalarConstNormalize.py b/model-optimizer/extensions/back/ScalarConstNormalize.py
deleted file mode 100644
index 923f68b8929dbf..00000000000000
--- a/model-optimizer/extensions/back/ScalarConstNormalize.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (C) 2018-2021 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-from extensions.back.ReshapeMutation import ReshapeMutation
-from mo.back.replacement import BackReplacementPattern
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.tf.graph_utils import
create_op_node_with_second_input -from mo.graph.graph import Graph -from mo.ops.reshape import Reshape - - -# Temporary nGraph workaround. TODO: REMOVE -class ScalarNormalize(BackReplacementPattern): - enabled = False - force_clean_up = True - - def run_before(self): - return [ReshapeMutation] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', type='Const'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - if node.value.ndim == 0: - reshape = create_op_node_with_second_input(graph, Reshape, int64_array([1]), - {'name': node.id + '/Dims'}) - node.out_port(0).get_connection().set_source(reshape.out_port(0)) - node.out_port(0).connect(reshape.in_port(0)) - reshape.infer(reshape) diff --git a/model-optimizer/extensions/back/ShufflenetReLUReorder.py b/model-optimizer/extensions/back/ShufflenetReLUReorder.py deleted file mode 100644 index 89cd9d7629790a..00000000000000 --- a/model-optimizer/extensions/back/ShufflenetReLUReorder.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from mo.back.replacement import BackReplacementPattern -from mo.graph.graph import Graph - - -class ShufflenetReLUReorder(BackReplacementPattern): - """ - This pass is workaround for GPU plugin - """ - enabled = False - - def pattern(self): - return dict( - nodes=[ - ('relu', dict(kind='op', type='ReLU')), - ('relu_data', dict(kind='data')), - ('reshape1', dict(kind='op', type='Reshape')), - ('reshape1_data', dict(kind='data')), - ('transpose', dict(kind='op', type='Transpose')), - ('transpose_data', dict(kind='data')), - ('reshape2', dict(kind='op', type='Reshape')), - ('reshape2_data', dict(kind='data')), - ('conv', dict(kind='op', type='Convolution')) - ], - edges=[('relu', 'relu_data'), - ('relu_data', 'reshape1'), - ('reshape1', 'reshape1_data'), - ('reshape1_data', 'transpose'), - ('transpose', 'transpose_data'), - ('transpose_data', 'reshape2'), - ('reshape2', 'reshape2_data'), - ('reshape2_data', 'conv'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - relu = match['relu'] - reshape1 = match['reshape1'] - reshape2_data = match['reshape2_data'] - conv = match['conv'] - - if np.max(conv.pad) == 0: - return - - relu_input = relu.in_node() - - # Disconnect InputData-x->ReLU->Data-x->Reshape1 - edge_attrs = graph.get_edge_data(relu.out_node().id, reshape1.id)[0] - graph.remove_edge(relu_input.id, relu.id) - graph.remove_edge(relu.out_node().id, reshape1.id) - - # Connect InputData-->Reshape1 - graph.add_edges_from([(relu_input.id, reshape1.id, edge_attrs)]) - - # Insert ReLU: Reshape2Data->ReLU->Data->Convolution - edge_attrs = graph.get_edge_data(reshape2_data.id, conv.id)[0] - graph.remove_edge(reshape2_data.id, conv.id) - graph.add_edges_from([(reshape2_data.id, relu.id, {'in': 0}), (relu.out_node().id, conv.id, edge_attrs)]) diff --git a/model-optimizer/extensions/back/TopKNormalizer.py b/model-optimizer/extensions/back/TopKNormalizer.py index 6fc3f33e3a9401..90c43d641a187b 100644 --- a/model-optimizer/extensions/back/TopKNormalizer.py +++ b/model-optimizer/extensions/back/TopKNormalizer.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 from extensions.back.Reshape0DToSqueeze import Reshape0DToSqueeze -from extensions.back.ScalarConstNormalize import ScalarNormalize from mo.back.replacement import BackReplacementPattern from mo.front.common.partial_infer.utils import int64_array from mo.front.tf.graph_utils 
import create_op_node_with_second_input @@ -23,9 +22,6 @@ class TopKNormalizer(BackReplacementPattern): """ enabled = True - def run_after(self): - return [ScalarNormalize] - def run_before(self): return [Reshape0DToSqueeze] diff --git a/model-optimizer/extensions/middle/MulFakeQuantizeFuse.py b/model-optimizer/extensions/middle/MulFakeQuantizeFuse.py index 70045685e958c3..3bbd670ed16428 100644 --- a/model-optimizer/extensions/middle/MulFakeQuantizeFuse.py +++ b/model-optimizer/extensions/middle/MulFakeQuantizeFuse.py @@ -72,6 +72,8 @@ def replace_pattern(self, graph: Graph, match: Dict[str, Node]): return mul_val = value_port.data.get_value() + if np.any(mul_val <= 0): + return # Direct modifications to quantize 1-st and 2-nd port inputs are performed. # So the data nodes at those inputs shouldn't have more than 1 consumer maximum 2 consumers to the same @@ -80,33 +82,6 @@ def replace_pattern(self, graph: Graph, match: Dict[str, Node]): # TODO: need some special processing for values that exactly equal to threshold - # Need to flip output_low and output_high for those elements that have multiplier < 0 - if np.all(mul_val < 0): - mi_o_node = quantize.in_port(3).get_source() - ma_o_node = quantize.in_port(4).get_source() - - quantize.in_port(3).disconnect() - quantize.in_port(4).disconnect() - - mi_o_node.connect(quantize.in_port(4)) - ma_o_node.connect(quantize.in_port(3)) - - elif np.any(mul_val < 0): - # Flipping values should be done on exclusive inputs of FakeQuantize node, so we duplicate them if needed - resolve_shared_inputs(node=quantize, port_ids_to_duplicate=[3, 4]) - - # Successful flipping will be done on broadcasted arrays - mi_o_val = quantize.in_port(3).data.get_value() - ma_o_val = quantize.in_port(4).data.get_value() - mul_val, mi_o_val, ma_o_val = [np.array(a) for a in np.broadcast_arrays(mul_val, mi_o_val, ma_o_val)] - - neg_idx = np.where(mul_val < 0) - mi_o_val[neg_idx], ma_o_val[neg_idx] = ma_o_val[neg_idx], mi_o_val[neg_idx] - - # TODO: revert broadcasting where unnecessary - quantize.in_port(3).data.set_value(mi_o_val) - quantize.in_port(4).data.set_value(ma_o_val) - quantize.in_port(1).data.set_value(quantize.in_port(1).data.get_value() / mul_val) if quantize.in_node(1).id != quantize.in_node(2).id: quantize.in_port(2).data.set_value(quantize.in_port(2).data.get_value() / mul_val) diff --git a/model-optimizer/unit_tests/extensions/back/ReverseInputChannels_test.py b/model-optimizer/unit_tests/extensions/back/ReverseInputChannels_test.py index 8ac90c8708fdaf..634f3ea9aef39c 100644 --- a/model-optimizer/unit_tests/extensions/back/ReverseInputChannels_test.py +++ b/model-optimizer/unit_tests/extensions/back/ReverseInputChannels_test.py @@ -32,6 +32,7 @@ **regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}), **regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10], {'type': 'ReverseChannels', 'axis': 1}), **result('result'), + **result('result2'), } class ReverseInputChannelsTest(unittest.TestCase): @@ -64,7 +65,7 @@ def test_lift_up_through_eltwise(self): ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels) self.check_graph_attrs(graph, ['placeholder1', 'placeholder2']) - def test_lift_up_through(self): + def test_lift_up_through_pad(self): graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'), *connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'), *connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'), @@ -74,7 +75,25 @@ def test_lift_up_through(self): node = 
Node(graph, 'pad') reverse_channels = Node(graph, 'reverse_channels') - ReverseChannelsPropagationUp.lift_up_through(node, reverse_channels) + keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_pad(node, reverse_channels) + self.assertTrue(keep_moving_up is True) + self.assertTrue(len(new_reverses) == 1) + self.check_graph_attrs(graph, ['placeholder']) + + + def test_lift_up_through_pad2(self): + graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'), + *connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'), + *connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'), + *connect('reverse_channels:0', '0:result'), *connect('reverse_channels:0', '0:result2')]) + self.set_graph_attrs(graph, ['placeholder']) + + node = Node(graph, 'pad') + reverse_channels = Node(graph, 'reverse_channels') + + keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_pad(node, reverse_channels) + self.assertTrue(keep_moving_up is True) + self.assertTrue(len(new_reverses) == 1) self.check_graph_attrs(graph, ['placeholder']) diff --git a/model-optimizer/unit_tests/extensions/back/ShufflenetReLUReorder_test.py b/model-optimizer/unit_tests/extensions/back/ShufflenetReLUReorder_test.py deleted file mode 100644 index 5b154f22c9d81a..00000000000000 --- a/model-optimizer/unit_tests/extensions/back/ShufflenetReLUReorder_test.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from extensions.back.ShufflenetReLUReorder import ShufflenetReLUReorder -from mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the -# dictionary with node attributes. 
-nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ReLU - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'}, - 'relu_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Reshape layers - 'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_3': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Transpose layer - 'transpose_1': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose'}, - 'transpose_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Conv layer - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2d'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'}, -} - - -class ShufflenetReLUReorderTests(unittest.TestCase): - def test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - ('relu_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1': {'pad': np.array([1, 1])} - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - ('relu_1_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - }) - - pattern = ShufflenetReLUReorder() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_2_neg(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': 
{'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'conv_1'), - ('conv_1', 'conv_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'relu_1_data': {'shape': np.array([1, 227, 227, 112])}, - 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])}, - 'transpose_1': {'order': np.array([0, 1, 3, 2])}, - 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])}, - 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 112])}, - }) - - pattern = ShufflenetReLUReorder() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/model-optimizer/unit_tests/extensions/middle/MulQuantizeFuse_test.py b/model-optimizer/unit_tests/extensions/middle/MulQuantizeFuse_test.py index 34b3fccf810624..0fafb51addfcfa 100644 --- a/model-optimizer/unit_tests/extensions/middle/MulQuantizeFuse_test.py +++ b/model-optimizer/unit_tests/extensions/middle/MulQuantizeFuse_test.py @@ -110,7 +110,7 @@ def test_1(self): def test_2(self): graph = build_graph(nodes, edges, { 'mul': {'can_be_fused': True}, - 'mul_const_data': {'shape': np.array([1]), 'value': np.array([-1])}, + 'mul_const_data': {'shape': np.array([1]), 'value': np.array([2])}, 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, 'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])}, 'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])}, @@ -118,11 +118,11 @@ def test_2(self): graph.stage = 'middle' graph_ref = build_graph(nodes, edges_ref, { 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mul_const_data': {'shape': np.array([1]), 'value': np.array([-1])}, - 'mi_o_data': {'shape': np.array([1]), 'value': np.array([1])}, - 'ma_o_data': {'shape': np.array([1]), 'value': np.array([0])}, - 'mi_i_data': {'shape': np.array([1]), 'value': np.array([10])}, - 'ma_i_data': {'shape': np.array([1]), 'value': np.array([-10])}, + 'mul_const_data': {'shape': np.array([1]), 'value': np.array([2])}, + 'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])}, + 'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])}, + 'mi_i_data': {'shape': np.array([1]), 'value': np.array([-5])}, + 'ma_i_data': {'shape': np.array([1]), 'value': np.array([5])}, }, nodes_with_edges_only=True) MulFakeQuantizeFuse().find_and_replace_pattern(graph) @@ -131,23 +131,36 @@ def test_2(self): self.assertTrue(flag, resp) - def test_3(self): + def test_negative_1(self): + graph = build_graph(nodes, edges, nodes_with_edges_only=True) + graph.stage = 'middle' + graph_ref = build_graph(nodes, edges, nodes_with_edges_only=True) + + MulFakeQuantizeFuse().find_and_replace_pattern(graph) + (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) + + 
self.assertTrue(flag, resp)
+
+    def test_negative_2(self):
+        graph = build_graph(nodes, edges, {'mul': {'can_be_fused': False}}, nodes_with_edges_only=True)
+        graph.stage = 'middle'
+        graph_ref = build_graph(nodes, edges, {'mul': {'can_be_fused': False}}, nodes_with_edges_only=True)
+
+        MulFakeQuantizeFuse().find_and_replace_pattern(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
+
+        self.assertTrue(flag, resp)
+
+    def test_negative_3(self):
         graph = build_graph(nodes, edges, {
             'mul': {'can_be_fused': True},
-            'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[-1]], [[1]], [[-1]]])},
+            'mul_const_data': {'shape': np.array([1]), 'value': np.array([-1])},
             'quantize_data': {'shape': np.array([2, 3, 4, 4])},
-            'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))},
-            'ma_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))},
+            'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])},
+            'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])},
         }, nodes_with_edges_only=True)
         graph.stage = 'middle'
-        graph_ref = build_graph(nodes, edges_ref, {
-            'quantize_data': {'shape': np.array([2, 3, 4, 4])},
-            'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[-1]], [[1]], [[-1]]])},
-            'mi_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[1]], [[0]], [[1]]])},
-            'ma_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[0]], [[1]], [[0]]])},
-            'mi_i_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[10]], [[-10]], [[10]]])},
-            'ma_i_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.array([[[-10]], [[10]], [[-10]]])},
-        }, nodes_with_edges_only=True)
+        graph_ref = graph.copy()
 
         MulFakeQuantizeFuse().find_and_replace_pattern(graph)
@@ -155,22 +168,36 @@
         self.assertTrue(flag, resp)
 
-    def negative_test_1(self):
-        graph = build_graph(nodes, edges, nodes_with_edges_only=True)
+    def test_negative_4(self):
+        graph = build_graph(nodes, edges, {
+            'mul': {'can_be_fused': True},
+            'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[-1]], [[1]], [[-1]]])},
+            'quantize_data': {'shape': np.array([2, 3, 4, 4])},
+            'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))},
+            'ma_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))},
+        }, nodes_with_edges_only=True)
         graph.stage = 'middle'
-        graph_ref = build_graph(nodes, edges, nodes_with_edges_only=True)
+        graph_ref = graph.copy()
 
         MulFakeQuantizeFuse().find_and_replace_pattern(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
 
         self.assertTrue(flag, resp)
 
-    def negative_test_2(self):
-        graph = build_graph(nodes, edges, {'mul': {'can_be_fused': False}}, nodes_with_edges_only=True)
+    def test_negative_5(self):
+        graph = build_graph(nodes, edges, {
+            'mul': {'can_be_fused': True},
+            'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[0]], [[1]], [[2]]])},
+            'quantize_data': {'shape': np.array([2, 3, 4, 4])},
+            'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))},
+            'ma_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))},
+        }, nodes_with_edges_only=True)
         graph.stage = 'middle'
-        graph_ref = build_graph(nodes, edges, {'mul': {'can_be_fused': False}}, nodes_with_edges_only=True)
+        graph_ref = graph.copy()
 
         MulFakeQuantizeFuse().find_and_replace_pattern(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
 
         self.assertTrue(flag, resp)
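The MulFakeQuantizeFuse change these tests cover is easier to follow with the algebra written out. Fusing a preceding Mul by a constant s into FakeQuantize rescales the input bounds il, ih; the sign of s decides whether the bounds (and, in the removed branch, the output bounds ol, oh) must swap:

```latex
FQ(s \cdot x;\; il,\, ih,\, ol,\, oh) =
\begin{cases}
FQ(x;\; il/s,\; ih/s,\; ol,\; oh) & s > 0 \\
FQ(x;\; ih/s,\; il/s,\; oh,\; ol) & s < 0 \quad \text{(bound-flipping branch removed by this PR)} \\
\text{not invertible} & s = 0
\end{cases}
```

The new `np.any(mul_val <= 0)` guard declines the fusion entirely for both problematic cases (s = 0 would divide the bounds by zero), which is why the negative- and zero-multiplier scenarios above were rewritten as `test_negative_*` cases that expect the graph to remain unchanged.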
diff --git a/ngraph/core/include/ngraph/check.hpp b/ngraph/core/include/ngraph/check.hpp
index 88606586537172..ea80e0c2442889 100644
--- a/ngraph/core/include/ngraph/check.hpp
+++ b/ngraph/core/include/ngraph/check.hpp
@@ -9,20 +9,20 @@
 #include
 
 #include "ngraph/except.hpp"
-#include "openvino/core/check.hpp"
+#include "openvino/core/except.hpp"
 
 namespace ngraph {
 using ov::write_all_to_stream;
-using ov::CheckFailure;
+using CheckFailure = ov::AssertFailure;
 using ov::CheckLocInfo;
 }  // namespace ngraph
 
-#define NGRAPH_CHECK_HELPER2(exc_class, ctx, check, ...) OV_CHECK_HELPER2(exc_class, ctx, check, __VA_ARGS__)
+#define NGRAPH_CHECK_HELPER2(exc_class, ctx, check, ...) OPENVINO_ASSERT_HELPER2(exc_class, ctx, check, __VA_ARGS__)
 
-#define NGRAPH_CHECK_HELPER1(exc_class, ctx, check) OV_CHECK_HELPER1(exc_class, ctx, check)
+#define NGRAPH_CHECK_HELPER1(exc_class, ctx, check) OPENVINO_ASSERT_HELPER1(exc_class, ctx, check)
 
-#define NGRAPH_CHECK(...) OV_CHECK(__VA_ARGS__)
+#define NGRAPH_CHECK(...) OPENVINO_ASSERT(__VA_ARGS__)
 
 #define NGRAPH_UNREACHABLE(...) NGRAPH_CHECK(false, "Unreachable: ", __VA_ARGS__)
-#define NGRAPH_CHECK_HELPER(exc_class, ctx, ...) OV_CHECK_HELPER(exc_class, ctx, __VA_ARGS__)
+#define NGRAPH_CHECK_HELPER(exc_class, ctx, ...) OPENVINO_ASSERT_HELPER(exc_class, ctx, __VA_ARGS__)
diff --git a/ngraph/core/include/ngraph/op/util/slice_plan.hpp b/ngraph/core/include/ngraph/op/util/slice_plan.hpp
index cd636450d1ec0a..746200d1048853 100644
--- a/ngraph/core/include/ngraph/op/util/slice_plan.hpp
+++ b/ngraph/core/include/ngraph/op/util/slice_plan.hpp
@@ -54,3 +54,5 @@ SlicePlan NGRAPH_API make_slice_plan(const Shape& input_shape,
                                      const AxisSet& shrink_axis_mask,
                                      const AxisSet& ellipsis_mask);
 }  // namespace ngraph
+
+using ngraph::make_slice_plan;
diff --git a/ngraph/core/include/ngraph/partial_shape.hpp b/ngraph/core/include/ngraph/partial_shape.hpp
index d3bfb76dd4cbb9..298fa7881fea7e 100644
--- a/ngraph/core/include/ngraph/partial_shape.hpp
+++ b/ngraph/core/include/ngraph/partial_shape.hpp
@@ -4,10 +4,13 @@
 #pragma once
 
+#include "ngraph/attribute_adapter.hpp"
 #include "ngraph/dimension.hpp"
+#include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/rank.hpp"
-#include "openvino/core/partial_shape.hpp"
+#include "ngraph/shape.hpp"
+#include "openvino/core/shape.hpp"
 
 namespace ngraph {
-using ov::PartialShape;
+using PartialShape = ov::Shape;
 }  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/runtime/host_tensor.hpp b/ngraph/core/include/ngraph/runtime/host_tensor.hpp
index 157fb27be3bd57..e88330a8a2b6be 100644
--- a/ngraph/core/include/ngraph/runtime/host_tensor.hpp
+++ b/ngraph/core/include/ngraph/runtime/host_tensor.hpp
@@ -7,6 +7,7 @@
 #include
 
 #include "ngraph/descriptor/output.hpp"
+#include "ngraph/partial_shape.hpp"
 #include "ngraph/runtime/tensor.hpp"
 #include "ngraph/type/element_type.hpp"
 #include "ngraph/type/element_type_traits.hpp"
diff --git a/ngraph/core/include/ngraph/shape.hpp b/ngraph/core/include/ngraph/shape.hpp
index 1a12974bd7a73e..2799a4f9647b4f 100644
--- a/ngraph/core/include/ngraph/shape.hpp
+++ b/ngraph/core/include/ngraph/shape.hpp
@@ -12,102 +12,13 @@
 #include "ngraph/axis_set.hpp"
 #include "ngraph/ngraph_visibility.hpp"
 #include "ngraph/strides.hpp"
+#include "openvino/core/static_shape.hpp"
 
 namespace ngraph {
-/// \brief Shape for a tensor.
-class Shape : public std::vector<size_t> {
-public:
-    NGRAPH_API Shape();
-
-    NGRAPH_API Shape(const std::initializer_list<size_t>& axis_lengths);
-
-    NGRAPH_API Shape(const std::vector<size_t>& axis_lengths);
-
-    NGRAPH_API Shape(const Shape& axis_lengths);
-
-    NGRAPH_API explicit Shape(size_t n, size_t initial_value = 0);
-
-    NGRAPH_API ~Shape();
-
-    template <class InputIterator>
-    Shape(InputIterator first, InputIterator last) : std::vector<size_t>(first, last) {}
-
-    NGRAPH_API Shape& operator=(const Shape& v);
-    NGRAPH_API Shape& operator=(Shape&& v) noexcept;
-};
-
-/// Number of elements in spanned by a shape
-template <typename SHAPE_TYPE>
-size_t shape_size(const SHAPE_TYPE& shape) {
-    size_t size = 1;
-    for (auto d : shape) {
-        size *= d;
-    }
-    return size;
-}
-
-/// Number of elements in a subset of dimensions of a shape.
-/// Returns a product of dimensions in a range [start_dim;end_dim)
-template <typename ForwardIt>
-size_t shape_size(ForwardIt start_dim, const ForwardIt end_dim) {
-    static_assert(std::is_arithmetic<typename std::iterator_traits<ForwardIt>::value_type>::value,
-                  "shape_size expects 2 forward iterators as inputs. value_type of those iterators has to be an "
-                  "arithmetic type so that they can be used in multiplication operation.");
-
-    return std::accumulate(start_dim,
-                           end_dim,
-                           typename std::iterator_traits<ForwardIt>::value_type{1},
-                           std::multiplies<typename std::iterator_traits<ForwardIt>::value_type>());
-}
-
-/// Row-major strides for a shape
-template <typename SHAPE_TYPE>
-std::vector<size_t> row_major_strides(const SHAPE_TYPE& shape) {
-    std::vector<size_t> strides(shape.size());
-    size_t s = 1;
-    auto st = strides.rbegin();
-    for (auto d = shape.rbegin(); d != shape.rend() && st != strides.rend(); d++, st++) {
-        *st = s;
-        s *= *d;
-    }
-    return strides;
-}
-
-template <typename SHAPE_TYPE>
-size_t row_major_stride(const SHAPE_TYPE& shape, size_t axis) {
-    size_t s = 1;
-    for (size_t i = shape.size(); i-- > axis + 1;) {
-        s *= shape[i];
-    }
-    return s;
-}
-
-template <typename SHAPE_TYPE>
-inline bool is_scalar(const SHAPE_TYPE& shape) {
-    return 0 == shape.size();
-}
-
-template <typename SHAPE_TYPE>
-inline bool is_vector(const SHAPE_TYPE& shape) {
-    return 1 == shape.size();
-}
-
-NGRAPH_API
-std::ostream& operator<<(std::ostream& s, const Shape& shape);
+using Shape = ov::StaticShape;
+using ov::is_scalar;
+using ov::is_vector;
+using ov::row_major_stride;
+using ov::row_major_strides;
+using ov::shape_size;
 }  // namespace ngraph
-
-namespace ov {
-
-template <>
-class NGRAPH_API AttributeAdapter<ngraph::Shape>
-    : public IndirectVectorValueAccessor<ngraph::Shape, std::vector<int64_t>>
-
-{
-public:
-    AttributeAdapter(ngraph::Shape& value) : IndirectVectorValueAccessor<ngraph::Shape, std::vector<int64_t>>(value) {}
-    static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<Shape>", 0};
-    const DiscreteTypeInfo& get_type_info() const override {
-        return type_info;
-    }
-};
-}  // namespace ov
diff --git a/ngraph/core/include/openvino/core/attribute_visitor.hpp b/ngraph/core/include/openvino/core/attribute_visitor.hpp
index 5d2b3175c8df91..2a0cc4adef6b24 100644
--- a/ngraph/core/include/openvino/core/attribute_visitor.hpp
+++ b/ngraph/core/include/openvino/core/attribute_visitor.hpp
@@ -8,7 +8,7 @@
 #include
 #include
 
-#include "openvino/core/partial_shape.hpp"
+#include "openvino/core/shape.hpp"
 #include "openvino/core/type.hpp"
 #include "openvino/core/type/element_type.hpp"
diff --git a/ngraph/core/include/openvino/core/check.hpp b/ngraph/core/include/openvino/core/check.hpp
deleted file mode 100644
index 1a20a449cab5ab..00000000000000
--- a/ngraph/core/include/openvino/core/check.hpp
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright (C) 2018-2021 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <exception>
-#include <sstream>
-#include <vector>
-
-#include "openvino/core/core_visibility.hpp"
-#include
"openvino/core/except.hpp" - -namespace ov { -static inline std::ostream& write_all_to_stream(std::ostream& str) { - return str; -} -template -static inline std::ostream& write_all_to_stream(std::ostream& str, const T& arg, TS&&... args) { - return write_all_to_stream(str << arg, args...); -} - -struct CheckLocInfo { - const char* file; - int line; - const char* check_string; -}; - -/// Base class for check failure exceptions. -class OPENVINO_API CheckFailure : public Exception { -public: - CheckFailure(const CheckLocInfo& check_loc_info, const std::string& context_info, const std::string& explanation) - : Exception(make_what(check_loc_info, context_info, explanation)) {} - -private: - static std::string make_what(const CheckLocInfo& check_loc_info, - const std::string& context_info, - const std::string& explanation); -}; -} // namespace ov - -// -// Helper macro for defining custom check macros, which throw custom exception classes and provide -// useful context information (the check condition, source filename, line number, and any domain- -// specific context information [e.g., a summary of the node that was being processed at the time -// of the check]). -// -// For example (actually implemented in node.cpp), let's say we want to define a macro for -// checking conditions during node validation, usable as follows: -// -// NODE_VALIDATION_CHECK(node_being_checked, -// node_being_checked->get_input_shape(0).size() == 1, -// "Node must have an input rank of 1, but got ", -// node_being_checked->get_input_shape(0).size(), "."); -// -// In case of failure, this will throw an exception of type NodeValidationFailure with a what() -// string something like: -// -// Check 'node_being_checked->get_input_shape(0).size() == 1' failed at foo.cpp:123: -// While validating node 'Broadcast[Broadcast_10](Reshape_9: float{1,3,4,5}) -> (??)': -// Node must have an input of rank 1, but got 2. -// -// To implement this, he first step is to define a subclass of CheckFailure (let's say it's called -// MyFailure), which must have a constructor of the form: -// -// MyFailure(const CheckLocInfo& check_loc_info, -// T context_info, // "T" can be any type; you'll supply a function to convert "T" -// // to std::string -// const std::string& explanation) -// -// Here, we define a custom class for node validation failures as follows: -// -// static std::string node_validation_failure_loc_string(const Node* node) -// { -// std::stringstream ss; -// ss << "While validating node '" << *node << "'"; -// return ss.str(); -// } -// -// class NodeValidationFailure : public CheckFailure -// { -// public: -// NodeValidationFailure(const CheckLocInfo& check_loc_info, -// const Node* node, -// const std::string& explanation) -// : CheckFailure(check_loc_info, node_validation_failure_loc_string(node), explanation) -// { -// } -// }; -// -// Then, we define the macro NODE_VALIDATION_CHECK as follows: -// -// #define NODE_VALIDATION_CHECK(node, cond, ...) -// OV_CHECK_HELPER(::ov::NodeValidationFailure, (node), (cond), ##__VA_ARGS__) -// -// The macro NODE_VALIDATION_CHECK can now be called on any condition, with a Node* pointer -// supplied to generate an informative error message via node_validation_failure_loc_string(). -// -// Take care to fully qualify the exception class name in the macro body. -// -// The "..." may be filled with expressions of any type that has an "operator<<" overload for -// insertion into std::ostream. -// -#define OV_CHECK_HELPER2(exc_class, ctx, check, ...) 
\ - do { \ - if (!(check)) { \ - ::std::stringstream ss___; \ - ::ov::write_all_to_stream(ss___, __VA_ARGS__); \ - throw exc_class((::ov::CheckLocInfo{__FILE__, __LINE__, #check}), (ctx), ss___.str()); \ - } \ - } while (0) - -#define OV_CHECK_HELPER1(exc_class, ctx, check) \ - do { \ - if (!(check)) { \ - throw exc_class((::ov::CheckLocInfo{__FILE__, __LINE__, #check}), (ctx), ""); \ - } \ - } while (0) - -/// \brief Macro to check whether a boolean condition holds. -/// \param cond Condition to check -/// \param ... Additional error message info to be added to the error message via the `<<` -/// stream-insertion operator. Note that the expressions here will be evaluated lazily, -/// i.e., only if the `cond` evalutes to `false`. -/// \throws ::ov::CheckFailure if `cond` is false. -#define OV_CHECK(...) OV_CHECK_HELPER(::ov::CheckFailure, "", __VA_ARGS__) - -/// \brief Macro to signal a code path that is unreachable in a successful execution. It's -/// implemented with OV_CHECK macro. -/// \param ... Additional error message that should describe why that execution path is unreachable. -/// \throws ::ov::CheckFailure if the macro is executed. -#define OV_UNREACHABLE(...) OV_CHECK(false, "Unreachable: ", __VA_ARGS__) -#define OV_CHECK_HELPER(exc_class, ctx, ...) CALL_OVERLOAD(OV_CHECK_HELPER, exc_class, ctx, __VA_ARGS__) - -#define GLUE(x, y) x y - -#define RETURN_ARG_COUNT(_1_, \ - _2_, \ - _3_, \ - _4_, \ - _5_, \ - _6, \ - _7, \ - _8, \ - _9, \ - _10, \ - _11, \ - _12, \ - _13, \ - _14, \ - _15, \ - _16, \ - _17, \ - _18, \ - _19, \ - _20, \ - _21, \ - _22, \ - _23, \ - _24, \ - _25, \ - count, \ - ...) \ - count -#define EXPAND_ARGS(args) RETURN_ARG_COUNT args -#define COUNT_ARGS_MAXN(...) \ - EXPAND_ARGS((__VA_ARGS__, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0)) - -#define OVERLOAD_MACRO2(name, count) name##count -#define OVERLOAD_MACRO1(name, count) OVERLOAD_MACRO2(name, count) -#define OVERLOAD_MACRO(name, count) OVERLOAD_MACRO1(name, count) - -#define CALL_OVERLOAD(name, exc_class, ctx, ...) 
\
-    GLUE(OVERLOAD_MACRO(name, COUNT_ARGS_MAXN(__VA_ARGS__)), (exc_class, ctx, __VA_ARGS__))
diff --git a/ngraph/core/include/openvino/core/coordinate.hpp b/ngraph/core/include/openvino/core/coordinate.hpp
index 28dd5196030b2d..055265bb74e7ad 100644
--- a/ngraph/core/include/openvino/core/coordinate.hpp
+++ b/ngraph/core/include/openvino/core/coordinate.hpp
@@ -17,7 +17,7 @@ class Coordinate : public std::vector<size_t> {
     OPENVINO_API Coordinate();
     OPENVINO_API Coordinate(const std::initializer_list<size_t>& axes);
 
-    OPENVINO_API Coordinate(const ngraph::Shape& shape);
+    OPENVINO_API Coordinate(const StaticShape& shape);
 
     OPENVINO_API Coordinate(const std::vector<size_t>& axes);
diff --git a/ngraph/core/include/openvino/core/descriptor/input.hpp b/ngraph/core/include/openvino/core/descriptor/input.hpp
index 437b44f6a604a6..3d488f4ed02838 100644
--- a/ngraph/core/include/openvino/core/descriptor/input.hpp
+++ b/ngraph/core/include/openvino/core/descriptor/input.hpp
@@ -92,10 +92,10 @@ class OPENVINO_API Input {
     }
 
     /// \return the shape of the connected output
-    const ngraph::Shape& get_shape() const;
+    const StaticShape& get_shape() const;
 
     /// \return the partial shape of the connected output
-    const PartialShape& get_partial_shape() const;
+    const Shape& get_partial_shape() const;
 
     /// \return the element type of the connected output
     const element::Type& get_element_type() const;
diff --git a/ngraph/core/include/openvino/core/descriptor/output.hpp b/ngraph/core/include/openvino/core/descriptor/output.hpp
index c4ec21d5ca67b9..82c7a541d2e3bb 100644
--- a/ngraph/core/include/openvino/core/descriptor/output.hpp
+++ b/ngraph/core/include/openvino/core/descriptor/output.hpp
@@ -59,10 +59,10 @@ class OPENVINO_API Output {
         return m_rt_info;
     }
     /// \return the shape of the output
-    const ngraph::Shape& get_shape() const;
+    const StaticShape& get_shape() const;
 
     /// \return the partial shape of the output
-    const PartialShape& get_partial_shape() const;
+    const Shape& get_partial_shape() const;
 
     /// \return the element type of the output
     const element::Type& get_element_type() const;
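A caller-side sketch of what this rename means in practice, assuming ov::Shape keeps the is_static() query that PartialShape offered under its old name; inspect() is a hypothetical helper, not part of this PR:

```cpp
#include "openvino/core/descriptor/output.hpp"

void inspect(const ov::descriptor::Output& out) {
    // ov::Shape is now the possibly-dynamic shape formerly named PartialShape ...
    const ov::Shape& pshape = out.get_partial_shape();
    if (pshape.is_static()) {
        // ... and ov::StaticShape is the fully static shape formerly named Shape,
        // so get_shape() remains meaningful only once all dimensions are known.
        const ov::StaticShape& shape = out.get_shape();
        (void)shape;
    }
}
```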
diff --git a/ngraph/core/include/openvino/core/descriptor/tensor.hpp b/ngraph/core/include/openvino/core/descriptor/tensor.hpp
index cc4c196c3e562d..879abeafa11c9e 100644
--- a/ngraph/core/include/openvino/core/descriptor/tensor.hpp
+++ b/ngraph/core/include/openvino/core/descriptor/tensor.hpp
@@ -12,7 +12,7 @@
 #include "ngraph/shape.hpp"
 #include "openvino/core/core_visibility.hpp"
-#include "openvino/core/partial_shape.hpp"
+#include "openvino/core/shape.hpp"
 #include "openvino/core/type/element_type.hpp"
 
 namespace ngraph {
@@ -28,8 +28,8 @@ namespace descriptor {
 /// \brief Compile-time descriptor of a first-class value that is a tensor.
 class OPENVINO_API Tensor {
 public:
-    Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name);
-    Tensor(const element::Type& element_type, const PartialShape& pshape, Node* node, size_t node_output_number);
+    Tensor(const element::Type& element_type, const Shape& pshape, const std::string& name);
+    Tensor(const element::Type& element_type, const Shape& pshape, Node* node, size_t node_output_number);
 
     Tensor(const Tensor&) = delete;
     Tensor& operator=(const Tensor&) = delete;
@@ -42,9 +42,9 @@ class OPENVINO_API Tensor {
     const std::unordered_set<std::string>& get_names() const;
     void set_names(const std::unordered_set<std::string>& names);
     void add_names(const std::unordered_set<std::string>& names);
-    void set_tensor_type(const element::Type& element_type, const PartialShape& pshape);
+    void set_tensor_type(const element::Type& element_type, const Shape& pshape);
     void set_element_type(const element::Type& elemenet_type);
-    void set_partial_shape(const PartialShape& partial_shape);
+    void set_partial_shape(const Shape& partial_shape);
 
     /// \brief sets lower bound value description
     void set_lower_value(const ngraph::HostTensorPtr& value);
@@ -56,8 +56,8 @@ class OPENVINO_API Tensor {
     const element::Type& get_element_type() const {
         return m_element_type;
     }
-    const ngraph::Shape& get_shape() const;
-    const PartialShape& get_partial_shape() const {
+    const StaticShape& get_shape() const;
+    const Shape& get_partial_shape() const {
         return m_partial_shape;
     }
     /// \brief gets lower bound value description
@@ -78,22 +78,22 @@ class OPENVINO_API Tensor {
     element::Type m_element_type;
 
     // TODO: remove along with get_shape
-    // Initially there was ngraph::Shape m_shape only available to keep shape information.
-    // Support for dynamic shapes required transition to ngraph::PartialShape.
-    // To smoothly transition to ngraph::PartialShape we introduced m_partial_shape
+    // Initially there was StaticShape m_shape only available to keep shape information.
+    // Support for dynamic shapes required transition to ov::Shape.
+    // To smoothly transition to ov::Shape we introduced m_partial_shape
     // and kept m_shape in sync with m_partial_shape. Synchronization point was placed
     // in set_partial_shape which dramatically affected performance of ngraph::Function
-    // validation. Since we have started the transition to ngraph::PartialShape and reduced
-    // ngraph::Shape usage the only user of m_shape was get_shape method with signature:
+    // validation. Since we have started the transition to ov::Shape and reduced
+    // StaticShape usage the only user of m_shape was get_shape method with signature:
     // const Shape& descriptor::Tensor::get_shape() const
     // It was decided to move m_shape and m_partial_shape synchronization point there and
     // to keep methods signature backward compatible.
mutable std::mutex shape_mutex; mutable std::atomic_bool m_shape_changed; - mutable ngraph::Shape m_shape; + mutable StaticShape m_shape; // TODO: end - PartialShape m_partial_shape; + Shape m_partial_shape; ngraph::HostTensorPtr m_lower_value, m_upper_value; std::string m_name; std::unordered_set m_names; diff --git a/ngraph/core/include/openvino/core/except.hpp b/ngraph/core/include/openvino/core/except.hpp index 30372a344d42bb..14c9260e1e5e21 100644 --- a/ngraph/core/include/openvino/core/except.hpp +++ b/ngraph/core/include/openvino/core/except.hpp @@ -20,4 +20,163 @@ class OPENVINO_API Exception : public std::runtime_error { explicit Exception(const std::stringstream& what_arg) : std::runtime_error(what_arg.str()) {} }; +static inline std::ostream& write_all_to_stream(std::ostream& str) { + return str; +} +template +static inline std::ostream& write_all_to_stream(std::ostream& str, const T& arg, TS&&... args) { + return write_all_to_stream(str << arg, args...); +} + +struct CheckLocInfo { + const char* file; + int line; + const char* check_string; +}; + +/// Base class for check failure exceptions. +class OPENVINO_API AssertFailure : public Exception { +public: + AssertFailure(const CheckLocInfo& check_loc_info, const std::string& context_info, const std::string& explanation) + : Exception(make_what(check_loc_info, context_info, explanation)) {} + +private: + static std::string make_what(const CheckLocInfo& check_loc_info, + const std::string& context_info, + const std::string& explanation); +}; } // namespace ov + +// +// Helper macro for defining custom check macros, which throw custom exception classes and provide +// useful context information (the check condition, source filename, line number, and any domain- +// specific context information [e.g., a summary of the node that was being processed at the time +// of the check]). +// +// For example (actually implemented in node.cpp), let's say we want to define a macro for +// checking conditions during node validation, usable as follows: +// +// NODE_VALIDATION_CHECK(node_being_checked, +// node_being_checked->get_input_shape(0).size() == 1, +// "Node must have an input rank of 1, but got ", +// node_being_checked->get_input_shape(0).size(), "."); +// +// In case of failure, this will throw an exception of type NodeValidationFailure with a what() +// string something like: +// +// Check 'node_being_checked->get_input_shape(0).size() == 1' failed at foo.cpp:123: +// While validating node 'Broadcast[Broadcast_10](Reshape_9: float{1,3,4,5}) -> (??)': +// Node must have an input of rank 1, but got 2. 
+// +// To implement this, the first step is to define a subclass of AssertFailure (let's say it's called +// MyFailure), which must have a constructor of the form: +// +// MyFailure(const CheckLocInfo& check_loc_info, +// T context_info, // "T" can be any type; you'll supply a function to convert "T" +// // to std::string +// const std::string& explanation) +// +// Here, we define a custom class for node validation failures as follows: +// +// static std::string node_validation_failure_loc_string(const Node* node) +// { +// std::stringstream ss; +// ss << "While validating node '" << *node << "'"; +// return ss.str(); +// } +// +// class NodeValidationFailure : public AssertFailure +// { +// public: +// NodeValidationFailure(const CheckLocInfo& check_loc_info, +// const Node* node, +// const std::string& explanation) +// : AssertFailure(check_loc_info, node_validation_failure_loc_string(node), explanation) +// { +// } +// }; +// +// Then, we define the macro NODE_VALIDATION_CHECK as follows: +// +// #define NODE_VALIDATION_CHECK(node, cond, ...) +// OPENVINO_ASSERT_HELPER(::ov::NodeValidationFailure, (node), (cond), ##__VA_ARGS__) +// +// The macro NODE_VALIDATION_CHECK can now be called on any condition, with a Node* pointer +// supplied to generate an informative error message via node_validation_failure_loc_string(). +// +// Take care to fully qualify the exception class name in the macro body. +// +// The "..." may be filled with expressions of any type that has an "operator<<" overload for +// insertion into std::ostream. +// +#define OPENVINO_ASSERT_HELPER2(exc_class, ctx, check, ...) \ + do { \ + if (!(check)) { \ + ::std::stringstream ss___; \ + ::ov::write_all_to_stream(ss___, __VA_ARGS__); \ + throw exc_class((::ov::CheckLocInfo{__FILE__, __LINE__, #check}), (ctx), ss___.str()); \ + } \ + } while (0) + +#define OPENVINO_ASSERT_HELPER1(exc_class, ctx, check) \ + do { \ + if (!(check)) { \ + throw exc_class((::ov::CheckLocInfo{__FILE__, __LINE__, #check}), (ctx), ""); \ + } \ + } while (0) + +/// \brief Macro to check whether a boolean condition holds. +/// \param cond Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evaluates to `false`. +/// \throws ::ov::AssertFailure if `cond` is false. +#define OPENVINO_ASSERT(...) OPENVINO_ASSERT_HELPER(::ov::AssertFailure, "", __VA_ARGS__) + +/// \brief Macro to signal a code path that is unreachable in a successful execution. It's +/// implemented with the OPENVINO_ASSERT macro. +/// \param ... Additional error message that should describe why that execution path is unreachable. +/// \throws ::ov::AssertFailure if the macro is executed. +#define OPENVINO_UNREACHABLE(...) OPENVINO_ASSERT(false, "Unreachable: ", __VA_ARGS__) +#define OPENVINO_ASSERT_HELPER(exc_class, ctx, ...) CALL_OVERLOAD(OPENVINO_ASSERT_HELPER, exc_class, ctx, __VA_ARGS__) + +#define GLUE(x, y) x y + +#define RETURN_ARG_COUNT(_1_, \ _2_, \ _3_, \ _4_, \ _5_, \ _6, \ _7, \ _8, \ _9, \ _10, \ _11, \ _12, \ _13, \ _14, \ _15, \ _16, \ _17, \ _18, \ _19, \ _20, \ _21, \ _22, \ _23, \ _24, \ _25, \ count, \ ...) \ count +#define EXPAND_ARGS(args) RETURN_ARG_COUNT args +#define COUNT_ARGS_MAXN(...)
\ + EXPAND_ARGS((__VA_ARGS__, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0)) + +#define OVERLOAD_MACRO2(name, count) name##count +#define OVERLOAD_MACRO1(name, count) OVERLOAD_MACRO2(name, count) +#define OVERLOAD_MACRO(name, count) OVERLOAD_MACRO1(name, count) + +#define CALL_OVERLOAD(name, exc_class, ctx, ...) \ + GLUE(OVERLOAD_MACRO(name, COUNT_ARGS_MAXN(__VA_ARGS__)), (exc_class, ctx, __VA_ARGS__)) diff --git a/ngraph/core/include/openvino/core/function.hpp b/ngraph/core/include/openvino/core/function.hpp index 76f7a904591265..509b555fbd0061 100644 --- a/ngraph/core/include/openvino/core/function.hpp +++ b/ngraph/core/include/openvino/core/function.hpp @@ -11,7 +11,6 @@ #include #include -#include "ngraph/node.hpp" #include "ngraph/op/assign.hpp" #include "ngraph/op/parameter.hpp" #include "ngraph/op/read_value.hpp" @@ -19,6 +18,7 @@ #include "ngraph/op/sink.hpp" #include "ngraph/op/util/variable.hpp" #include "openvino/core/core_visibility.hpp" +#include "openvino/core/node.hpp" namespace ov { /// A user-defined function. @@ -97,10 +97,10 @@ class OPENVINO_API Function { const ngraph::element::Type& get_output_element_type(size_t i) const; /// Return the shape of element i - const ngraph::Shape& get_output_shape(size_t i) const; + const StaticShape& get_output_shape(size_t i) const; /// Return the partial shape of element i - const ngraph::PartialShape& get_output_partial_shape(size_t i) const; + const Shape& get_output_partial_shape(size_t i) const; /// Check that there is a single result and return it. std::shared_ptr get_result() const; diff --git a/ngraph/core/include/openvino/core/node.hpp b/ngraph/core/include/openvino/core/node.hpp index aab675dc62f45f..6d5d28d186ab4e 100644 --- a/ngraph/core/include/openvino/core/node.hpp +++ b/ngraph/core/include/openvino/core/node.hpp @@ -18,14 +18,14 @@ #include #include -#include "ngraph/check.hpp" -#include "ngraph/deprecated.hpp" #include "ngraph/op/util/op_annotations.hpp" #include "openvino/core/attribute_visitor.hpp" #include "openvino/core/core_visibility.hpp" +#include "openvino/core/deprecated.hpp" #include "openvino/core/descriptor/input.hpp" #include "openvino/core/descriptor/output.hpp" #include "openvino/core/descriptor/tensor.hpp" +#include "openvino/core/except.hpp" #include "openvino/core/node_input.hpp" #include "openvino/core/node_output.hpp" #include "openvino/core/node_vector.hpp" @@ -239,7 +239,7 @@ class OPENVINO_API Node : public std::enable_shared_from_this { /// Sets/replaces the arguments with new arguments. void set_argument(size_t position, const Output& argument); - void set_output_type(size_t i, const element::Type& element_type, const PartialShape& pshape); + void set_output_type(size_t i, const element::Type& element_type, const Shape& pshape); /// Sets the number of outputs void set_output_size(size_t output_size); @@ -318,10 +318,10 @@ class OPENVINO_API Node : public std::enable_shared_from_this { const element::Type& get_element_type() const; /// Returns the shape for output i - const ngraph::Shape& get_output_shape(size_t i) const; + const StaticShape& get_output_shape(size_t i) const; /// Returns the partial shape for output i - const PartialShape& get_output_partial_shape(size_t i) const; + const Shape& get_output_partial_shape(size_t i) const; /// Return the output to use when converting to an Output with no index specified. /// Throws when not supported. 
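// ----------------------------------------------------------------------------
// Editorial sketch (not part of the patch): how the OPENVINO_ASSERT machinery
// above is meant to be used. COUNT_ARGS_MAXN expands to 1 for a lone condition
// and to 2 when message arguments follow, selecting OPENVINO_ASSERT_HELPER1 or
// OPENVINO_ASSERT_HELPER2; the message is only formatted on failure. The
// validate_rank function and its messages are hypothetical.
#include "openvino/core/except.hpp"

inline void validate_rank(size_t rank) {
    OPENVINO_ASSERT(rank > 0);  // condition only -> HELPER1, no stream formatting
    OPENVINO_ASSERT(rank <= 8, "Expected rank <= 8, but got ", rank);  // -> HELPER2
    if (rank == 0) {
        OPENVINO_UNREACHABLE("rank was verified to be positive above");
    }
}
// ----------------------------------------------------------------------------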
@@ -337,7 +337,7 @@ class OPENVINO_API Node : public std::enable_shared_from_this { // TODO: deprecate in favor of node->get_output_shape(0) with a suitable check in the // calling code, or updates to the calling code if it is making an invalid assumption of // only one output. - const ngraph::Shape& get_shape() const; + const StaticShape& get_shape() const; /// Returns the tensor for output or input i descriptor::Tensor& get_output_tensor(size_t i) const; @@ -358,11 +358,11 @@ class OPENVINO_API Node : public std::enable_shared_from_this { /// Returns the shape of input i // TODO: deprecate in favor of node->get_input_shape(i) - const ngraph::Shape& get_input_shape(size_t i) const; + const StaticShape& get_input_shape(size_t i) const; /// Returns the partial shape of input i // TODO: deprecate in favor of node->get_input_partial_shape(i) - const PartialShape& get_input_partial_shape(size_t i) const; + const Shape& get_input_partial_shape(size_t i) const; /// Returns the tensor name for input i OPENVINO_DEPRECATED("The tensor name was deprecated. Use get_input_tensor(i).get_names() instead.") @@ -654,10 +654,10 @@ struct RawNodeOutput { using RawNodeOutputMap = std::map>; -class OPENVINO_API NodeValidationFailure : public ngraph::CheckFailure { +class OPENVINO_API NodeValidationFailure : public ov::AssertFailure { public: NodeValidationFailure(const ngraph::CheckLocInfo& check_loc_info, const Node* node, const std::string& explanation) - : CheckFailure(check_loc_info, node_validation_failure_loc_string(node), explanation) {} + : AssertFailure(check_loc_info, node_validation_failure_loc_string(node), explanation) {} }; } // namespace ov #define NODE_VALIDATION_CHECK(node, ...) NGRAPH_CHECK_HELPER(::ov::NodeValidationFailure, (node), __VA_ARGS__) diff --git a/ngraph/core/include/openvino/core/node_input.hpp b/ngraph/core/include/openvino/core/node_input.hpp index fb6e2cb527c8b4..0f9c4a28547130 100644 --- a/ngraph/core/include/openvino/core/node_input.hpp +++ b/ngraph/core/include/openvino/core/node_input.hpp @@ -10,7 +10,7 @@ #include "ngraph/shape.hpp" #include "openvino/core/core_visibility.hpp" #include "openvino/core/descriptor/tensor.hpp" -#include "openvino/core/partial_shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/core/variant.hpp" @@ -39,9 +39,9 @@ class OPENVINO_API Input { /// \return The element type of the input referred to by this input handle. const element::Type& get_element_type() const; /// \return The shape of the input referred to by this input handle. - const ngraph::Shape& get_shape() const; + const StaticShape& get_shape() const; /// \return The partial shape of the input referred to by this input handle. - const PartialShape& get_partial_shape() const; + const Shape& get_partial_shape() const; /// \return A handle to the output that is connected to this input. Output get_source_output() const; /// \return A reference to the tensor descriptor for this input. @@ -90,9 +90,9 @@ class OPENVINO_API Input { /// \return The element type of the input referred to by this input handle. const element::Type& get_element_type() const; /// \return The shape of the input referred to by this input handle. - const ngraph::Shape& get_shape() const; + const StaticShape& get_shape() const; /// \return The partial shape of the input referred to by this input handle. - const PartialShape& get_partial_shape() const; + const Shape& get_partial_shape() const; /// \return A handle to the output that is connected to this input. 
Output get_source_output() const; /// \return A reference to the tensor descriptor for this input. diff --git a/ngraph/core/include/openvino/core/node_output.hpp b/ngraph/core/include/openvino/core/node_output.hpp index 73d9b4659823ba..0dd1709bb7a2be 100644 --- a/ngraph/core/include/openvino/core/node_output.hpp +++ b/ngraph/core/include/openvino/core/node_output.hpp @@ -11,7 +11,7 @@ #include "ngraph/shape.hpp" #include "openvino/core/core_visibility.hpp" #include "openvino/core/descriptor/tensor.hpp" -#include "openvino/core/partial_shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/core/variant.hpp" @@ -68,9 +68,9 @@ class OPENVINO_API Output { /// \return The element type of the output referred to by this output handle. const element::Type& get_element_type() const; /// \return The shape of the output referred to by this output handle. - const ngraph::Shape& get_shape() const; + const StaticShape& get_shape() const; /// \return The partial shape of the output referred to by this output handle. - const PartialShape& get_partial_shape() const; + const Shape& get_partial_shape() const; /// \return The reference to runtime info map RTMap& get_rt_info(); @@ -145,9 +145,9 @@ class OPENVINO_API Output { /// \return The element type of the output referred to by this output handle. const element::Type& get_element_type() const; /// \return The shape of the output referred to by this output handle. - const ngraph::Shape& get_shape() const; + const StaticShape& get_shape() const; /// \return The partial shape of the output referred to by this output handle. - const PartialShape& get_partial_shape() const; + const Shape& get_partial_shape() const; /// \return The constant reference to runtime info map const RTMap& get_rt_info() const; diff --git a/ngraph/core/include/openvino/core/preprocess/input_info.hpp b/ngraph/core/include/openvino/core/preprocess/input_info.hpp new file mode 100644 index 00000000000000..d586a44ca6657e --- /dev/null +++ b/ngraph/core/include/openvino/core/preprocess/input_info.hpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/core_visibility.hpp" +#include "openvino/core/preprocess/input_tensor_info.hpp" +#include "openvino/core/preprocess/preprocess_steps.hpp" + +namespace ov { +namespace preprocess { + +/// \brief Class holding preprocessing information for one input +/// API has Builder-like style to allow chaining calls in client's code, like +/// \code{.cpp} +/// auto proc = PrePostProcessor().input(InputInfo().tensor(...).preprocess(...)); +/// \endcode +class OPENVINO_API InputInfo final { + class InputInfoImpl; + std::unique_ptr m_impl; + friend class PrePostProcessor; + +public: + /// \brief Empty constructor. Should be used only if network will have only one input + InputInfo(); + + /// \brief Constructs input information for a particular input index of the model + /// + /// \param input_index Index to address specified input parameter of model + InputInfo(size_t input_index); + + /// \brief Default move constructor + InputInfo(InputInfo&&) noexcept; + + /// \brief Default move assignment operator + InputInfo& operator=(InputInfo&&) noexcept; + + /// \brief Default destructor + ~InputInfo(); + + /// \brief Set input tensor information for input - Lvalue version + /// + /// \param builder Input tensor information.
+ /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + InputInfo& tensor(InputTensorInfo&& builder) &; + + /// \brief Set input tensor information for input - Rvalue version + /// + /// \param builder Input tensor information. + /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + InputInfo&& tensor(InputTensorInfo&& builder) &&; + + /// \brief Set preprocessing operations for input - Lvalue version + /// + /// \param builder Preprocessing operations. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + InputInfo& preprocess(PreProcessSteps&& builder) &; + + /// \brief Set preprocessing operations for input - Rvalue version + /// + /// \param builder Preprocessing operations. + /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + InputInfo&& preprocess(PreProcessSteps&& builder) &&; +}; + +} // namespace preprocess +} // namespace ov diff --git a/ngraph/core/include/openvino/core/preprocess/input_tensor_info.hpp b/ngraph/core/include/openvino/core/preprocess/input_tensor_info.hpp new file mode 100644 index 00000000000000..25f58e7c19266f --- /dev/null +++ b/ngraph/core/include/openvino/core/preprocess/input_tensor_info.hpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/core_visibility.hpp" +#include "openvino/core/type/element_type.hpp" + +namespace ov { +namespace preprocess { + +/// \brief Information about user's input tensor. By default, it will be initialized to the same data (type/shape/etc) as +/// network's input parameter. User application can override particular parameters (like 'element_type') according to +/// application's data and specify appropriate conversions in pre-processing steps +/// +/// \code{.cpp} +/// auto proc = +/// PrePostProcessor() +/// .input(InputInfo() +/// .tensor(InputTensorInfo() +/// .set_element_type(ov::element::u8)) +/// .preprocess() +/// ); +/// \endcode +class OPENVINO_API InputTensorInfo final { + class InputTensorInfoImpl; + std::unique_ptr m_impl; + friend class InputInfo; + +public: + /// \brief Default empty constructor + InputTensorInfo(); + + /// \brief Default move constructor + InputTensorInfo(InputTensorInfo&&) noexcept; + + /// \brief Default move assignment + InputTensorInfo& operator=(InputTensorInfo&&) noexcept; + + /// \brief Default destructor + ~InputTensorInfo(); + + /// \brief Set element type for user's input tensor + /// This version allows chaining for Lvalue objects + /// + /// \param type Element type for user's input tensor. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + InputTensorInfo& set_element_type(const ov::element::Type& type) &; + + /// \brief Set element type for user's input tensor + /// This version allows chaining for Rvalue objects + /// + /// \param type Element type for user's input tensor.
+ /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + InputTensorInfo&& set_element_type(const ov::element::Type& type) &&; +}; + +} // namespace preprocess +} // namespace ov diff --git a/ngraph/core/include/openvino/core/preprocess/pre_post_process.hpp b/ngraph/core/include/openvino/core/preprocess/pre_post_process.hpp new file mode 100644 index 00000000000000..300fd1ab557295 --- /dev/null +++ b/ngraph/core/include/openvino/core/preprocess/pre_post_process.hpp @@ -0,0 +1,70 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/core_visibility.hpp" +#include "openvino/core/preprocess/input_info.hpp" + +namespace ov { + +class Function; + +namespace preprocess { + +/// \brief Main class for adding pre- and post- processing steps to existing ov::Function +/// API has Builder-like style to allow chaining calls in client's code, like +/// \code{.cpp} +/// auto proc = PrePostProcessor().input().input(); +/// \endcode +/// +/// This is a helper class for writing easy pre- and post- processing operations on ov::Function object assuming that +/// any preprocess operation takes one input and produces one output. +/// +/// For advanced preprocessing scenarios, like combining several functions with multiple inputs/outputs into one, +/// client's code can use transformation passes over ov::Function +/// +class OPENVINO_API PrePostProcessor final { + class PrePostProcessorImpl; + std::unique_ptr m_impl; + +public: + /// \brief Default constructor + PrePostProcessor(); + + /// \brief Default move constructor + PrePostProcessor(PrePostProcessor&&) noexcept; + + /// \brief Default move assignment operator + PrePostProcessor& operator=(PrePostProcessor&&) noexcept; + + /// \brief Default destructor + ~PrePostProcessor(); + + /// \brief Adds pre-processing information and steps to input of model. This method can be used only if ov::Function + /// passed on `build` has only one input + /// + /// \param builder Pre-processing data for input tensor of model. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + PrePostProcessor& input(InputInfo&& builder) &; + + /// \brief Adds pre-processing information and steps to input of model - Rvalue version. This method can be used + /// only if ov::Function passed on `build` has only one input. + /// + /// \param builder Pre-processing data for input tensor of model. 
+ /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + PrePostProcessor&& input(InputInfo&& builder) &&; + + /// \brief Adds pre/post-processing operations to existing function + /// + /// \param function Existing function representing loaded model + /// + /// \return Function with added pre/post-processing operations + std::shared_ptr build(const std::shared_ptr& function); +}; + +} // namespace preprocess +} // namespace ov diff --git a/ngraph/core/include/openvino/core/preprocess/preprocess_steps.hpp b/ngraph/core/include/openvino/core/preprocess/preprocess_steps.hpp new file mode 100644 index 00000000000000..097ca507329b2f --- /dev/null +++ b/ngraph/core/include/openvino/core/preprocess/preprocess_steps.hpp @@ -0,0 +1,117 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/core_visibility.hpp" +#include "openvino/core/type/element_type.hpp" + +namespace ov { + +class Node; + +namespace preprocess { + +/// \brief Preprocessing steps. Each step typically adds an operation to the input parameter +/// User application can specify a sequence of preprocessing steps in a builder-like manner +/// \code{.cpp} +/// auto proc = PrePostProcessor() +/// .input(InputInfo() +/// .preprocess(PreProcessSteps() +/// .mean(0.2f) // Subtract 0.2 from each element +/// .scale(2.3f)) // then divide each element by 2.3 +/// ); +/// \endcode +class OPENVINO_API PreProcessSteps final { + class PreProcessStepsImpl; + std::unique_ptr m_impl; + friend class InputInfo; + +public: + /// \brief Default empty constructor + PreProcessSteps(); + + /// \brief Default move constructor + PreProcessSteps(PreProcessSteps&&) noexcept; + + /// \brief Default move assignment operator + PreProcessSteps& operator=(PreProcessSteps&&) noexcept; + + /// \brief Default destructor + ~PreProcessSteps(); + + /// \brief Add convert element type preprocess operation - Lvalue version + /// + /// \param type Desired type of input. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps& convert_element_type(const ov::element::Type& type) &; + + /// \brief Add convert element type preprocess operation - Rvalue version + /// + /// \param type Desired type of input. + /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps&& convert_element_type(const ov::element::Type& type) &&; + + /// \brief Add scale preprocess operation - Lvalue version + /// Divide each element of input by specified value + /// + /// \param value Scaling value. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps& scale(float value) &; + + /// \brief Add scale preprocess operation - Rvalue version + /// Divide each element of input by specified value + /// + /// \param value Scaling value. + /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps&& scale(float value) &&; + + /// \brief Add mean preprocess operation - Lvalue version + /// Subtract specified value from each element of input + /// + /// \param value Value to subtract from each element.
+ /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps& mean(float value) &; + + /// \brief Add mean preprocess operation - Rvalue version + /// Subtract specified value from each element of input + /// + /// \param value Value to subtract from each element. + /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps&& mean(float value) &&; + + /// \brief Signature for custom preprocessing operation. Custom preprocessing operation takes one input node and + /// produces one output node. For more advanced cases, client's code can use transformation passes over ov::Function + /// directly + /// + /// \param node Input node for custom preprocessing operation + /// + /// \return New node after applying custom preprocessing operation + using CustomPreprocessOp = std::function(const std::shared_ptr& node)>; + + /// \brief Add custom preprocess operation - Lvalue version + /// Client application can specify callback function for custom action + /// + /// \param preprocess_cb Client's custom preprocess operation. + /// + /// \return Reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps& custom(const CustomPreprocessOp& preprocess_cb) &; + + /// \brief Add custom preprocess operation - Rvalue version + /// Client application can specify callback function for custom action + /// + /// \param preprocess_cb Client's custom preprocess operation. + /// + /// \return Rvalue reference to 'this' to allow chaining with other calls in a builder-like manner + PreProcessSteps&& custom(const CustomPreprocessOp& preprocess_cb) &&; +}; + +} // namespace preprocess +} // namespace ov diff --git a/ngraph/core/include/openvino/core/partial_shape.hpp b/ngraph/core/include/openvino/core/shape.hpp similarity index 79% rename from ngraph/core/include/openvino/core/partial_shape.hpp rename to ngraph/core/include/openvino/core/shape.hpp index 9b630a7017232b..5923acf79f8cb8 100644 --- a/ngraph/core/include/openvino/core/partial_shape.hpp +++ b/ngraph/core/include/openvino/core/shape.hpp @@ -6,11 +6,11 @@ #include -#include "ngraph/attribute_adapter.hpp" #include "ngraph/op/util/attr_types.hpp" -#include "ngraph/shape.hpp" +#include "openvino/core/attribute_adapter.hpp" #include "openvino/core/dimension.hpp" #include "openvino/core/rank.hpp" +#include "openvino/core/static_shape.hpp" namespace ov { namespace op { @@ -20,14 +20,14 @@ struct AutoBroadcastSpec; /// \brief Class representing a shape that may be partially or totally dynamic. /// /// -/// A PartialShape may have: +/// A Shape may have: /// /// \li Dynamic rank. (Informal notation: `?`) /// \li Static rank, but dynamic dimensions on some or all axes. /// (Informal notation examples: `{1,2,?,4}`, `{?,?,?}`) /// \li Static rank, and static dimensions on all axes. 
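// ----------------------------------------------------------------------------
// Editorial sketch (not part of the patch): tying together the four new
// preprocessing headers above (InputInfo, InputTensorInfo, PreProcessSteps,
// PrePostProcessor). Assumes a loaded ov::Function with a single input; the
// function name and the concrete mean/scale values are hypothetical.
#include "openvino/core/preprocess/pre_post_process.hpp"

inline std::shared_ptr<ov::Function> add_preprocessing(const std::shared_ptr<ov::Function>& function) {
    using namespace ov::preprocess;
    return PrePostProcessor()
        .input(InputInfo()
                   .tensor(InputTensorInfo().set_element_type(ov::element::u8))
                   .preprocess(PreProcessSteps()
                                   .convert_element_type(ov::element::f32)
                                   .mean(0.5f)     // subtract 0.5 from each element
                                   .scale(2.0f)))  // then divide each element by 2.0
        .build(function);
}
// ----------------------------------------------------------------------------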
/// (Informal notation examples: `{1,2,3,4}`, `{6}`, `{}`) -class OPENVINO_API PartialShape { +class OPENVINO_API Shape { using Dimensions = std::vector; public: @@ -42,26 +42,26 @@ class OPENVINO_API PartialShape { /// Examples: /// /// \code{.cpp} - /// PartialShape s{2,3,4}; // rank=3, all dimensions static - /// PartialShape s{}; // rank=0 - /// PartialShape s{2,Dimension::dynamic(),3}; // rank=3, dimension 1 dynamic + /// Shape s{2,3,4}; // rank=3, all dimensions static + /// Shape s{}; // rank=0 + /// Shape s{2,Dimension::dynamic(),3}; // rank=3, dimension 1 dynamic /// \endcode - PartialShape(std::initializer_list init); + Shape(std::initializer_list init); - /// \brief Constructs a PartialShape with static rank from a vector of Dimension. + /// \brief Constructs a Shape with static rank from a vector of Dimension. /// \param dimensions The Dimension values for the constructed shape. - PartialShape(std::vector dimensions); + Shape(std::vector dimensions); - /// \brief Constructs a PartialShape with static rank from a vector of dimensions values. + /// \brief Constructs a Shape with static rank from a vector of dimensions values. /// \param dimensions The Dimension values for the constructed shape. - PartialShape(const std::vector& dimensions); + Shape(const std::vector& dimensions); - /// \brief Constructs a static PartialShape with zero rank (the shape of a scalar). - PartialShape(); + /// \brief Constructs a static Shape with zero rank (the shape of a scalar). + Shape(); - /// \brief Constructs a static PartialShape from a Shape. - /// \param shape The Shape to convert into PartialShape. - PartialShape(const ngraph::Shape& shape); + /// \brief Constructs a static Shape from a StaticShape. + /// \param shape The StaticShape to convert into Shape. + Shape(const StaticShape& shape); /// \brief Check if this shape is static. /// \return `true` if this shape is static, else `false`. @@ -84,9 +84,9 @@ class OPENVINO_API PartialShape { Rank rank() const { return m_rank_is_static ? Rank(m_dimensions.size()) : Rank::dynamic(); } - /// \brief Construct a PartialShape with the given rank and all dimensions (if any) dynamic. - /// \return A PartialShape with the given rank, and all dimensions (if any) dynamic. - static PartialShape dynamic(Rank r = Rank::dynamic()); + /// \brief Construct a Shape with the given rank and all dimensions (if any) dynamic. + /// \return A Shape with the given rank, and all dimensions (if any) dynamic. + static Shape dynamic(Rank r = Rank::dynamic()); /// \brief Check whether this shape is compatible with the argument, i.e., whether it is /// possible to merge them. /// \param s The shape to be checked for compatibility with this shape. @@ -96,7 +96,7 @@ /// \li one or both of them has dynamic rank, or /// \li both shapes have dynamic and equal rank, and their dimensions are elementwise /// compatible (see Dimension::compatible()). - bool compatible(const PartialShape& s) const; + bool compatible(const Shape& s) const; /// \brief Check whether this shape represents the same scheme as the argument. /// \param s The shape whose scheme is being compared with this shape. @@ -106,13 +106,13 @@ /// \li they both have dynamic rank, or /// \li they both have static and equal rank `r`, and for every `i` from `0` to `r-1`, /// `s1[i]` represents the same scheme as `s2[i]` (see Dimension::same_scheme()).
- bool same_scheme(const PartialShape& s) const; + bool same_scheme(const Shape& s) const; /// \brief Check whether this shape is a relaxation of the argument. /// \param s The shape which is being compared against this shape. /// \return `true` if this shape relaxes `s`, else `false`. /// - /// Intuitively, a PartialShape `s1` is said to _relax_ `s2` (or _is a + /// Intuitively, a Shape `s1` is said to _relax_ `s2` (or _is a /// relaxation_ of `s2`) if it is "more permissive" than `s2`. In other /// words, `s1` is a relaxation of `s2` if anything you can form by /// plugging things into the dynamic dimensions of `s2` is also @@ -121,17 +121,17 @@ /// /// `s1.relaxes(s2)` is equivalent to `s2.refines(s1)`. /// - /// Formally, PartialShape `s1` is said to _relax_ PartialShape `s2` + /// Formally, Shape `s1` is said to _relax_ Shape `s2` /// if: /// \li For every `i` from `0` to `r-1`, /// either `s1[i]` contains s2[i]. - bool relaxes(const PartialShape& s) const; + bool relaxes(const Shape& s) const; /// \brief Check whether this shape is a refinement of the argument. /// \param s The shape which is being compared against this shape. /// \return `true` if this shape refines `s`, else `false`. /// - /// Intuitively, a PartialShape `s1` is said to _relax_ `s2` (or _is a + /// Intuitively, a Shape `s1` is said to _relax_ `s2` (or _is a /// relaxation_ of `s2`) if it is "less permissive" than `s2`. In other /// words, `s1` is a relaxation of `s2` if anything you can form by /// plugging things into the dynamic dimensions of `s1` is also @@ -140,12 +140,12 @@ /// /// `s1.refines(s2)` is equivalent to `s2.relaxes(s1)`. /// - /// Formally, PartialShape `s1` is said to _refine_ PartialShape `s2` + /// Formally, Shape `s1` is said to _refine_ Shape `s2` /// if: /// \li `s2` has dynamic rank, or /// \li `s1` and `s2` both have static rank `r`, and for every `i` from `0` to `r-1`, /// either `s2[i]` is dynamic, or `s1[i]` == `s2[i]`. - bool refines(const PartialShape& s) const; + bool refines(const Shape& s) const; /// \brief Checks that this shape's rank is compatible with `r`, and, if this shape's /// rank is dynamic and `r` is static, updates this shape to have a rank of `r` /// \return `true` if this shape's rank is compatible with `r`, else `false`. bool merge_rank(Rank r); - /// \brief Convert a static PartialShape to a Shape. + /// \brief Convert a static Shape to a StaticShape. /// \return A new Shape `s` where `s[i] = size_t((*this)[i])`. - /// \throws std::invalid_argument If this PartialShape is dynamic. - ngraph::Shape to_shape() const; + /// \throws std::invalid_argument If this Shape is dynamic. + StaticShape to_shape() const; /// \brief Returns `true` if all static dimensions of the tensor are non-negative, else /// `false`. bool all_non_negative() const; - /// \brief Index operator for PartialShape. + /// \brief Index operator for Shape. /// \param i The index of the dimension being selected. /// \return A reference to the `i`th Dimension of this shape. const Dimension& operator[](size_t i) const; - /// \brief Index operator for PartialShape. + /// \brief Index operator for Shape. /// \param i The index of the dimension being selected. /// \return A reference to the `i`th Dimension of this shape.
Dimension& operator[](size_t i); @@ -174,16 +174,16 @@ class OPENVINO_API PartialShape { explicit operator std::vector() const { return m_dimensions; } - friend OPENVINO_API std::ostream& operator<<(std::ostream& str, const PartialShape& shape); - friend PartialShape operator+(const PartialShape& s1, const PartialShape& s2); - bool operator==(const PartialShape& partial_shape) const; - bool operator!=(const PartialShape& partial_shape) const; + friend OPENVINO_API std::ostream& operator<<(std::ostream& str, const Shape& shape); + friend Shape operator+(const Shape& s1, const Shape& s2); + bool operator==(const Shape& partial_shape) const; + bool operator!=(const Shape& partial_shape) const; /// Get the max bounding shape - ngraph::Shape get_max_shape() const; + StaticShape get_max_shape() const; /// Get the min bounding shape - ngraph::Shape get_min_shape() const; + StaticShape get_min_shape() const; /// Get the unique shape - ngraph::Shape get_shape() const; + StaticShape get_shape() const; /// \brief Try to merge one shape into another. /// \param[in,out] dst The shape that `src` will be merged into. @@ -213,12 +213,10 @@ class OPENVINO_API PartialShape { /// `src`, but overwrites `dst` with the result and returns `true` if merging is /// successful; if merging is unsuccessful, the function returns `false` and may make /// unspecified changes to `dst`. - static bool merge_into(PartialShape& dst, const PartialShape& src); + static bool merge_into(Shape& dst, const Shape& src); /// \brief Try to merge one shape into another along with implicit broadcasting - static bool broadcast_merge_into(PartialShape& dst, - const PartialShape& src, - const ngraph::op::AutoBroadcastSpec& autob); + static bool broadcast_merge_into(Shape& dst, const Shape& src, const ngraph::op::AutoBroadcastSpec& autob); /// \brief Returns a read/write iterator that points to the first /// element in the shape. Iteration is done in ordinary @@ -294,8 +292,8 @@ class OPENVINO_API PartialShape { } private: - // Private constructor for PartialShape::dynamic(). - PartialShape(bool rank_is_static, std::vector dimensions); + // Private constructor for Shape::dynamic(). + Shape(bool rank_is_static, std::vector dimensions); // True if the shape's rank is static. bool m_rank_is_static; @@ -323,20 +321,20 @@ class OPENVINO_API PartialShape { Dimensions m_dimensions; }; -/// \brief Elementwise addition of two PartialShape objects. +/// \brief Elementwise addition of two Shape objects. /// \param s1 Left operand for addition. /// \param s2 Right operand for addition. /// \return The result of elementwise adding `s1` to `s2` (see description). /// \throws std::invalid_argument If `s1` and `s2` have inconsistent ranks. /// -/// \li If `s1` or `s2` has dynamic rank, returns PartialShape::dynamic(). +/// \li If `s1` or `s2` has dynamic rank, returns Shape::dynamic(). /// \li If `s1 and `s2` both have static rank, and their ranks are unequal, throws /// std::invalid_argument. /// \li If `s1` and `s2` both have static rank, and their ranks are equal, /// returns a new shape whose `i`th dimension is `s1[i] + s2[i]`. -PartialShape operator+(const PartialShape& s1, const PartialShape& s2); +Shape operator+(const Shape& s1, const Shape& s2); -/// \brief Inserts a human-readable representation of a PartialShape into an output stream. +/// \brief Inserts a human-readable representation of a Shape into an output stream. /// \param str The output stream targeted for insertion. /// \param shape The shape to be inserted into `str`. 
/// \return A reference to `str` after insertion. @@ -350,10 +348,10 @@ PartialShape operator+(const PartialShape& s1, const PartialShape& s2); /// Example: /// /// \code{.cpp} -/// PartialShape s1{PartialShape::dynamic())}; -/// PartialShape s2{}; -/// PartialShape s3{1,Dimension::dynamic(),2,3}; -/// PartialShape s4{2,3,4}; +/// Shape s1{Shape::dynamic()}; +/// Shape s2{}; +/// Shape s3{1,Dimension::dynamic(),2,3}; +/// Shape s4{2,3,4}; /// std::cout << s1 << std::endl /// << s2 << std::endl /// << s3 << std::endl @@ -369,25 +367,25 @@ PartialShape operator+(const PartialShape& s1, const PartialShape& s2); /// {2,3,4} /// \endcode OPENVINO_API -std::ostream& operator<<(std::ostream& str, const PartialShape& shape); +std::ostream& operator<<(std::ostream& str, const Shape& shape); template <> -class OPENVINO_API AttributeAdapter : public ValueAccessor> { +class OPENVINO_API AttributeAdapter : public ValueAccessor> { public: - AttributeAdapter(ov::PartialShape& value) : m_ref(value) {} + AttributeAdapter(ov::Shape& value) : m_ref(value) {} const std::vector& get() override; void set(const std::vector& value) override; - static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; const DiscreteTypeInfo& get_type_info() const override { return type_info; } - operator ov::PartialShape&() { + operator ov::Shape&() { return m_ref; } protected: - ov::PartialShape& m_ref; + ov::Shape& m_ref; std::vector m_buffer; bool m_buffer_valid{false}; }; diff --git a/ngraph/core/include/openvino/core/static_shape.hpp b/ngraph/core/include/openvino/core/static_shape.hpp new file mode 100644 index 00000000000000..878cd1e54fcb98 --- /dev/null +++ b/ngraph/core/include/openvino/core/static_shape.hpp @@ -0,0 +1,111 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "openvino/core/attribute_adapter.hpp" +#include "openvino/core/axis_set.hpp" +#include "openvino/core/core_visibility.hpp" +#include "openvino/core/strides.hpp" + +namespace ov { +/// \brief StaticShape for a tensor. +class StaticShape : public std::vector { +public: + OPENVINO_API StaticShape(); + + OPENVINO_API StaticShape(const std::initializer_list& axis_lengths); + + OPENVINO_API StaticShape(const std::vector& axis_lengths); + + OPENVINO_API StaticShape(const StaticShape& axis_lengths); + + OPENVINO_API explicit StaticShape(size_t n, size_t initial_value = 0); + + OPENVINO_API ~StaticShape(); + + template + StaticShape(InputIterator first, InputIterator last) : std::vector(first, last) {} + + OPENVINO_API StaticShape& operator=(const StaticShape& v); + OPENVINO_API StaticShape& operator=(StaticShape&& v) noexcept; +}; + +/// Number of elements spanned by a shape +template +size_t shape_size(const SHAPE_TYPE& shape) { + size_t size = 1; + for (auto d : shape) { + size *= d; + } + return size; +} + +/// Number of elements in a subset of dimensions of a shape. +/// Returns a product of dimensions in a range [start_dim;end_dim) +template +size_t shape_size(ForwardIt start_dim, const ForwardIt end_dim) { + static_assert(std::is_arithmetic::value_type>::value, + "shape_size expects 2 forward iterators as inputs. 
value_type of those iterators has to be an " + "arithmetic type so that they can be used in multiplication operation."); + + return std::accumulate(start_dim, + end_dim, + typename std::iterator_traits::value_type{1}, + std::multiplies::value_type>()); +} + +/// Row-major strides for a shape +template +std::vector row_major_strides(const SHAPE_TYPE& shape) { + std::vector strides(shape.size()); + size_t s = 1; + auto st = strides.rbegin(); + for (auto d = shape.rbegin(); d != shape.rend() && st != strides.rend(); d++, st++) { + *st = s; + s *= *d; + } + return strides; +} + +template +size_t row_major_stride(const SHAPE_TYPE& shape, size_t axis) { + size_t s = 1; + for (size_t i = shape.size(); i-- > axis + 1;) { + s *= shape[i]; + } + return s; +} + +template +inline bool is_scalar(const SHAPE_TYPE& shape) { + return 0 == shape.size(); +} + +template +inline bool is_vector(const SHAPE_TYPE& shape) { + return 1 == shape.size(); +} + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const StaticShape& shape); + +template <> +class OPENVINO_API AttributeAdapter + : public IndirectVectorValueAccessor> + +{ +public: + AttributeAdapter(ov::StaticShape& value) + : IndirectVectorValueAccessor>(value) {} + static constexpr DiscreteTypeInfo type_info{"AttributeAdapter", 0}; + const DiscreteTypeInfo& get_type_info() const override { + return type_info; + } +}; +} // namespace ov diff --git a/ngraph/core/include/openvino/op/avg_pool.hpp b/ngraph/core/include/openvino/op/avg_pool.hpp index 66f94c7cf6f528..58fc928ef0b87d 100644 --- a/ngraph/core/include/openvino/op/avg_pool.hpp +++ b/ngraph/core/include/openvino/op/avg_pool.hpp @@ -38,9 +38,9 @@ class OPENVINO_API AvgPool : public Op { /// AvgPool(const Output& arg, const Strides& strides, - const ngraph::Shape& pads_begin, - const ngraph::Shape& pads_end, - const ngraph::Shape& kernel, + const StaticShape& pads_begin, + const StaticShape& pads_end, + const StaticShape& kernel, bool exclude_pad, op::RoundingType rounding_type = op::RoundingType::FLOOR, const PadType& auto_pad = op::PadType::EXPLICIT); @@ -51,17 +51,17 @@ class OPENVINO_API AvgPool : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; /// \return The kernel shape. - const ngraph::Shape& get_kernel() const; - void set_kernel(const ngraph::Shape& kernel); + const StaticShape& get_kernel() const; + void set_kernel(const StaticShape& kernel); /// \return The strides. const Strides& get_strides() const; void set_strides(const Strides& strides); /// \return The beginning of padding shape. - const ngraph::Shape& get_pads_begin() const; - void set_pads_begin(const ngraph::Shape& pads_begin); + const StaticShape& get_pads_begin() const; + void set_pads_begin(const StaticShape& pads_begin); /// \return The end of padding shape. - const ngraph::Shape& get_pads_end() const; - void set_pads_end(const ngraph::Shape& pads_end); + const StaticShape& get_pads_end() const; + void set_pads_end(const StaticShape& pads_end); bool get_exclude_pad() const; void set_exclude_pad(bool exclude_pad); /// \return The pad type for pooling. 
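// ----------------------------------------------------------------------------
// Editorial sketch (not part of the patch): the free helpers declared in
// static_shape.hpp above, evaluated on a small example shape.
#include "openvino/core/static_shape.hpp"

inline void shape_helpers_example() {
    ov::StaticShape shape{2, 3, 4};
    size_t total = ov::shape_size(shape);                          // 2 * 3 * 4 = 24
    std::vector<size_t> strides = ov::row_major_strides(shape);    // {12, 4, 1}
    size_t tail = ov::shape_size(shape.begin() + 1, shape.end());  // 3 * 4 = 12
    bool scalar = ov::is_scalar(ov::StaticShape{});                // true: rank 0
    (void)total; (void)strides; (void)tail; (void)scalar;
}
// ----------------------------------------------------------------------------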
@@ -75,10 +75,10 @@ class OPENVINO_API AvgPool : public Op { OPENVINO_SUPPRESS_DEPRECATED_END protected: - ngraph::Shape m_kernel; + StaticShape m_kernel; Strides m_strides; - ngraph::Shape m_pads_begin; - ngraph::Shape m_pads_end; + StaticShape m_pads_begin; + StaticShape m_pads_end; bool m_exclude_pad{true}; PadType m_auto_pad{PadType::EXPLICIT}; op::RoundingType m_rounding_type{op::RoundingType::FLOOR}; diff --git a/ngraph/core/include/openvino/op/constant.hpp b/ngraph/core/include/openvino/op/constant.hpp index f13899b31ca09b..4d02bb375ebc8e 100644 --- a/ngraph/core/include/openvino/op/constant.hpp +++ b/ngraph/core/include/openvino/op/constant.hpp @@ -37,7 +37,7 @@ class OPENVINO_API Constant : public Op { /// \param values A vector of literals for initializing the tensor constant. The /// size of values must match the size of the shape. template - Constant(const element::Type& type, const ngraph::Shape& shape, const std::vector& values) + Constant(const element::Type& type, const StaticShape& shape, const std::vector& values) : Constant(type, shape) { NODE_VALIDATION_CHECK(this, values.size() == 1 || values.size() == shape_size(m_shape), @@ -59,7 +59,7 @@ class OPENVINO_API Constant : public Op { } /// \brief Create uninitialized constant - Constant(const element::Type& type, const ngraph::Shape& shape); + Constant(const element::Type& type, const StaticShape& shape); /// \brief Constructs a uniform tensor constant. /// /// \param type The element type of the tensor constant. @@ -67,7 +67,7 @@ class OPENVINO_API Constant : public Op { /// \param value A scalar for initializing the uniform tensor constant. The /// value is broadcast to the specified shape. template ::value>::type> - Constant(const element::Type& type, const ngraph::Shape& shape, T value) : Constant(type, shape) { + Constant(const element::Type& type, const StaticShape& shape, T value) : Constant(type, shape) { fill_data(type, value); m_all_elements_bitwise_identical = true; } @@ -144,14 +144,14 @@ class OPENVINO_API Constant : public Op { /// \param type The element type of the tensor constant. /// \param shape The shape of the tensor constant. /// \param values A list of string values to use as the constant data. - Constant(const element::Type& type, const ngraph::Shape& shape, const std::vector& values); + Constant(const element::Type& type, const StaticShape& shape, const std::vector& values); /// \brief Constructs a tensor constant with the supplied data /// /// \param type The element type of the tensor constant. /// \param shape The shape of the tensor constant. /// \param data A void* to constant data. - Constant(const element::Type& type, const ngraph::Shape& shape, const void* data); + Constant(const element::Type& type, const StaticShape& shape, const void* data); /// \brief Constructs a tensor constant with the supplied data /// @@ -160,7 +160,7 @@ class OPENVINO_API Constant : public Op { /// \param data A pointer to pre-allocated shared data. 
template Constant(const element::Type& type, - const ngraph::Shape& shape, + const StaticShape& shape, std::shared_ptr> data) : m_element_type(type), m_shape(shape) { @@ -169,7 +169,7 @@ class OPENVINO_API Constant : public Op { } Constant(const Constant& other); - Constant(const Constant& other, const ngraph::Shape& new_shape); + Constant(const Constant& other, const StaticShape& new_shape); Constant& operator=(const Constant&) = delete; ~Constant() override; @@ -196,7 +196,7 @@ class OPENVINO_API Constant : public Op { /// \brief Returns the value of the constant node as a Shape object /// Can only be used on element::i64 nodes and interprets /// negative values as zeros. - ngraph::Shape get_shape_val() const; + StaticShape get_shape_val() const; /// \brief Returns the value of the constant node as a Strides /// object /// Can only be used on element::i64 nodes and interprets @@ -228,7 +228,7 @@ class OPENVINO_API Constant : public Op { /// /// \param shape The shape of the tensor constant. OPENVINO_DEPRECATED("Use Constant c-tor with shape argument instead") - void set_data_shape(const ngraph::Shape& shape); + void set_data_shape(const StaticShape& shape); /// \brief Wrapper around constructing a shared_ptr of a Constant /// @@ -237,7 +237,7 @@ class OPENVINO_API Constant : public Op { /// \param values A vector of values to use as the constant data. template static std::shared_ptr create(const element::Type& type, - const ngraph::Shape& shape, + const StaticShape& shape, const std::vector& values) { return std::make_shared(type, shape, values); } @@ -249,7 +249,7 @@ class OPENVINO_API Constant : public Op { /// \param values An initializer_list of values to use as the constant data. template static std::shared_ptr create(const element::Type& type, - const ngraph::Shape& shape, + const StaticShape& shape, std::initializer_list values) { return std::make_shared(type, shape, std::vector{values}); } @@ -259,7 +259,7 @@ class OPENVINO_API Constant : public Op { /// \param type The element type of the tensor constant. /// \param shape The shape of the tensor constant. /// \param memory An continues memory chunk which contains the constant data. - static std::shared_ptr create(const element::Type& type, const ngraph::Shape& shape, const void* memory) { + static std::shared_ptr create(const element::Type& type, const StaticShape& shape, const void* memory) { return std::make_shared(type, shape, memory); } @@ -701,7 +701,7 @@ class OPENVINO_API Constant : public Op { } element::Type m_element_type; - ngraph::Shape m_shape{}; + StaticShape m_shape{}; std::shared_ptr m_data; bool m_all_elements_bitwise_identical; bool m_alloc_buffer_on_visit_attributes = true; diff --git a/ngraph/core/include/openvino/op/convolution.hpp b/ngraph/core/include/openvino/op/convolution.hpp index 8c5dc16b1c2bee..c40b9713c110de 100644 --- a/ngraph/core/include/openvino/op/convolution.hpp +++ b/ngraph/core/include/openvino/op/convolution.hpp @@ -167,8 +167,8 @@ class OPENVINO_API ConvolutionBackpropData : public Op { clone_with_new_inputs(const OutputVector& new_args) const override; /// \return The output spatial dimensions shape. - const PartialShape get_output_shape() const; - void set_output_shape(const ngraph::Shape& output_shape); + const Shape get_output_shape() const; + void set_output_shape(const StaticShape& output_shape); /// \return The strides from the forward prop. 
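// ----------------------------------------------------------------------------
// Editorial sketch (not part of the patch): creating a constant through the
// StaticShape-based Constant API above; the ov::op::v0 namespace is assumed
// from the usual OpenVINO layout.
#include "openvino/op/constant.hpp"

inline std::shared_ptr<ov::op::v0::Constant> make_constant() {
    // 2x2 f32 tensor; values must hold either one broadcastable value or
    // shape_size(shape) values (here, 4)
    return ov::op::v0::Constant::create(ov::element::f32,
                                        ov::StaticShape{2, 2},
                                        std::vector<float>{1.0f, 2.0f, 3.0f, 4.0f});
}
// ----------------------------------------------------------------------------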
const Strides& get_strides() const { return m_strides; } void set_strides(const Strides& strides) { m_strides = strides; } diff --git a/ngraph/core/include/openvino/op/extractimagepatches.hpp b/ngraph/core/include/openvino/op/extractimagepatches.hpp index bd738d0cf105a5..bb94e908557cc8 100644 --- a/ngraph/core/include/openvino/op/extractimagepatches.hpp +++ b/ngraph/core/include/openvino/op/extractimagepatches.hpp @@ -24,9 +24,9 @@ class OPENVINO_API ExtractImagePatches : public Op { /// \param auto_pad Padding type. it can be any value from /// valid, same_lower, same_upper ExtractImagePatches(const Output& image, - const ngraph::Shape& sizes, + const StaticShape& sizes, const Strides& strides, - const ngraph::Shape& rates, + const StaticShape& rates, const PadType& auto_pad); void validate_and_infer_types() override; @@ -34,10 +34,10 @@ class OPENVINO_API ExtractImagePatches : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const ngraph::Shape& get_sizes() const { + const StaticShape& get_sizes() const { return m_patch_sizes; } - void set_sizes(const ngraph::Shape& sizes) { + void set_sizes(const StaticShape& sizes) { m_patch_sizes = sizes; } const Strides& get_strides() const { @@ -46,10 +46,10 @@ class OPENVINO_API ExtractImagePatches : public Op { void set_strides(const Strides& strides) { m_patch_movement_strides = strides; } - const ngraph::Shape& get_rates() const { + const StaticShape& get_rates() const { return m_patch_selection_rates; } - void set_rates(const ngraph::Shape& rates) { + void set_rates(const StaticShape& rates) { m_patch_selection_rates = rates; } const PadType& get_auto_pad() const { @@ -60,9 +60,9 @@ class OPENVINO_API ExtractImagePatches : public Op { } private: - ngraph::Shape m_patch_sizes; + StaticShape m_patch_sizes; Strides m_patch_movement_strides; - ngraph::Shape m_patch_selection_rates; + StaticShape m_patch_selection_rates; PadType m_padding; }; } // namespace v3 diff --git a/ngraph/core/include/openvino/op/group_conv.hpp b/ngraph/core/include/openvino/op/group_conv.hpp index b2001468eabd0d..f2db14bbd56a5b 100644 --- a/ngraph/core/include/openvino/op/group_conv.hpp +++ b/ngraph/core/include/openvino/op/group_conv.hpp @@ -215,8 +215,8 @@ class OPENVINO_API GroupConvolutionBackpropData : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; /// \return The spatial shape of the output. - const PartialShape get_convolution_output_shape() const; - void set_output_shape(const ngraph::Shape& output_shape); + const Shape get_convolution_output_shape() const; + void set_output_shape(const StaticShape& output_shape); /// \return The strides from the forward prop. const Strides& get_strides() const { return m_strides; diff --git a/ngraph/core/include/openvino/op/interpolate.hpp b/ngraph/core/include/openvino/op/interpolate.hpp index f66f1c226f1ef3..0f989fa0b067bd 100644 --- a/ngraph/core/include/openvino/op/interpolate.hpp +++ b/ngraph/core/include/openvino/op/interpolate.hpp @@ -251,7 +251,7 @@ class OPENVINO_API Interpolate : public Op { /// \param input_shape Shape of input data. /// /// \return Padded input shape, i.e. input_shape + pads_begin + pads_end - PartialShape get_padded_input_shape(const PartialShape& input_shape) const; + Shape get_padded_input_shape(const Shape& input_shape) const; /// \brief Infers output shape using scales. 
/// @@ -259,17 +259,17 @@ class OPENVINO_API Interpolate : public Op { /// \param axes Interpolation axes /// \param scales Scales for interpolated axes /// \param padded_input_shape input shape after padding - void infer_using_scales(PartialShape& output_shape, + void infer_using_scales(Shape& output_shape, const std::vector& axes, const std::vector& scales, - const PartialShape& padded_input_shape) const; + const Shape& padded_input_shape) const; /// \brief Infers output shape using sizes. /// /// \param output_shape[in,out] output shape /// \param axes Interpolation axes /// \param sizes sizes for interpolated axes - void infer_using_shapes(PartialShape& output_shape, + void infer_using_shapes(Shape& output_shape, const std::vector& axes, const std::vector& sizes) const; }; diff --git a/ngraph/core/include/openvino/op/max_pool.hpp b/ngraph/core/include/openvino/op/max_pool.hpp index d64660961d2f8a..ec8723c19dc378 100644 --- a/ngraph/core/include/openvino/op/max_pool.hpp +++ b/ngraph/core/include/openvino/op/max_pool.hpp @@ -31,9 +31,9 @@ class OPENVINO_API MaxPool : public op::util::MaxPoolBase { /// \param auto_pad The pad type for automatically computing padding sizes. MaxPool(const Output& arg, const Strides& strides, - const ngraph::Shape& pads_begin, - const ngraph::Shape& pads_end, - const ngraph::Shape& kernel, + const StaticShape& pads_begin, + const StaticShape& pads_end, + const StaticShape& kernel, const op::RoundingType rounding_type = op::RoundingType::FLOOR, const PadType auto_pad = op::PadType::EXPLICIT); @@ -83,9 +83,9 @@ class OPENVINO_API MaxPool : public op::util::MaxPoolBase { MaxPool(const Output& arg, const Strides& strides, const Strides& dilations, - const ngraph::Shape& pads_begin, - const ngraph::Shape& pads_end, - const ngraph::Shape& kernel, + const StaticShape& pads_begin, + const StaticShape& pads_end, + const StaticShape& kernel, const op::RoundingType rounding_type = op::RoundingType::FLOOR, const PadType auto_pad = op::PadType::EXPLICIT, const element::Type index_element_type = element::i64, diff --git a/ngraph/core/include/openvino/op/ops.hpp b/ngraph/core/include/openvino/op/ops.hpp index 773986c918af9f..194dd60b697983 100644 --- a/ngraph/core/include/openvino/op/ops.hpp +++ b/ngraph/core/include/openvino/op/ops.hpp @@ -83,6 +83,7 @@ #include "openvino/op/logical_and.hpp" #include "openvino/op/logical_not.hpp" #include "openvino/op/logical_or.hpp" +#include "openvino/op/logical_xor.hpp" #include "openvino/op/loop.hpp" #include "openvino/op/lrn.hpp" #include "openvino/op/lstm_cell.hpp" diff --git a/ngraph/core/include/openvino/op/parameter.hpp b/ngraph/core/include/openvino/op/parameter.hpp index 7878f582927616..cdb64bacd2b00b 100644 --- a/ngraph/core/include/openvino/op/parameter.hpp +++ b/ngraph/core/include/openvino/op/parameter.hpp @@ -23,7 +23,7 @@ class OPENVINO_API Parameter : public op::Op { /// /// \param element_type The element type of the parameter. /// \param pshape The partial shape of the parameter. 
- Parameter(const ngraph::element::Type& element_type, const PartialShape& pshape); + Parameter(const ngraph::element::Type& element_type, const Shape& pshape); bool visit_attributes(AttributeVisitor& visitor) override; @@ -34,13 +34,13 @@ class OPENVINO_API Parameter : public op::Op { bool is_relevant_to_shapes() const; void set_is_relevant_to_shapes(bool is_relevant); - const PartialShape& get_partial_shape() const { + const Shape& get_partial_shape() const { return m_partial_shape; } - PartialShape& get_partial_shape() { + Shape& get_partial_shape() { return m_partial_shape; } - void set_partial_shape(const PartialShape& partial_shape) { + void set_partial_shape(const Shape& partial_shape) { m_partial_shape = partial_shape; } const element::Type& get_element_type() const { @@ -51,7 +51,7 @@ class OPENVINO_API Parameter : public op::Op { } protected: - PartialShape m_partial_shape; + Shape m_partial_shape; element::Type m_element_type; bool m_is_relevant_to_shapes{false}; }; diff --git a/ngraph/core/include/openvino/op/reshape.hpp b/ngraph/core/include/openvino/op/reshape.hpp index b1d0bbaa2acc43..ed1e3c9aad964a 100644 --- a/ngraph/core/include/openvino/op/reshape.hpp +++ b/ngraph/core/include/openvino/op/reshape.hpp @@ -58,7 +58,7 @@ class OPENVINO_API Reshape : public Op { private: void calculate_output_shape(std::vector& reshape_pattern, const int64_t& minus_one_idx, - const PartialShape& input_pshape, + const Shape& input_pshape, std::vector& output_shape) const; }; } // namespace v1 diff --git a/ngraph/core/include/openvino/op/roi_pooling.hpp b/ngraph/core/include/openvino/op/roi_pooling.hpp index d81a14cf144bcc..f948bd30ed6721 100644 --- a/ngraph/core/include/openvino/op/roi_pooling.hpp +++ b/ngraph/core/include/openvino/op/roi_pooling.hpp @@ -23,7 +23,7 @@ class OPENVINO_API ROIPooling : public Op { /// \param method Method of pooling - Max or Bilinear ROIPooling(const Output& input, const Output& coords, - const ngraph::Shape& output_size, + const StaticShape& output_size, const float spatial_scale, const std::string& method = "max"); @@ -31,7 +31,7 @@ class OPENVINO_API ROIPooling : public Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - const ngraph::Shape& get_output_size() const { + const StaticShape& get_output_size() const { return m_output_size; } float get_spatial_scale() const { @@ -43,7 +43,7 @@ class OPENVINO_API ROIPooling : public Op { bool visit_attributes(AttributeVisitor& visitor) override; private: - ngraph::Shape m_output_size{0, 0}; + StaticShape m_output_size{0, 0}; float m_spatial_scale; std::string m_method = "max"; }; diff --git a/ngraph/core/include/openvino/op/topk.hpp b/ngraph/core/include/openvino/op/topk.hpp index dc0c2ddfeb4e21..1c82459ada4910 100644 --- a/ngraph/core/include/openvino/op/topk.hpp +++ b/ngraph/core/include/openvino/op/topk.hpp @@ -105,9 +105,9 @@ class OPENVINO_API TopK : public Op { template size_t validate_and_get_k(const std::shared_ptr& k_constant) const; - ngraph::Shape compute_output_shape(const std::string& node_description, - const PartialShape input_partial_shape, - const int64_t k) const; + StaticShape compute_output_shape(const std::string& node_description, + const Shape input_partial_shape, + const int64_t k) const; void set_axis(const Rank input_rank, const int64_t axis); }; } // namespace v1 diff --git a/ngraph/core/include/openvino/op/util/broadcast_base.hpp b/ngraph/core/include/openvino/op/util/broadcast_base.hpp index d06771b621221e..63d1b0ef3efe19 100644 --- 
a/ngraph/core/include/openvino/op/util/broadcast_base.hpp +++ b/ngraph/core/include/openvino/op/util/broadcast_base.hpp @@ -53,31 +53,31 @@ class OPENVINO_API BroadcastBase : public Op { bool evaluate_broadcast(const HostTensorPtr& arg0, const HostTensorPtr& out, const std::pair& pair_broadcast_axes, - const ngraph::Shape& output_shape) const; + const StaticShape& output_shape) const; bool evaluate_broadcast(const HostTensorPtr& arg0, const HostTensorPtr& out, const AxisSet& broadcast_axes) const; bool evaluate_lower(const HostTensorVector& outputs) const override; bool evaluate_upper(const HostTensorVector& outputs) const override; - PartialShape get_result_shape_pdpd(const PartialShape& arg0_shape, - const PartialShape& target_shape, - const op::BroadcastModeSpec& broadcast_spec) const; + Shape get_result_shape_pdpd(const Shape& arg0_shape, + const Shape& target_shape, + const op::BroadcastModeSpec& broadcast_spec) const; - void validate_target_shape_numpy(const PartialShape& arg_shape, const PartialShape& target_shape) const; + void validate_target_shape_numpy(const Shape& arg_shape, const Shape& target_shape) const; - static std::pair get_broadcast_axes_numpy_pdpd(const ngraph::Shape& arg_shape, - const ngraph::Shape& result_shape, + static std::pair get_broadcast_axes_numpy_pdpd(const StaticShape& arg_shape, + const StaticShape& result_shape, const op::BroadcastModeSpec& broadcast_spec); static std::pair get_broadcast_axes_none(const AxisVector& axes_mapping_val, const size_t target_shape); - void validate_target_shape_none(const PartialShape& arg_shape, + void validate_target_shape_none(const Shape& arg_shape, const AxisVector& axes_mapping_val, - const PartialShape& target_shape) const; + const Shape& target_shape) const; - ngraph::Shape get_target_shape(const HostTensorPtr& input1) const; + StaticShape get_target_shape(const HostTensorPtr& input1) const; }; } // namespace util } // namespace op diff --git a/ngraph/core/include/openvino/op/util/elementwise_args.hpp b/ngraph/core/include/openvino/op/util/elementwise_args.hpp index 90961bee7ac7eb..ffdae129ba3db6 100644 --- a/ngraph/core/include/openvino/op/util/elementwise_args.hpp +++ b/ngraph/core/include/openvino/op/util/elementwise_args.hpp @@ -9,7 +9,7 @@ namespace ov { namespace op { namespace util { -std::tuple validate_and_infer_elementwise_args( +std::tuple validate_and_infer_elementwise_args( Node* node, const op::AutoBroadcastSpec& autob = op::AutoBroadcastSpec()); } diff --git a/ngraph/core/include/openvino/op/util/max_pool_base.hpp b/ngraph/core/include/openvino/op/util/max_pool_base.hpp index 99eb04b7c4fd53..df95daf28315d0 100644 --- a/ngraph/core/include/openvino/op/util/max_pool_base.hpp +++ b/ngraph/core/include/openvino/op/util/max_pool_base.hpp @@ -25,19 +25,19 @@ class OPENVINO_API MaxPoolBase : public Op { /// \param auto_pad The pad type for automatically computing padding sizes. MaxPoolBase(const Output& arg, const Strides& strides, - const ngraph::Shape& pads_begin, - const ngraph::Shape& pads_end, - const ngraph::Shape& kernel, + const StaticShape& pads_begin, + const StaticShape& pads_end, + const StaticShape& kernel, const op::RoundingType rounding_mode = op::RoundingType::FLOOR, const PadType auto_pad = op::PadType::EXPLICIT); void validate_and_infer_types() override; /// \return The kernel shape. 
- const ngraph::Shape& get_kernel() const { + const StaticShape& get_kernel() const { return m_kernel; } - void set_kernel(const ngraph::Shape& kernel) { + void set_kernel(const StaticShape& kernel) { m_kernel = kernel; } /// \return The strides. @@ -48,17 +48,17 @@ class OPENVINO_API MaxPoolBase : public Op { m_strides = strides; } /// \return The beginning of padding shape. - const ngraph::Shape& get_pads_begin() const { + const StaticShape& get_pads_begin() const { return m_pads_begin; } - void set_pads_begin(const ngraph::Shape& pads_begin) { + void set_pads_begin(const StaticShape& pads_begin) { m_pads_begin = pads_begin; } /// \return The end of padding shape. - const ngraph::Shape& get_pads_end() const { + const StaticShape& get_pads_end() const { return m_pads_end; } - void set_adding_above(const ngraph::Shape& pads_end) { + void set_adding_above(const StaticShape& pads_end) { m_pads_end = pads_end; } /// \return The pad type for pooling. @@ -77,17 +77,17 @@ class OPENVINO_API MaxPoolBase : public Op { } protected: - bool update_auto_padding(const PartialShape& in_shape, + bool update_auto_padding(const Shape& in_shape, const Strides& filter_dilations, - ngraph::Shape& new_pads_end, - ngraph::Shape& new_pads_begin) const; + StaticShape& new_pads_end, + StaticShape& new_pads_begin) const; - PartialShape infer_output_shape(const Strides& dilations); + Shape infer_output_shape(const Strides& dilations); - ngraph::Shape m_kernel; + StaticShape m_kernel; Strides m_strides; - ngraph::Shape m_pads_begin; - ngraph::Shape m_pads_end; + StaticShape m_pads_begin; + StaticShape m_pads_end; PadType m_auto_pad; op::RoundingType m_rounding_type; }; diff --git a/ngraph/core/include/openvino/op/util/reduction_base.hpp b/ngraph/core/include/openvino/op/util/reduction_base.hpp index 6cbffbd4bcdfd3..cb2afdfe6497f0 100644 --- a/ngraph/core/include/openvino/op/util/reduction_base.hpp +++ b/ngraph/core/include/openvino/op/util/reduction_base.hpp @@ -26,7 +26,7 @@ class OPENVINO_API ReductionBase : public Op { /// \param[in] keep_dims Reduction operation keeps dimensions. /// /// \return Partial shape of the output. - PartialShape infer_reduction_output_shape(const bool keep_dims); + Shape infer_reduction_output_shape(const bool keep_dims); public: OPENVINO_RTTI_DECLARATION; diff --git a/ngraph/core/include/openvino/op/util/rnn_cell_base.hpp b/ngraph/core/include/openvino/op/util/rnn_cell_base.hpp index fb4bb757714e41..4d41b76c73d883 100644 --- a/ngraph/core/include/openvino/op/util/rnn_cell_base.hpp +++ b/ngraph/core/include/openvino/op/util/rnn_cell_base.hpp @@ -82,7 +82,7 @@ class OPENVINO_API RNNCellBase : public Op { /// \param[in] input Vector with RNN-Cell op inputs in following order: /// X, initial_hidden_state, W, R and B. 
/// - void validate_input_rank_dimension(const std::vector& input); + void validate_input_rank_dimension(const std::vector& input); bool visit_attributes(AttributeVisitor& visitor) override; std::size_t get_hidden_size() const { diff --git a/ngraph/core/include/openvino/op/util/unary_elementwise_arithmetic.hpp b/ngraph/core/include/openvino/op/util/unary_elementwise_arithmetic.hpp index e17ec61ec04f3f..0b9be9877cb55a 100644 --- a/ngraph/core/include/openvino/op/util/unary_elementwise_arithmetic.hpp +++ b/ngraph/core/include/openvino/op/util/unary_elementwise_arithmetic.hpp @@ -10,25 +10,25 @@ namespace ov { namespace op { namespace util { // clang-format off - /// \brief Abstract base class for elementwise unary arithmetic operations, i.e., - /// operations where the same scalar arithmetic operation is applied to each - /// element. - /// - /// For example, if the underlying operation (determined by the subclass) is - /// \f$\mathit{op}(x)\f$, the input tensor \f$[[x,y],[z,w]]\f$ will be mapped to - /// \f$[[\mathit{op}(x),\mathit{op}(y)],[\mathit{op}(z),\mathit{op}(w)]]\f$. - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ----- | --------------------------------- | ------------------------------------------------------------------------ | - /// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape. The element type \f$N\f$ may be any numeric type. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - /// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{op}(\texttt{arg}[i_1,\dots,i_n])\f$. This will always have the same shape and element type as the input tensor. | +/// \brief Abstract base class for elementwise unary arithmetic operations, i.e., +/// operations where the same scalar arithmetic operation is applied to each +/// element. +/// +/// For example, if the underlying operation (determined by the subclass) is +/// \f$\mathit{op}(x)\f$, the input tensor \f$[[x,y],[z,w]]\f$ will be mapped to +/// \f$[[\mathit{op}(x),\mathit{op}(y)],[\mathit{op}(z),\mathit{op}(w)]]\f$. +/// +/// ## Inputs +/// +/// | | Type | Description | +/// | ----- | --------------------------------- | ------------------------------------------------------------------------ | +/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape. The element type \f$N\f$ may be any numeric type. | +/// +/// ## Output +/// +/// | Type | Description | +/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{op}(\texttt{arg}[i_1,\dots,i_n])\f$. This will always have the same shape and element type as the input tensor. 
| // clang-format on class OPENVINO_API UnaryElementwiseArithmetic : public Op { protected: diff --git a/ngraph/core/include/openvino/op/util/variable.hpp b/ngraph/core/include/openvino/op/util/variable.hpp index c9a54506545462..24bde493c4d1a6 100644 --- a/ngraph/core/include/openvino/op/util/variable.hpp +++ b/ngraph/core/include/openvino/op/util/variable.hpp @@ -7,7 +7,7 @@ #include #include -#include "openvino/core/partial_shape.hpp" +#include "openvino/core/shape.hpp" #include "openvino/core/type.hpp" #include "openvino/core/type/element_type.hpp" @@ -15,7 +15,7 @@ namespace ov { namespace op { namespace util { struct VariableInfo { - PartialShape data_shape; + Shape data_shape; element::Type data_type; std::string variable_id; @@ -29,7 +29,7 @@ class OPENVINO_API Variable { using Ptr = std::shared_ptr; Variable() = default; - explicit Variable(const VariableInfo& variable_info) : m_info(variable_info) {} + explicit Variable(VariableInfo variable_info) : m_info(std::move(variable_info)) {} VariableInfo get_info() const { return m_info; diff --git a/ngraph/core/include/openvino/opsets/opset1_tbl.hpp b/ngraph/core/include/openvino/opsets/opset1_tbl.hpp index 4bc2bfd7f60115..faa7100c00799c 100644 --- a/ngraph/core/include/openvino/opsets/opset1_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset1_tbl.hpp @@ -27,8 +27,8 @@ // #undef OPENVINO_OP // // This sample expands to a list like this: -// ngraph::op::Abs, -// ngraph::op::Acos, +// ov::op::Abs, +// ov::op::Acos, // ... // // It's that easy. You can use this for fun and profit. @@ -38,113 +38,113 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v0) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v1) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(Gather, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Interpolate, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) 
-OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v0) -OPENVINO_OP(LSTMSequence, ngraph::op::v0) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v1) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v0) -OPENVINO_OP(Range, ngraph::op::v0) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) -OPENVINO_OP(Reverse, ngraph::op::v1) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) -OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v0) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(TopK, ngraph::op::v1) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) -OPENVINO_OP(Xor, ngraph::op::v0) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v0) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v1) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) 
+OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Interpolate, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v0) +OPENVINO_OP(LSTMSequence, ov::op::v0) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NonMaxSuppression, ov::op::v1) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v0) +OPENVINO_OP(Range, ov::op::v0) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) +OPENVINO_OP(Reverse, ov::op::v1) +OPENVINO_OP(ReverseSequence, ov::op::v0) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v0) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(TopK, ov::op::v1) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) +OPENVINO_OP(Xor, ov::op::v0) diff --git a/ngraph/core/include/openvino/opsets/opset2_tbl.hpp b/ngraph/core/include/openvino/opsets/opset2_tbl.hpp index 139215d2f56c28..80ac1d7f658574 100644 --- a/ngraph/core/include/openvino/opsets/opset2_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset2_tbl.hpp @@ -7,137 +7,137 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v0) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v1) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, 
ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(Gather, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Interpolate, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v0) -OPENVINO_OP(LSTMSequence, ngraph::op::v0) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v0) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v1) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Interpolate, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) 
+OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v0) +OPENVINO_OP(LSTMSequence, ov::op::v0) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) -OPENVINO_OP(MVN, ngraph::op::v0) // Missing in opset1 +OPENVINO_OP(MVN, ov::op::v0) // Missing in opset1 -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v1) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v0) -OPENVINO_OP(Range, ngraph::op::v0) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NonMaxSuppression, ov::op::v1) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v0) +OPENVINO_OP(Range, ov::op::v0) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) // Missing in opset1 +OPENVINO_OP(ReorgYolo, ov::op::v0) // Missing in opset1 -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(Reverse, ngraph::op::v1) +// OPENVINO_OP(Reverse, ov::op::v1) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(RNNCell, ngraph::op::v0) +// OPENVINO_OP(RNNCell, ov::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) // Missing in opset1 +OPENVINO_OP(ROIPooling, ov::op::v0) // Missing in opset1 -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v0) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v0) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(ShuffleChannels, ngraph::op::v0) +// OPENVINO_OP(ShuffleChannels, ov::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, 
ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(TopK, ngraph::op::v1) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(TopK, ov::op::v1) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(Xor, ngraph::op::v0) +// OPENVINO_OP(Xor, ov::op::v0) // New operations added in opset2 -OPENVINO_OP(Gelu, ngraph::op::v0) -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(Gelu, ov::op::v0) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) diff --git a/ngraph/core/include/openvino/opsets/opset3_tbl.hpp b/ngraph/core/include/openvino/opsets/opset3_tbl.hpp index f1883c853700ce..c6369b99b68fcf 100644 --- a/ngraph/core/include/openvino/opsets/opset3_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset3_tbl.hpp @@ -7,153 +7,153 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v0) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v3) -OPENVINO_OP(Bucketize, ngraph::op::v3) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(CumSum, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(Gather, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) 
-OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Interpolate, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v0) -OPENVINO_OP(LSTMSequence, ngraph::op::v0) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v0) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v3) +OPENVINO_OP(Bucketize, ov::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(CumSum, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(ExtractImagePatches, ov::op::v3) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Interpolate, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v0) +OPENVINO_OP(LSTMSequence, ov::op::v0) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) -OPENVINO_OP(MVN, ngraph::op::v0) // Missing in opset1 +OPENVINO_OP(MVN, ov::op::v0) // Missing in opset1 -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v3) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) 
-OPENVINO_OP(Proposal, ngraph::op::v0) -OPENVINO_OP(Range, ngraph::op::v0) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NonMaxSuppression, ov::op::v3) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v0) +OPENVINO_OP(Range, ov::op::v0) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) // Missing in opset1 +OPENVINO_OP(ReorgYolo, ov::op::v0) // Missing in opset1 -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(Reverse, ngraph::op::v1) +// OPENVINO_OP(Reverse, ov::op::v1) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(RNNCell, ngraph::op::v0) +// OPENVINO_OP(RNNCell, ov::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) // Missing in opset1 +OPENVINO_OP(ROIPooling, ov::op::v0) // Missing in opset1 -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) // Superseded -// OPENVINO_OP(ShapeOf, ngraph::op::v0) +// OPENVINO_OP(ShapeOf, ov::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(Transpose, 
ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // Moved out of opset2, it was added to opset1 by mistake -// OPENVINO_OP(Xor, ngraph::op::v0) +// OPENVINO_OP(Xor, ov::op::v0) // New operations added in opset2 -OPENVINO_OP(Gelu, ngraph::op::v0) -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(Gelu, ov::op::v0) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) // New operations added in opset3 -OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -OPENVINO_OP(GRUCell, ngraph::op::v3) -OPENVINO_OP(NonZero, ngraph::op::v3) -OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(ROIAlign, ngraph::op::v3) -OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) -OPENVINO_OP(ScatterUpdate, ngraph::op::v3) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v3) -OPENVINO_OP(Assign, ngraph::op::v3) -OPENVINO_OP(ReadValue, ngraph::op::v3) -OPENVINO_OP(TopK, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagPackedSum, ov::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ov::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ov::op::v3) +OPENVINO_OP(GRUCell, ov::op::v3) +OPENVINO_OP(NonZero, ov::op::v3) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(ROIAlign, ov::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ov::op::v3) +OPENVINO_OP(ScatterUpdate, ov::op::v3) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v3) +OPENVINO_OP(Assign, ov::op::v3) +OPENVINO_OP(ReadValue, ov::op::v3) +OPENVINO_OP(TopK, ov::op::v3) diff --git a/ngraph/core/include/openvino/opsets/opset4_tbl.hpp b/ngraph/core/include/openvino/opsets/opset4_tbl.hpp index 142ea8cb1571a1..320916daf48d78 100644 --- a/ngraph/core/include/openvino/opsets/opset4_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset4_tbl.hpp @@ -7,146 +7,146 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v0) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v3) -OPENVINO_OP(Bucketize, ngraph::op::v3) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(CumSum, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(Gather, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) 
-OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v4) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) -OPENVINO_OP(MVN, ngraph::op::v0) -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v4) -OPENVINO_OP(Range, ngraph::op::v4) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) -OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v0) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v3) +OPENVINO_OP(Bucketize, ov::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(CumSum, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) 
+OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(ExtractImagePatches, ov::op::v3) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v4) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) +OPENVINO_OP(MVN, ov::op::v0) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v4) +OPENVINO_OP(Range, ov::op::v4) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) +OPENVINO_OP(ReorgYolo, ov::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) +OPENVINO_OP(ROIPooling, ov::op::v0) +OPENVINO_OP(ScatterNDUpdate, ov::op::v3) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // New operations added in opset2 -OPENVINO_OP(Gelu, ngraph::op::v0) -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(Gelu, ov::op::v0) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) // New operations added in opset3 -OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -OPENVINO_OP(GRUCell, ngraph::op::v3) -OPENVINO_OP(NonZero, ngraph::op::v3) 
-OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(ROIAlign, ngraph::op::v3) -OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) -OPENVINO_OP(ScatterUpdate, ngraph::op::v3) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v3) -OPENVINO_OP(Assign, ngraph::op::v3) -OPENVINO_OP(ReadValue, ngraph::op::v3) -OPENVINO_OP(TopK, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagPackedSum, ov::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ov::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ov::op::v3) +OPENVINO_OP(GRUCell, ov::op::v3) +OPENVINO_OP(NonZero, ov::op::v3) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(ROIAlign, ov::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ov::op::v3) +OPENVINO_OP(ScatterUpdate, ov::op::v3) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v3) +OPENVINO_OP(Assign, ov::op::v3) +OPENVINO_OP(ReadValue, ov::op::v3) +OPENVINO_OP(TopK, ov::op::v3) // New operations added in opset4 -OPENVINO_OP(Acosh, ngraph::op::v3) -OPENVINO_OP(Asinh, ngraph::op::v3) -OPENVINO_OP(Atanh, ngraph::op::v3) -OPENVINO_OP(CTCLoss, ngraph::op::v4) -OPENVINO_OP(HSwish, ngraph::op::v4) -OPENVINO_OP(Interpolate, ngraph::op::v4) -OPENVINO_OP(Mish, ngraph::op::v4) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v4) -OPENVINO_OP(ReduceL1, ngraph::op::v4) -OPENVINO_OP(ReduceL2, ngraph::op::v4) -OPENVINO_OP(SoftPlus, ngraph::op::v4) -OPENVINO_OP(Swish, ngraph::op::v4) +OPENVINO_OP(Acosh, ov::op::v3) +OPENVINO_OP(Asinh, ov::op::v3) +OPENVINO_OP(Atanh, ov::op::v3) +OPENVINO_OP(CTCLoss, ov::op::v4) +OPENVINO_OP(HSwish, ov::op::v4) +OPENVINO_OP(Interpolate, ov::op::v4) +OPENVINO_OP(Mish, ov::op::v4) +OPENVINO_OP(NonMaxSuppression, ov::op::v4) +OPENVINO_OP(ReduceL1, ov::op::v4) +OPENVINO_OP(ReduceL2, ov::op::v4) +OPENVINO_OP(SoftPlus, ov::op::v4) +OPENVINO_OP(Swish, ov::op::v4) diff --git a/ngraph/core/include/openvino/opsets/opset5_tbl.hpp b/ngraph/core/include/openvino/opsets/opset5_tbl.hpp index bde996f32521fe..beeeef5395bd3d 100644 --- a/ngraph/core/include/openvino/opsets/opset5_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset5_tbl.hpp @@ -7,156 +7,156 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v5) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v3) -OPENVINO_OP(Bucketize, ngraph::op::v3) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(CumSum, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) 
-OPENVINO_OP(Gather, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v4) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) -OPENVINO_OP(MVN, ngraph::op::v0) -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v4) -OPENVINO_OP(Range, ngraph::op::v4) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) -OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v5) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v3) +OPENVINO_OP(Bucketize, ov::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, 
ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(CumSum, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(ExtractImagePatches, ov::op::v3) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v4) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) +OPENVINO_OP(MVN, ov::op::v0) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v4) +OPENVINO_OP(Range, ov::op::v4) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) +OPENVINO_OP(ReorgYolo, ov::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) +OPENVINO_OP(ROIPooling, ov::op::v0) +OPENVINO_OP(ScatterNDUpdate, ov::op::v3) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // New operations added in opset2 -OPENVINO_OP(Gelu, ngraph::op::v0) -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(Gelu, ov::op::v0) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) // New operations added in opset3 -OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) 
-OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -OPENVINO_OP(GRUCell, ngraph::op::v3) -OPENVINO_OP(NonZero, ngraph::op::v3) -OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(ROIAlign, ngraph::op::v3) -OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) -OPENVINO_OP(ScatterUpdate, ngraph::op::v3) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v3) -OPENVINO_OP(Assign, ngraph::op::v3) -OPENVINO_OP(ReadValue, ngraph::op::v3) -OPENVINO_OP(TopK, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagPackedSum, ov::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ov::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ov::op::v3) +OPENVINO_OP(GRUCell, ov::op::v3) +OPENVINO_OP(NonZero, ov::op::v3) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(ROIAlign, ov::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ov::op::v3) +OPENVINO_OP(ScatterUpdate, ov::op::v3) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v3) +OPENVINO_OP(Assign, ov::op::v3) +OPENVINO_OP(ReadValue, ov::op::v3) +OPENVINO_OP(TopK, ov::op::v3) // New operations added in opset4 -OPENVINO_OP(Acosh, ngraph::op::v3) -OPENVINO_OP(Asinh, ngraph::op::v3) -OPENVINO_OP(Atanh, ngraph::op::v3) -OPENVINO_OP(CTCLoss, ngraph::op::v4) -OPENVINO_OP(HSwish, ngraph::op::v4) -OPENVINO_OP(Interpolate, ngraph::op::v4) -OPENVINO_OP(Mish, ngraph::op::v4) -OPENVINO_OP(ReduceL1, ngraph::op::v4) -OPENVINO_OP(ReduceL2, ngraph::op::v4) -OPENVINO_OP(SoftPlus, ngraph::op::v4) -OPENVINO_OP(Swish, ngraph::op::v4) +OPENVINO_OP(Acosh, ov::op::v3) +OPENVINO_OP(Asinh, ov::op::v3) +OPENVINO_OP(Atanh, ov::op::v3) +OPENVINO_OP(CTCLoss, ov::op::v4) +OPENVINO_OP(HSwish, ov::op::v4) +OPENVINO_OP(Interpolate, ov::op::v4) +OPENVINO_OP(Mish, ov::op::v4) +OPENVINO_OP(ReduceL1, ov::op::v4) +OPENVINO_OP(ReduceL2, ov::op::v4) +OPENVINO_OP(SoftPlus, ov::op::v4) +OPENVINO_OP(Swish, ov::op::v4) // New operations added in opset5 -OPENVINO_OP(GatherND, ngraph::op::v5) -OPENVINO_OP(GRUSequence, ngraph::op::v5) -OPENVINO_OP(HSigmoid, ngraph::op::v5) -OPENVINO_OP(LogSoftmax, ngraph::op::v5) -OPENVINO_OP(Loop, ngraph::op::v5) -OPENVINO_OP(LSTMSequence, ngraph::op::v5) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) -OPENVINO_OP(RNNSequence, ngraph::op::v5) -OPENVINO_OP(Round, ngraph::op::v5) +OPENVINO_OP(GatherND, ov::op::v5) +OPENVINO_OP(GRUSequence, ov::op::v5) +OPENVINO_OP(HSigmoid, ov::op::v5) +OPENVINO_OP(LogSoftmax, ov::op::v5) +OPENVINO_OP(Loop, ov::op::v5) +OPENVINO_OP(LSTMSequence, ov::op::v5) +OPENVINO_OP(NonMaxSuppression, ov::op::v5) +OPENVINO_OP(RNNSequence, ov::op::v5) +OPENVINO_OP(Round, ov::op::v5) diff --git a/ngraph/core/include/openvino/opsets/opset6_tbl.hpp b/ngraph/core/include/openvino/opsets/opset6_tbl.hpp index 27d04eacd425e9..eb23908c61da34 100644 --- a/ngraph/core/include/openvino/opsets/opset6_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset6_tbl.hpp @@ -7,165 +7,165 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v5) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v3) -OPENVINO_OP(Bucketize, ngraph::op::v3) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) 
-OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(CumSum, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(Gather, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v4) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v4) -OPENVINO_OP(Range, ngraph::op::v4) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) -OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) 
-OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v5) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v3) +OPENVINO_OP(Bucketize, ov::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(CumSum, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(ExtractImagePatches, ov::op::v3) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v4) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v4) +OPENVINO_OP(Range, ov::op::v4) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) +OPENVINO_OP(ReorgYolo, ov::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) +OPENVINO_OP(ROIPooling, ov::op::v0) +OPENVINO_OP(ScatterNDUpdate, ov::op::v3) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) 
+OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // New operations added in opset2 -OPENVINO_OP(Gelu, ngraph::op::v0) -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(Gelu, ov::op::v0) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) // New operations added in opset3 -OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -OPENVINO_OP(GRUCell, ngraph::op::v3) -OPENVINO_OP(NonZero, ngraph::op::v3) -OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(ROIAlign, ngraph::op::v3) -OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) -OPENVINO_OP(ScatterUpdate, ngraph::op::v3) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v3) -OPENVINO_OP(TopK, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagPackedSum, ov::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ov::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ov::op::v3) +OPENVINO_OP(GRUCell, ov::op::v3) +OPENVINO_OP(NonZero, ov::op::v3) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(ROIAlign, ov::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ov::op::v3) +OPENVINO_OP(ScatterUpdate, ov::op::v3) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v3) +OPENVINO_OP(TopK, ov::op::v3) // New operations added in opset4 -OPENVINO_OP(Acosh, ngraph::op::v3) -OPENVINO_OP(Asinh, ngraph::op::v3) -OPENVINO_OP(Atanh, ngraph::op::v3) -OPENVINO_OP(CTCLoss, ngraph::op::v4) -OPENVINO_OP(HSwish, ngraph::op::v4) -OPENVINO_OP(Interpolate, ngraph::op::v4) -OPENVINO_OP(Mish, ngraph::op::v4) -OPENVINO_OP(ReduceL1, ngraph::op::v4) -OPENVINO_OP(ReduceL2, ngraph::op::v4) -OPENVINO_OP(SoftPlus, ngraph::op::v4) -OPENVINO_OP(Swish, ngraph::op::v4) +OPENVINO_OP(Acosh, ov::op::v3) +OPENVINO_OP(Asinh, ov::op::v3) +OPENVINO_OP(Atanh, ov::op::v3) +OPENVINO_OP(CTCLoss, ov::op::v4) +OPENVINO_OP(HSwish, ov::op::v4) +OPENVINO_OP(Interpolate, ov::op::v4) +OPENVINO_OP(Mish, ov::op::v4) +OPENVINO_OP(ReduceL1, ov::op::v4) +OPENVINO_OP(ReduceL2, ov::op::v4) +OPENVINO_OP(SoftPlus, ov::op::v4) +OPENVINO_OP(Swish, ov::op::v4) // New operations added in opset5 -OPENVINO_OP(GatherND, ngraph::op::v5) -OPENVINO_OP(GRUSequence, ngraph::op::v5) -OPENVINO_OP(HSigmoid, ngraph::op::v5) -OPENVINO_OP(LogSoftmax, ngraph::op::v5) -OPENVINO_OP(Loop, ngraph::op::v5) -OPENVINO_OP(LSTMSequence, ngraph::op::v5) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) -OPENVINO_OP(RNNSequence, ngraph::op::v5) -OPENVINO_OP(Round, ngraph::op::v5) +OPENVINO_OP(GatherND, ov::op::v5) +OPENVINO_OP(GRUSequence, ov::op::v5) +OPENVINO_OP(HSigmoid, ov::op::v5) +OPENVINO_OP(LogSoftmax, ov::op::v5) +OPENVINO_OP(Loop, ov::op::v5) +OPENVINO_OP(LSTMSequence, ov::op::v5) +OPENVINO_OP(NonMaxSuppression, ov::op::v5) +OPENVINO_OP(RNNSequence, ov::op::v5) +OPENVINO_OP(Round, ov::op::v5) // New operations added in opset6 -OPENVINO_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) 
-OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) -OPENVINO_OP(GatherElements, ngraph::op::v6) -OPENVINO_OP(MVN, ngraph::op::v6) -OPENVINO_OP(Assign, ngraph::op::v6) // new version -OPENVINO_OP(ReadValue, ngraph::op::v6) // new version +OPENVINO_OP(CTCGreedyDecoderSeqLen, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronDetectionOutput, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronTopKROIs, ov::op::v6) +OPENVINO_OP(GatherElements, ov::op::v6) +OPENVINO_OP(MVN, ov::op::v6) +OPENVINO_OP(Assign, ov::op::v6) // new version +OPENVINO_OP(ReadValue, ov::op::v6) // new version diff --git a/ngraph/core/include/openvino/opsets/opset7_tbl.hpp b/ngraph/core/include/openvino/opsets/opset7_tbl.hpp index 7804a8dfe7ef13..169a0534dd0cd1 100644 --- a/ngraph/core/include/openvino/opsets/opset7_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset7_tbl.hpp @@ -7,171 +7,171 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v5) -OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v3) -OPENVINO_OP(Bucketize, ngraph::op::v3) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(CumSum, ngraph::op::v0) -OPENVINO_OP(DeformableConvolution, ngraph::op::v1) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(Gather, ngraph::op::v7) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v4) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(MaxPool, ngraph::op::v1) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) -OPENVINO_OP(Negative, ngraph::op::v0) 
-OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v4) -OPENVINO_OP(Range, ngraph::op::v4) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) -OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) -OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v5) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v3) +OPENVINO_OP(Bucketize, ov::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(CumSum, ov::op::v0) +OPENVINO_OP(DeformableConvolution, ov::op::v1) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(ExtractImagePatches, ov::op::v3) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(Gather, ov::op::v7) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, 
ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v4) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(MaxPool, ov::op::v1) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v4) +OPENVINO_OP(Range, ov::op::v4) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) +OPENVINO_OP(ReorgYolo, ov::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) +OPENVINO_OP(ROIPooling, ov::op::v0) +OPENVINO_OP(ScatterNDUpdate, ov::op::v3) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) +OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // New operations added in opset2 -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) // New operations added in opset3 -OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -OPENVINO_OP(GRUCell, ngraph::op::v3) -OPENVINO_OP(NonZero, ngraph::op::v3) -OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(ROIAlign, ngraph::op::v3) -OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) -OPENVINO_OP(ScatterUpdate, ngraph::op::v3) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v3) -OPENVINO_OP(TopK, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagPackedSum, ov::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ov::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ov::op::v3) +OPENVINO_OP(GRUCell, ov::op::v3) +OPENVINO_OP(NonZero, ov::op::v3) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(ROIAlign, ov::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ov::op::v3) +OPENVINO_OP(ScatterUpdate, ov::op::v3) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v3) +OPENVINO_OP(TopK, ov::op::v3) // New operations added in opset4 -OPENVINO_OP(Acosh, ngraph::op::v3) -OPENVINO_OP(Asinh, ngraph::op::v3) -OPENVINO_OP(Atanh, ngraph::op::v3) 
-OPENVINO_OP(CTCLoss, ngraph::op::v4) -OPENVINO_OP(HSwish, ngraph::op::v4) -OPENVINO_OP(Interpolate, ngraph::op::v4) -OPENVINO_OP(Mish, ngraph::op::v4) -OPENVINO_OP(ReduceL1, ngraph::op::v4) -OPENVINO_OP(ReduceL2, ngraph::op::v4) -OPENVINO_OP(SoftPlus, ngraph::op::v4) -OPENVINO_OP(Swish, ngraph::op::v4) +OPENVINO_OP(Acosh, ov::op::v3) +OPENVINO_OP(Asinh, ov::op::v3) +OPENVINO_OP(Atanh, ov::op::v3) +OPENVINO_OP(CTCLoss, ov::op::v4) +OPENVINO_OP(HSwish, ov::op::v4) +OPENVINO_OP(Interpolate, ov::op::v4) +OPENVINO_OP(Mish, ov::op::v4) +OPENVINO_OP(ReduceL1, ov::op::v4) +OPENVINO_OP(ReduceL2, ov::op::v4) +OPENVINO_OP(SoftPlus, ov::op::v4) +OPENVINO_OP(Swish, ov::op::v4) // New operations added in opset5 -OPENVINO_OP(GatherND, ngraph::op::v5) -OPENVINO_OP(GRUSequence, ngraph::op::v5) -OPENVINO_OP(HSigmoid, ngraph::op::v5) -OPENVINO_OP(LogSoftmax, ngraph::op::v5) -OPENVINO_OP(Loop, ngraph::op::v5) -OPENVINO_OP(LSTMSequence, ngraph::op::v5) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) -OPENVINO_OP(RNNSequence, ngraph::op::v5) -OPENVINO_OP(Round, ngraph::op::v5) +OPENVINO_OP(GatherND, ov::op::v5) +OPENVINO_OP(GRUSequence, ov::op::v5) +OPENVINO_OP(HSigmoid, ov::op::v5) +OPENVINO_OP(LogSoftmax, ov::op::v5) +OPENVINO_OP(Loop, ov::op::v5) +OPENVINO_OP(LSTMSequence, ov::op::v5) +OPENVINO_OP(NonMaxSuppression, ov::op::v5) +OPENVINO_OP(RNNSequence, ov::op::v5) +OPENVINO_OP(Round, ov::op::v5) // New operations added in opset6 -OPENVINO_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) -OPENVINO_OP(GatherElements, ngraph::op::v6) -OPENVINO_OP(MVN, ngraph::op::v6) -OPENVINO_OP(Assign, ngraph::op::v6) // new version -OPENVINO_OP(ReadValue, ngraph::op::v6) // new version +OPENVINO_OP(CTCGreedyDecoderSeqLen, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronDetectionOutput, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronTopKROIs, ov::op::v6) +OPENVINO_OP(GatherElements, ov::op::v6) +OPENVINO_OP(MVN, ov::op::v6) +OPENVINO_OP(Assign, ov::op::v6) // new version +OPENVINO_OP(ReadValue, ov::op::v6) // new version // New operations added in opset7 -OPENVINO_OP(DFT, ngraph::op::v7) -OPENVINO_OP(Einsum, ngraph::op::v7) -OPENVINO_OP(Gelu, ngraph::op::v7) -OPENVINO_OP(IDFT, ngraph::op::v7) -OPENVINO_OP(Roll, ngraph::op::v7) +OPENVINO_OP(DFT, ov::op::v7) +OPENVINO_OP(Einsum, ov::op::v7) +OPENVINO_OP(Gelu, ov::op::v7) +OPENVINO_OP(IDFT, ov::op::v7) +OPENVINO_OP(Roll, ov::op::v7) diff --git a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp index be2b7c303de00d..ae3eac8380b456 100644 --- a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp @@ -7,179 +7,179 @@ # define OPENVINO_OP(x, y) #endif -OPENVINO_OP(Abs, ngraph::op::v0) -OPENVINO_OP(Acos, ngraph::op::v0) -OPENVINO_OP(Add, ngraph::op::v1) -OPENVINO_OP(Asin, ngraph::op::v0) -OPENVINO_OP(Atan, ngraph::op::v0) -OPENVINO_OP(AvgPool, ngraph::op::v1) -OPENVINO_OP(BatchNormInference, ngraph::op::v5) 
-OPENVINO_OP(BinaryConvolution, ngraph::op::v1) -OPENVINO_OP(Broadcast, ngraph::op::v3) -OPENVINO_OP(Bucketize, ngraph::op::v3) -OPENVINO_OP(CTCGreedyDecoder, ngraph::op::v0) -OPENVINO_OP(Ceiling, ngraph::op::v0) -OPENVINO_OP(Clamp, ngraph::op::v0) -OPENVINO_OP(Concat, ngraph::op::v0) -OPENVINO_OP(Constant, ngraph::op) -OPENVINO_OP(Convert, ngraph::op::v0) -OPENVINO_OP(ConvertLike, ngraph::op::v1) -OPENVINO_OP(Convolution, ngraph::op::v1) -OPENVINO_OP(ConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(Cos, ngraph::op::v0) -OPENVINO_OP(Cosh, ngraph::op::v0) -OPENVINO_OP(CumSum, ngraph::op::v0) -OPENVINO_OP(DeformablePSROIPooling, ngraph::op::v1) -OPENVINO_OP(DepthToSpace, ngraph::op::v0) -OPENVINO_OP(DetectionOutput, ngraph::op::v0) -OPENVINO_OP(Divide, ngraph::op::v1) -OPENVINO_OP(Elu, ngraph::op::v0) -OPENVINO_OP(Erf, ngraph::op::v0) -OPENVINO_OP(Equal, ngraph::op::v1) -OPENVINO_OP(Exp, ngraph::op::v0) -OPENVINO_OP(ExtractImagePatches, ngraph::op::v3) -OPENVINO_OP(FakeQuantize, ngraph::op::v0) -OPENVINO_OP(Floor, ngraph::op::v0) -OPENVINO_OP(FloorMod, ngraph::op::v1) -OPENVINO_OP(GatherTree, ngraph::op::v1) -OPENVINO_OP(Greater, ngraph::op::v1) -OPENVINO_OP(GreaterEqual, ngraph::op::v1) -OPENVINO_OP(GroupConvolution, ngraph::op::v1) -OPENVINO_OP(GroupConvolutionBackpropData, ngraph::op::v1) -OPENVINO_OP(GRN, ngraph::op::v0) -OPENVINO_OP(HardSigmoid, ngraph::op::v0) -OPENVINO_OP(Less, ngraph::op::v1) -OPENVINO_OP(LessEqual, ngraph::op::v1) -OPENVINO_OP(Log, ngraph::op::v0) -OPENVINO_OP(LogicalAnd, ngraph::op::v1) -OPENVINO_OP(LogicalNot, ngraph::op::v1) -OPENVINO_OP(LogicalOr, ngraph::op::v1) -OPENVINO_OP(LogicalXor, ngraph::op::v1) -OPENVINO_OP(LRN, ngraph::op::v0) -OPENVINO_OP(LSTMCell, ngraph::op::v4) -OPENVINO_OP(MatMul, ngraph::op::v0) -OPENVINO_OP(Maximum, ngraph::op::v1) -OPENVINO_OP(Minimum, ngraph::op::v1) -OPENVINO_OP(Mod, ngraph::op::v1) -OPENVINO_OP(Multiply, ngraph::op::v1) -OPENVINO_OP(Negative, ngraph::op::v0) -OPENVINO_OP(NormalizeL2, ngraph::op::v0) -OPENVINO_OP(NotEqual, ngraph::op::v1) -OPENVINO_OP(OneHot, ngraph::op::v1) -OPENVINO_OP(PRelu, ngraph::op::v0) -OPENVINO_OP(PSROIPooling, ngraph::op::v0) -OPENVINO_OP(Pad, ngraph::op::v1) -OPENVINO_OP(Parameter, ngraph::op::v0) -OPENVINO_OP(Power, ngraph::op::v1) -OPENVINO_OP(PriorBox, ngraph::op::v0) -OPENVINO_OP(PriorBoxClustered, ngraph::op::v0) -OPENVINO_OP(Proposal, ngraph::op::v4) -OPENVINO_OP(Range, ngraph::op::v4) -OPENVINO_OP(Relu, ngraph::op::v0) -OPENVINO_OP(ReduceMax, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalAnd, ngraph::op::v1) -OPENVINO_OP(ReduceLogicalOr, ngraph::op::v1) -OPENVINO_OP(ReduceMean, ngraph::op::v1) -OPENVINO_OP(ReduceMin, ngraph::op::v1) -OPENVINO_OP(ReduceProd, ngraph::op::v1) -OPENVINO_OP(ReduceSum, ngraph::op::v1) -OPENVINO_OP(RegionYolo, ngraph::op::v0) -OPENVINO_OP(ReorgYolo, ngraph::op::v0) -OPENVINO_OP(Reshape, ngraph::op::v1) -OPENVINO_OP(Result, ngraph::op::v0) -OPENVINO_OP(ReverseSequence, ngraph::op::v0) -OPENVINO_OP(ROIPooling, ngraph::op::v0) -OPENVINO_OP(ScatterNDUpdate, ngraph::op::v3) -OPENVINO_OP(Select, ngraph::op::v1) -OPENVINO_OP(Selu, ngraph::op::v0) -OPENVINO_OP(Sign, ngraph::op::v0) -OPENVINO_OP(Sigmoid, ngraph::op::v0) -OPENVINO_OP(Sin, ngraph::op::v0) -OPENVINO_OP(Sinh, ngraph::op::v0) -OPENVINO_OP(Softmax, ngraph::op::v1) -OPENVINO_OP(Sqrt, ngraph::op::v0) -OPENVINO_OP(SpaceToDepth, ngraph::op::v0) -OPENVINO_OP(Split, ngraph::op::v1) -OPENVINO_OP(SquaredDifference, ngraph::op::v0) -OPENVINO_OP(Squeeze, ngraph::op::v0) -OPENVINO_OP(StridedSlice, ngraph::op::v1) 
-OPENVINO_OP(Subtract, ngraph::op::v1) -OPENVINO_OP(Tan, ngraph::op::v0) -OPENVINO_OP(Tanh, ngraph::op::v0) -OPENVINO_OP(TensorIterator, ngraph::op::v0) -OPENVINO_OP(Tile, ngraph::op::v0) -OPENVINO_OP(Transpose, ngraph::op::v1) -OPENVINO_OP(Unsqueeze, ngraph::op::v0) -OPENVINO_OP(VariadicSplit, ngraph::op::v1) +OPENVINO_OP(Abs, ov::op::v0) +OPENVINO_OP(Acos, ov::op::v0) +OPENVINO_OP(Add, ov::op::v1) +OPENVINO_OP(Asin, ov::op::v0) +OPENVINO_OP(Atan, ov::op::v0) +OPENVINO_OP(AvgPool, ov::op::v1) +OPENVINO_OP(BatchNormInference, ov::op::v5) +OPENVINO_OP(BinaryConvolution, ov::op::v1) +OPENVINO_OP(Broadcast, ov::op::v3) +OPENVINO_OP(Bucketize, ov::op::v3) +OPENVINO_OP(CTCGreedyDecoder, ov::op::v0) +OPENVINO_OP(Ceiling, ov::op::v0) +OPENVINO_OP(Clamp, ov::op::v0) +OPENVINO_OP(Concat, ov::op::v0) +OPENVINO_OP(Constant, ov::op::v0) +OPENVINO_OP(Convert, ov::op::v0) +OPENVINO_OP(ConvertLike, ov::op::v1) +OPENVINO_OP(Convolution, ov::op::v1) +OPENVINO_OP(ConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(Cos, ov::op::v0) +OPENVINO_OP(Cosh, ov::op::v0) +OPENVINO_OP(CumSum, ov::op::v0) +OPENVINO_OP(DeformablePSROIPooling, ov::op::v1) +OPENVINO_OP(DepthToSpace, ov::op::v0) +OPENVINO_OP(DetectionOutput, ov::op::v0) +OPENVINO_OP(Divide, ov::op::v1) +OPENVINO_OP(Elu, ov::op::v0) +OPENVINO_OP(Erf, ov::op::v0) +OPENVINO_OP(Equal, ov::op::v1) +OPENVINO_OP(Exp, ov::op::v0) +OPENVINO_OP(ExtractImagePatches, ov::op::v3) +OPENVINO_OP(FakeQuantize, ov::op::v0) +OPENVINO_OP(Floor, ov::op::v0) +OPENVINO_OP(FloorMod, ov::op::v1) +OPENVINO_OP(GatherTree, ov::op::v1) +OPENVINO_OP(Greater, ov::op::v1) +OPENVINO_OP(GreaterEqual, ov::op::v1) +OPENVINO_OP(GroupConvolution, ov::op::v1) +OPENVINO_OP(GroupConvolutionBackpropData, ov::op::v1) +OPENVINO_OP(GRN, ov::op::v0) +OPENVINO_OP(HardSigmoid, ov::op::v0) +OPENVINO_OP(Less, ov::op::v1) +OPENVINO_OP(LessEqual, ov::op::v1) +OPENVINO_OP(Log, ov::op::v0) +OPENVINO_OP(LogicalAnd, ov::op::v1) +OPENVINO_OP(LogicalNot, ov::op::v1) +OPENVINO_OP(LogicalOr, ov::op::v1) +OPENVINO_OP(LogicalXor, ov::op::v1) +OPENVINO_OP(LRN, ov::op::v0) +OPENVINO_OP(LSTMCell, ov::op::v4) +OPENVINO_OP(MatMul, ov::op::v0) +OPENVINO_OP(Maximum, ov::op::v1) +OPENVINO_OP(Minimum, ov::op::v1) +OPENVINO_OP(Mod, ov::op::v1) +OPENVINO_OP(Multiply, ov::op::v1) +OPENVINO_OP(Negative, ov::op::v0) +OPENVINO_OP(NormalizeL2, ov::op::v0) +OPENVINO_OP(NotEqual, ov::op::v1) +OPENVINO_OP(OneHot, ov::op::v1) +OPENVINO_OP(PRelu, ov::op::v0) +OPENVINO_OP(PSROIPooling, ov::op::v0) +OPENVINO_OP(Pad, ov::op::v1) +OPENVINO_OP(Parameter, ov::op::v0) +OPENVINO_OP(Power, ov::op::v1) +OPENVINO_OP(PriorBox, ov::op::v0) +OPENVINO_OP(PriorBoxClustered, ov::op::v0) +OPENVINO_OP(Proposal, ov::op::v4) +OPENVINO_OP(Range, ov::op::v4) +OPENVINO_OP(Relu, ov::op::v0) +OPENVINO_OP(ReduceMax, ov::op::v1) +OPENVINO_OP(ReduceLogicalAnd, ov::op::v1) +OPENVINO_OP(ReduceLogicalOr, ov::op::v1) +OPENVINO_OP(ReduceMean, ov::op::v1) +OPENVINO_OP(ReduceMin, ov::op::v1) +OPENVINO_OP(ReduceProd, ov::op::v1) +OPENVINO_OP(ReduceSum, ov::op::v1) +OPENVINO_OP(RegionYolo, ov::op::v0) +OPENVINO_OP(ReorgYolo, ov::op::v0) +OPENVINO_OP(Reshape, ov::op::v1) +OPENVINO_OP(Result, ov::op::v0) +OPENVINO_OP(ReverseSequence, ov::op::v0) +OPENVINO_OP(ROIPooling, ov::op::v0) +OPENVINO_OP(ScatterNDUpdate, ov::op::v3) +OPENVINO_OP(Select, ov::op::v1) +OPENVINO_OP(Selu, ov::op::v0) +OPENVINO_OP(Sign, ov::op::v0) +OPENVINO_OP(Sigmoid, ov::op::v0) +OPENVINO_OP(Sin, ov::op::v0) +OPENVINO_OP(Sinh, ov::op::v0) +OPENVINO_OP(Softmax, ov::op::v1) +OPENVINO_OP(Sqrt, ov::op::v0) 
+OPENVINO_OP(SpaceToDepth, ov::op::v0) +OPENVINO_OP(Split, ov::op::v1) +OPENVINO_OP(SquaredDifference, ov::op::v0) +OPENVINO_OP(Squeeze, ov::op::v0) +OPENVINO_OP(StridedSlice, ov::op::v1) +OPENVINO_OP(Subtract, ov::op::v1) +OPENVINO_OP(Tan, ov::op::v0) +OPENVINO_OP(Tanh, ov::op::v0) +OPENVINO_OP(TensorIterator, ov::op::v0) +OPENVINO_OP(Tile, ov::op::v0) +OPENVINO_OP(Transpose, ov::op::v1) +OPENVINO_OP(Unsqueeze, ov::op::v0) +OPENVINO_OP(VariadicSplit, ov::op::v1) // New operations added in opset2 -OPENVINO_OP(BatchToSpace, ngraph::op::v1) -OPENVINO_OP(SpaceToBatch, ngraph::op::v1) +OPENVINO_OP(BatchToSpace, ov::op::v1) +OPENVINO_OP(SpaceToBatch, ov::op::v1) // New operations added in opset3 -OPENVINO_OP(EmbeddingBagPackedSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingSegmentsSum, ngraph::op::v3) -OPENVINO_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) -OPENVINO_OP(GRUCell, ngraph::op::v3) -OPENVINO_OP(NonZero, ngraph::op::v3) -OPENVINO_OP(RNNCell, ngraph::op::v0) -OPENVINO_OP(ROIAlign, ngraph::op::v3) -OPENVINO_OP(ScatterElementsUpdate, ngraph::op::v3) -OPENVINO_OP(ScatterUpdate, ngraph::op::v3) -OPENVINO_OP(ShuffleChannels, ngraph::op::v0) -OPENVINO_OP(ShapeOf, ngraph::op::v3) -OPENVINO_OP(TopK, ngraph::op::v3) +OPENVINO_OP(EmbeddingBagPackedSum, ov::op::v3) +OPENVINO_OP(EmbeddingSegmentsSum, ov::op::v3) +OPENVINO_OP(EmbeddingBagOffsetsSum, ov::op::v3) +OPENVINO_OP(GRUCell, ov::op::v3) +OPENVINO_OP(NonZero, ov::op::v3) +OPENVINO_OP(RNNCell, ov::op::v0) +OPENVINO_OP(ROIAlign, ov::op::v3) +OPENVINO_OP(ScatterElementsUpdate, ov::op::v3) +OPENVINO_OP(ScatterUpdate, ov::op::v3) +OPENVINO_OP(ShuffleChannels, ov::op::v0) +OPENVINO_OP(ShapeOf, ov::op::v3) +OPENVINO_OP(TopK, ov::op::v3) // New operations added in opset4 -OPENVINO_OP(Acosh, ngraph::op::v3) -OPENVINO_OP(Asinh, ngraph::op::v3) -OPENVINO_OP(Atanh, ngraph::op::v3) -OPENVINO_OP(CTCLoss, ngraph::op::v4) -OPENVINO_OP(HSwish, ngraph::op::v4) -OPENVINO_OP(Interpolate, ngraph::op::v4) -OPENVINO_OP(Mish, ngraph::op::v4) -OPENVINO_OP(ReduceL1, ngraph::op::v4) -OPENVINO_OP(ReduceL2, ngraph::op::v4) -OPENVINO_OP(SoftPlus, ngraph::op::v4) -OPENVINO_OP(Swish, ngraph::op::v4) +OPENVINO_OP(Acosh, ov::op::v3) +OPENVINO_OP(Asinh, ov::op::v3) +OPENVINO_OP(Atanh, ov::op::v3) +OPENVINO_OP(CTCLoss, ov::op::v4) +OPENVINO_OP(HSwish, ov::op::v4) +OPENVINO_OP(Interpolate, ov::op::v4) +OPENVINO_OP(Mish, ov::op::v4) +OPENVINO_OP(ReduceL1, ov::op::v4) +OPENVINO_OP(ReduceL2, ov::op::v4) +OPENVINO_OP(SoftPlus, ov::op::v4) +OPENVINO_OP(Swish, ov::op::v4) // New operations added in opset5 -OPENVINO_OP(GatherND, ngraph::op::v5) -OPENVINO_OP(GRUSequence, ngraph::op::v5) -OPENVINO_OP(HSigmoid, ngraph::op::v5) -OPENVINO_OP(LogSoftmax, ngraph::op::v5) -OPENVINO_OP(Loop, ngraph::op::v5) -OPENVINO_OP(LSTMSequence, ngraph::op::v5) -OPENVINO_OP(NonMaxSuppression, ngraph::op::v5) -OPENVINO_OP(RNNSequence, ngraph::op::v5) -OPENVINO_OP(Round, ngraph::op::v5) +OPENVINO_OP(GatherND, ov::op::v5) +OPENVINO_OP(GRUSequence, ov::op::v5) +OPENVINO_OP(HSigmoid, ov::op::v5) +OPENVINO_OP(LogSoftmax, ov::op::v5) +OPENVINO_OP(Loop, ov::op::v5) +OPENVINO_OP(LSTMSequence, ov::op::v5) +OPENVINO_OP(NonMaxSuppression, ov::op::v5) +OPENVINO_OP(RNNSequence, ov::op::v5) +OPENVINO_OP(Round, ov::op::v5) // New operations added in opset6 -OPENVINO_OP(CTCGreedyDecoderSeqLen, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronDetectionOutput, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ngraph::op::v6) 
-OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ngraph::op::v6) -OPENVINO_OP(ExperimentalDetectronTopKROIs, ngraph::op::v6) -OPENVINO_OP(GatherElements, ngraph::op::v6) -OPENVINO_OP(MVN, ngraph::op::v6) -OPENVINO_OP(Assign, ngraph::op::v6) // new version -OPENVINO_OP(ReadValue, ngraph::op::v6) // new version +OPENVINO_OP(CTCGreedyDecoderSeqLen, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronDetectionOutput, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronGenerateProposalsSingleImage, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronPriorGridGenerator, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronROIFeatureExtractor, ov::op::v6) +OPENVINO_OP(ExperimentalDetectronTopKROIs, ov::op::v6) +OPENVINO_OP(GatherElements, ov::op::v6) +OPENVINO_OP(MVN, ov::op::v6) +OPENVINO_OP(Assign, ov::op::v6) // new version +OPENVINO_OP(ReadValue, ov::op::v6) // new version // New operations added in opset7 -OPENVINO_OP(DFT, ngraph::op::v7) -OPENVINO_OP(Einsum, ngraph::op::v7) -OPENVINO_OP(Gelu, ngraph::op::v7) -OPENVINO_OP(IDFT, ngraph::op::v7) -OPENVINO_OP(Roll, ngraph::op::v7) +OPENVINO_OP(DFT, ov::op::v7) +OPENVINO_OP(Einsum, ov::op::v7) +OPENVINO_OP(Gelu, ov::op::v7) +OPENVINO_OP(IDFT, ov::op::v7) +OPENVINO_OP(Roll, ov::op::v7) // New operations added in opset8 -OPENVINO_OP(Gather, ngraph::op::v8) -OPENVINO_OP(AdaptiveAvgPool, ngraph::op::v8) -OPENVINO_OP(AdaptiveMaxPool, ngraph::op::v8) -OPENVINO_OP(DeformableConvolution, ngraph::op::v8) -OPENVINO_OP(MatrixNms, ngraph::op::v8) -OPENVINO_OP(MaxPool, ngraph::op::v8) -OPENVINO_OP(MulticlassNms, ngraph::op::v8) -OPENVINO_OP(RandomUniform, ngraph::op::v8) -OPENVINO_OP(If, ngraph::op::v8) +OPENVINO_OP(Gather, ov::op::v8) +OPENVINO_OP(AdaptiveAvgPool, ov::op::v8) +OPENVINO_OP(AdaptiveMaxPool, ov::op::v8) +OPENVINO_OP(DeformableConvolution, ov::op::v8) +OPENVINO_OP(MatrixNms, ov::op::v8) +OPENVINO_OP(MaxPool, ov::op::v8) +OPENVINO_OP(MulticlassNms, ov::op::v8) +OPENVINO_OP(RandomUniform, ov::op::v8) +OPENVINO_OP(If, ov::op::v8) diff --git a/ngraph/core/include/openvino/pass/pattern/op/any.hpp b/ngraph/core/include/openvino/pass/pattern/op/any.hpp index 3552e25ebc09ce..c86e10b116a92d 100644 --- a/ngraph/core/include/openvino/pass/pattern/op/any.hpp +++ b/ngraph/core/include/openvino/pass/pattern/op/any.hpp @@ -19,11 +19,11 @@ class OPENVINO_API Any : public Pattern { const NodeTypeInfo& get_type_info() const override; /// \brief creates a Any node containing a sub-pattern described by \sa type and \sa /// shape. - Any(const element::Type& type, const PartialShape& s, ValuePredicate pred, const OutputVector& wrapped_values) + Any(const element::Type& type, const Shape& s, ValuePredicate pred, const OutputVector& wrapped_values) : Pattern(wrapped_values, pred) { set_output_type(0, type, s); } - Any(const element::Type& type, const PartialShape& s, NodePredicate pred, const NodeVector& wrapped_values) + Any(const element::Type& type, const Shape& s, NodePredicate pred, const NodeVector& wrapped_values) : Any(type, s, as_value_predicate(pred), as_output_vector(wrapped_values)) {} /// \brief creates a Any node containing a sub-pattern described by the type and /// shape of \sa node. 
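For context on the table hunks above: each *_tbl.hpp header is an X-macro table. It carries no definitions of its own, and every OPENVINO_OP(Name, Namespace) row is expanded by whichever consumer defines the macro before including the file, which is why these hunks are a pure rename of the namespace column (ngraph::op -> ov::op) with no behavioural change. A minimal self-contained sketch of the expansion mechanism, using a toy consumer and two sample rows (neither is code from this patch):

#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> registered;
    // Stand-in for a real consumer's '#define OPENVINO_OP(...)' followed by
    // '#include "openvino/opsets/opset8_tbl.hpp"'.
#define OPENVINO_OP(NAME, NAMESPACE) registered.push_back(#NAMESPACE "::" #NAME);
    OPENVINO_OP(Abs, ov::op::v0)   // sample rows mirroring the table format
    OPENVINO_OP(Add, ov::op::v1)
#undef OPENVINO_OP
    for (const auto& op : registered)
        std::cout << op << '\n';   // prints ov::op::v0::Abs and ov::op::v1::Add
}

Because consumers only ever see the expanded rows, switching the second column is enough to repoint every generated registration at the ov::op namespace.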
diff --git a/ngraph/core/include/openvino/pass/pattern/op/any_of.hpp b/ngraph/core/include/openvino/pass/pattern/op/any_of.hpp index ce07522173cbf5..c9061ebae5a010 100644 --- a/ngraph/core/include/openvino/pass/pattern/op/any_of.hpp +++ b/ngraph/core/include/openvino/pass/pattern/op/any_of.hpp @@ -24,14 +24,14 @@ class OPENVINO_API AnyOf : public Pattern { const NodeTypeInfo& get_type_info() const override; /// \brief creates a AnyOf node containing a sub-pattern described by \sa type and /// \sa shape. - AnyOf(const element::Type& type, const PartialShape& s, ValuePredicate pred, const OutputVector& wrapped_values) + AnyOf(const element::Type& type, const Shape& s, ValuePredicate pred, const OutputVector& wrapped_values) : Pattern(wrapped_values, pred) { if (wrapped_values.size() != 1) { throw Exception("AnyOf expects exactly one argument"); } set_output_type(0, type, s); } - AnyOf(const element::Type& type, const PartialShape& s, NodePredicate pred, const NodeVector& wrapped_values) + AnyOf(const element::Type& type, const Shape& s, NodePredicate pred, const NodeVector& wrapped_values) : AnyOf( type, s, diff --git a/ngraph/core/include/openvino/pass/pattern/op/branch.hpp b/ngraph/core/include/openvino/pass/pattern/op/branch.hpp index 902934f64abf85..a53564774a59c1 100644 --- a/ngraph/core/include/openvino/pass/pattern/op/branch.hpp +++ b/ngraph/core/include/openvino/pass/pattern/op/branch.hpp @@ -27,7 +27,7 @@ class OPENVINO_API Branch : public Pattern { /// \param pattern the destinationing pattern /// \param labels Labels where the destination may occur Branch() : Pattern(OutputVector{}) { - set_output_type(0, element::f32, ngraph::Shape{}); + set_output_type(0, element::f32, StaticShape{}); } void set_destination(const Output& destination) { diff --git a/ngraph/core/include/openvino/pass/pattern/op/label.hpp b/ngraph/core/include/openvino/pass/pattern/op/label.hpp index 507a8036ce49ab..69b0fc24fb5f8e 100644 --- a/ngraph/core/include/openvino/pass/pattern/op/label.hpp +++ b/ngraph/core/include/openvino/pass/pattern/op/label.hpp @@ -37,15 +37,12 @@ class OPENVINO_API Label : public Pattern { /// nullptr, /// OutputVector{add}); /// \endcode - Label(const element::Type& type, - const PartialShape& s, - const ValuePredicate pred, - const OutputVector& wrapped_values) + Label(const element::Type& type, const Shape& s, const ValuePredicate pred, const OutputVector& wrapped_values) : Pattern(OutputVector{wrap_values(wrapped_values)}, pred) { set_output_type(0, type, s); } - explicit Label(const element::Type& type = element::dynamic, const PartialShape& s = PartialShape::dynamic()) + explicit Label(const element::Type& type = element::dynamic, const Shape& s = Shape::dynamic()) : Label( type, s, @@ -54,13 +51,12 @@ class OPENVINO_API Label : public Pattern { }, OutputVector()) {} - Label(const element::Type& type, const PartialShape& s, ValuePredicate pred) - : Label(type, s, pred, OutputVector{}) {} + Label(const element::Type& type, const Shape& s, ValuePredicate pred) : Label(type, s, pred, OutputVector{}) {} - Label(const element::Type& type, const PartialShape& s, NodePredicate pred) + Label(const element::Type& type, const Shape& s, NodePredicate pred) : Label(type, s, as_value_predicate(pred), OutputVector{}) {} - Label(const element::Type& type, const PartialShape& s, const NodePredicate pred, const NodeVector& wrapped_values) + Label(const element::Type& type, const Shape& s, const NodePredicate pred, const NodeVector& wrapped_values) : Label(type, s, as_value_predicate(pred), 
as_output_vector(wrapped_values)) {} /// \brief creates a Label node containing a sub-pattern described by the type and diff --git a/ngraph/core/include/openvino/pass/pattern/op/wrap_type.hpp b/ngraph/core/include/openvino/pass/pattern/op/wrap_type.hpp index 01498b81b5f2d3..7b6e1859cab1dd 100644 --- a/ngraph/core/include/openvino/pass/pattern/op/wrap_type.hpp +++ b/ngraph/core/include/openvino/pass/pattern/op/wrap_type.hpp @@ -25,7 +25,7 @@ class OPENVINO_API WrapType : public Pattern { const OutputVector& input_values = {}) : Pattern(input_values, pred), m_wrapped_types({wrapped_type}) { - set_output_type(0, element::Type_t::dynamic, PartialShape::dynamic()); + set_output_type(0, element::Type_t::dynamic, Shape::dynamic()); } explicit WrapType( @@ -37,7 +37,7 @@ class OPENVINO_API WrapType : public Pattern { const OutputVector& input_values = {}) : Pattern(input_values, pred), m_wrapped_types(std::move(wrapped_types)) { - set_output_type(0, element::Type_t::dynamic, PartialShape::dynamic()); + set_output_type(0, element::Type_t::dynamic, Shape::dynamic()); } bool match_value(pattern::Matcher* matcher, diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/not_equal.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/not_equal.hpp index 7b10df0e22d197..fcc3206c2921f2 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/not_equal.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/not_equal.hpp @@ -18,17 +18,6 @@ namespace ngraph { namespace runtime { namespace reference { -template -void not_equal(const T* arg0, - const T* arg1, - char* out, - size_t count) // TODO: using char for bool, is this right? -{ - for (size_t i = 0; i < count; i++) { - out[i] = arg0[i] != arg1[i]; - } -} - template void not_equal(const T* arg0, const T* arg1, diff --git a/ngraph/core/src/descriptor/input.cpp b/ngraph/core/src/descriptor/input.cpp index 1e08d4734932a6..83a691190941c8 100644 --- a/ngraph/core/src/descriptor/input.cpp +++ b/ngraph/core/src/descriptor/input.cpp @@ -5,11 +5,11 @@ #include "openvino/core/descriptor/input.hpp" #include "ngraph/env_util.hpp" -#include "ngraph/node.hpp" #include "openvino/core/descriptor/output.hpp" +#include "openvino/core/node.hpp" #include "openvino/core/type/element_type.hpp" -ov::descriptor::Input::Input(ngraph::Node* node, size_t index, Output& output) +ov::descriptor::Input::Input(ov::Node* node, size_t index, Output& output) : m_node(node), m_index(index), m_output(&output), @@ -19,7 +19,7 @@ ov::descriptor::Input::Input(ngraph::Node* node, size_t index, Output& output) output.add_input(this); } -ov::descriptor::Input::Input(ngraph::Node* node, size_t index) +ov::descriptor::Input::Input(ov::Node* node, size_t index) : m_node(node), m_index(index), m_output(nullptr), @@ -46,7 +46,7 @@ void ov::descriptor::Input::replace_output(Output& new_output) { } } -void ov::descriptor::Input::replace_output(const std::shared_ptr& node, size_t i) { +void ov::descriptor::Input::replace_output(const std::shared_ptr& node, size_t i) { replace_output(node->m_outputs.at(i)); } @@ -78,11 +78,11 @@ std::shared_ptr ov::descriptor::Input::get_tensor_ptr() return m_output->get_tensor_ptr(); } -const ngraph::Shape& ov::descriptor::Input::get_shape() const { +const ov::StaticShape& ov::descriptor::Input::get_shape() const { return m_output->get_shape(); } -const ov::PartialShape& ov::descriptor::Input::get_partial_shape() const { +const ov::Shape& ov::descriptor::Input::get_partial_shape() const { return m_output->get_partial_shape(); } 
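The descriptor changes above follow the shape-type rename applied throughout this PR: the old ngraph::Shape (fully static extents) becomes ov::StaticShape, while the old ngraph::PartialShape (rank and dimensions may be dynamic) becomes plain ov::Shape; the get_shape()/get_partial_shape() getter names are kept even though "partial" now refers to ov::Shape. A short illustrative sketch of the resulting API surface (the helper function and the header path are assumptions for illustration, not code from this patch):

#include <stdexcept>

#include <openvino/core/node.hpp>  // assumed header location after the rename

// Return the static extents of output 0, checking the (possibly dynamic)
// ov::Shape first -- mirrors the getters whose return types change in this diff.
ov::StaticShape output_extents_or_throw(const ov::Node& node) {
    const ov::Shape& maybe_dynamic = node.get_output_partial_shape(0);
    if (maybe_dynamic.is_static()) {
        return node.get_output_shape(0);  // now typed ov::StaticShape
    }
    throw std::runtime_error("output 0 has a dynamic shape");
}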
diff --git a/ngraph/core/src/descriptor/output.cpp b/ngraph/core/src/descriptor/output.cpp index c90885bc5df8cb..b9dcb6de276d7e 100644 --- a/ngraph/core/src/descriptor/output.cpp +++ b/ngraph/core/src/descriptor/output.cpp @@ -47,7 +47,7 @@ const ngraph::Shape& ov::descriptor::Output::get_shape() const { return m_tensor->get_shape(); } -const ov::PartialShape& ov::descriptor::Output::get_partial_shape() const { +const ov::Shape& ov::descriptor::Output::get_partial_shape() const { return m_tensor->get_partial_shape(); } diff --git a/ngraph/core/src/descriptor/tensor.cpp b/ngraph/core/src/descriptor/tensor.cpp index 2abc343a2b985d..f5a6091a7c5127 100644 --- a/ngraph/core/src/descriptor/tensor.cpp +++ b/ngraph/core/src/descriptor/tensor.cpp @@ -8,21 +8,21 @@ using namespace std; -ov::descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name) +ov::descriptor::Tensor::Tensor(const element::Type& element_type, const Shape& pshape, const std::string& name) : m_element_type(element_type), m_partial_shape(pshape), m_name(name), m_shape_changed(true) {} ov::descriptor::Tensor::Tensor(const element::Type& element_type, - const PartialShape& pshape, + const Shape& pshape, ngraph::Node* node, size_t node_output_number) : m_element_type(element_type), m_partial_shape(pshape), m_shape_changed(true) {} -void ov::descriptor::Tensor::set_tensor_type(const element::Type& element_type, const PartialShape& pshape) { +void ov::descriptor::Tensor::set_tensor_type(const element::Type& element_type, const Shape& pshape) { set_element_type(element_type); set_partial_shape(pshape); } @@ -31,7 +31,7 @@ void ov::descriptor::Tensor::set_element_type(const element::Type& element_type) m_element_type = element_type; } -void ov::descriptor::Tensor::set_partial_shape(const PartialShape& partial_shape) { +void ov::descriptor::Tensor::set_partial_shape(const Shape& partial_shape) { m_partial_shape = partial_shape; m_shape_changed = true; } diff --git a/ngraph/core/src/function.cpp b/ngraph/core/src/function.cpp index 330747d2faa843..790cc37097c8b3 100644 --- a/ngraph/core/src/function.cpp +++ b/ngraph/core/src/function.cpp @@ -305,7 +305,7 @@ const ngraph::Shape& ov::Function::get_output_shape(size_t i) const { return m_results.at(i)->get_shape(); } -const ov::PartialShape& ov::Function::get_output_partial_shape(size_t i) const { +const ov::Shape& ov::Function::get_output_partial_shape(size_t i) const { return m_results.at(i)->get_output_partial_shape(0); } diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp index f5bb89d84a0714..eeaa794620648e 100644 --- a/ngraph/core/src/graph_util.cpp +++ b/ngraph/core/src/graph_util.cpp @@ -348,8 +348,7 @@ std::shared_ptr ngraph::clone_function(const ngraph::Function& VariableVector cloned_vars; std::map> var_map; for (const auto& var : variables) { - auto cloned_var = std::make_shared( - VariableInfo{PartialShape::dynamic(), element::dynamic, var->get_info().variable_id}); + auto cloned_var = std::make_shared(var->get_info()); cloned_vars.push_back(cloned_var); var_map[cloned_var->get_info().variable_id] = cloned_var; } diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index c6f4a9dd352cdf..f8b09a9a1cb3f5 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -146,7 +146,7 @@ void ov::Node::safe_delete(NodeVector& nodes, bool recurse) { void ov::Node::set_arguments(const NodeVector& arguments) { OutputVector outputs; - for (auto arg : arguments) { + for (const auto& arg : 
arguments) { for (auto& output : arg->outputs()) { outputs.push_back(output); } @@ -174,7 +174,7 @@ ov::descriptor::Input& ov::Node::get_input_descriptor(size_t position) { ov::descriptor::Output& ov::Node::get_output_descriptor(size_t position) { while (m_outputs.size() <= position) { size_t i = m_outputs.size(); - auto tensor_descriptor = make_shared(element::dynamic, PartialShape::dynamic(), this, i); + auto tensor_descriptor = make_shared(element::dynamic, Shape::dynamic(), this, i); m_outputs.emplace_back(this, i, tensor_descriptor); } return m_outputs[position]; @@ -221,7 +221,7 @@ void ov::Node::set_input_is_relevant_to_value(size_t i, bool relevant) { m_inputs[i].m_is_relevant_to_value = relevant; } -void ov::Node::set_output_type(size_t i, const element::Type& element_type, const PartialShape& pshape) { +void ov::Node::set_output_type(size_t i, const element::Type& element_type, const Shape& pshape) { get_output_descriptor(i).get_tensor_ptr()->set_tensor_type(element_type, pshape); } @@ -282,7 +282,7 @@ shared_ptr ov::Node::add_provenance_group_members_above(const OutputVe base_set.insert(node); } vector todo; - for (auto value : input_values()) { + for (const auto& value : input_values()) { todo.push_back(value.get_node()); } while (!todo.empty()) { @@ -292,7 +292,7 @@ shared_ptr ov::Node::add_provenance_group_members_above(const OutputVe continue; } add_provenance_group_member(node->shared_from_this()); - for (auto value : node->input_values()) { + for (const auto& value : node->input_values()) { if (m_provenance_group.count(value.get_node_shared_ptr()) == 0) { todo.push_back(value.get_node()); } @@ -315,7 +315,7 @@ void ov::Node::add_provenance_tags_above(const OutputVector& base, const std::un continue; } node->add_provenance_tags(tag_set); - for (auto value : node->input_values()) { + for (const auto& value : node->input_values()) { todo.push_back(value.get_node()); } base_set.insert(node); @@ -328,7 +328,7 @@ const std::unordered_set& ov::Node::get_provenance_tags() const { void ov::Node::add_provenance_tag(const std::string& tag) { m_provenance_tags.insert(tag); - for (auto node : m_provenance_group) { + for (const auto& node : m_provenance_group) { node->add_provenance_tag(tag); } } @@ -497,17 +497,17 @@ const ov::element::Type& ov::Node::get_element_type() const { return get_output_element_type(0); } -const ngraph::Shape& ov::Node::get_output_shape(size_t i) const { +const ov::StaticShape& ov::Node::get_output_shape(size_t i) const { NGRAPH_CHECK(i < m_outputs.size(), "index '", i, "' out of range in get_output_shape(size_t i)"); return m_outputs[i].get_shape(); } -const ov::PartialShape& ov::Node::get_output_partial_shape(size_t i) const { +const ov::Shape& ov::Node::get_output_partial_shape(size_t i) const { NGRAPH_CHECK(i < m_outputs.size(), "index '", i, "' out of range in get_output_partial_shape(size_t i)"); return m_outputs[i].get_partial_shape(); } -const ngraph::Shape& ov::Node::get_shape() const { +const ov::StaticShape& ov::Node::get_shape() const { NODE_VALIDATION_CHECK(this, get_output_size() == 1, "get_shape() must be called on a node with exactly one output"); return get_output_shape(0); } @@ -542,12 +542,12 @@ const ov::element::Type& ov::Node::get_input_element_type(size_t i) const { return m_inputs[i].get_element_type(); } -const ngraph::Shape& ov::Node::get_input_shape(size_t i) const { +const ov::StaticShape& ov::Node::get_input_shape(size_t i) const { NGRAPH_CHECK(i < m_inputs.size(), "index '", i, "' out of range in get_input_shape(size_t i)"); 
return m_inputs[i].get_shape(); } -const ov::PartialShape& ov::Node::get_input_partial_shape(size_t i) const { +const ov::Shape& ov::Node::get_input_partial_shape(size_t i) const { NGRAPH_CHECK(i < m_inputs.size(), "index '", i, "' out of range in get_input_partial_shape(size_t i)"); return m_inputs[i].get_partial_shape(); } @@ -579,7 +579,7 @@ bool ov::Node::has_same_type(std::shared_ptr node) const { ov::NodeVector ov::Node::get_users(bool check_is_used) const { NodeVector result; - for (auto output : outputs()) { + for (const auto& output : outputs()) { for (auto input : output.get_target_inputs()) { Node* input_node = input.get_node(); if (!check_is_used || ngraph::is_used(input_node)) { @@ -610,7 +610,7 @@ const ov::NodeVector& ov::check_single_output_args(const NodeVector& args) { ov::OutputVector ov::as_output_vector(const NodeVector& args) { OutputVector output_vector; - for (auto arg : args) { + for (const auto& arg : args) { output_vector.push_back(arg); } return output_vector; @@ -681,7 +681,7 @@ ov::Input ov::Node::input(size_t input_index) { throw out_of_range("node input index is out of range"); } - return Input(this, input_index); + return {this, input_index}; } ov::Output ov::Node::input_value(size_t input_index) const { @@ -693,7 +693,7 @@ ov::Input ov::Node::input(size_t input_index) const { throw out_of_range("node input index is out of range"); } - return Input(this, input_index); + return {this, input_index}; } ov::Output ov::Node::output(size_t output_index) { diff --git a/ngraph/core/src/node_input.cpp b/ngraph/core/src/node_input.cpp index 5c6a45ab837ea6..38a38b216476c1 100644 --- a/ngraph/core/src/node_input.cpp +++ b/ngraph/core/src/node_input.cpp @@ -19,10 +19,10 @@ const element::Type& Input::get_element_type() const { return m_node->get_input_element_type(m_index); } -const ngraph::Shape& Input::get_shape() const { +const StaticShape& Input::get_shape() const { return m_node->get_input_shape(m_index); } -const PartialShape& Input::get_partial_shape() const { +const Shape& Input::get_partial_shape() const { return m_node->get_input_partial_shape(m_index); } @@ -95,10 +95,10 @@ size_t Input::get_index() const { const element::Type& Input::get_element_type() const { return m_node->get_input_element_type(m_index); } -const ngraph::Shape& Input::get_shape() const { +const StaticShape& Input::get_shape() const { return m_node->get_input_shape(m_index); } -const PartialShape& Input::get_partial_shape() const { +const Shape& Input::get_partial_shape() const { return m_node->get_input_partial_shape(m_index); } diff --git a/ngraph/core/src/node_output.cpp b/ngraph/core/src/node_output.cpp index fcd2285956e35a..34f8ca4a7907d1 100644 --- a/ngraph/core/src/node_output.cpp +++ b/ngraph/core/src/node_output.cpp @@ -39,10 +39,10 @@ std::shared_ptr Output::get_tensor_ptr() const { const element::Type& Output::get_element_type() const { return m_node->get_output_element_type(m_index); } -const ngraph::Shape& Output::get_shape() const { +const StaticShape& Output::get_shape() const { return m_node->get_output_shape(m_index); } -const PartialShape& Output::get_partial_shape() const { +const Shape& Output::get_partial_shape() const { return m_node->get_output_partial_shape(m_index); } @@ -128,10 +128,10 @@ std::shared_ptr Output::get_tensor_ptr() const { const element::Type& Output::get_element_type() const { return m_node->get_output_element_type(m_index); } -const ngraph::Shape& Output::get_shape() const { +const StaticShape& Output::get_shape() const { return 
m_node->get_output_shape(m_index); } -const PartialShape& Output::get_partial_shape() const { +const Shape& Output::get_partial_shape() const { return m_node->get_output_partial_shape(m_index); } diff --git a/ngraph/core/src/op/adaptive_avg_pool.cpp b/ngraph/core/src/op/adaptive_avg_pool.cpp index de8c00f5d012b0..69688cc109701f 100644 --- a/ngraph/core/src/op/adaptive_avg_pool.cpp +++ b/ngraph/core/src/op/adaptive_avg_pool.cpp @@ -27,7 +27,7 @@ bool op::v8::AdaptiveAvgPool::visit_attributes(AttributeVisitor& visitor) { void op::v8::AdaptiveAvgPool::validate_and_infer_types() { NGRAPH_OP_SCOPE(v8_AdaptiveAvgPool_validate_and_infer_types); - const PartialShape& data_shape = get_input_partial_shape(0); + const ov::Shape& data_shape = get_input_partial_shape(0); NODE_VALIDATION_CHECK( this, @@ -35,7 +35,7 @@ void op::v8::AdaptiveAvgPool::validate_and_infer_types() { "Expected a 3D, 4D or 5D tensor for the input. Got: ", data_shape); - auto output_shape = PartialShape::dynamic(data_shape.rank()); + auto output_shape = ov::Shape::dynamic(data_shape.rank()); if (data_shape.rank().is_static()) { if (data_shape[0].is_static()) { output_shape[0] = data_shape[0]; // batch size diff --git a/ngraph/core/src/op/adaptive_max_pool.cpp b/ngraph/core/src/op/adaptive_max_pool.cpp index 186e08799f143d..70a6e4a2acb5b9 100644 --- a/ngraph/core/src/op/adaptive_max_pool.cpp +++ b/ngraph/core/src/op/adaptive_max_pool.cpp @@ -35,7 +35,7 @@ void op::v8::AdaptiveMaxPool::validate_and_infer_types() { m_index_element_type == element::i64 || m_index_element_type == element::i32, "Index element type must be i32 or i64"); - const PartialShape& data_shape = get_input_partial_shape(0); + const ov::Shape& data_shape = get_input_partial_shape(0); NODE_VALIDATION_CHECK( this, @@ -43,7 +43,7 @@ void op::v8::AdaptiveMaxPool::validate_and_infer_types() { "Expected a 3D, 4D or 5D tensor for the input. 
Got: ", data_shape); - auto output_shape = PartialShape::dynamic(data_shape.rank()); + auto output_shape = ov::Shape::dynamic(data_shape.rank()); if (data_shape.rank().is_static()) { if (data_shape[0].is_static()) { output_shape[0] = data_shape[0]; // batch size diff --git a/ngraph/core/src/op/assign.cpp b/ngraph/core/src/op/assign.cpp index 2b7dc2c8741c98..c56d07cafc748f 100644 --- a/ngraph/core/src/op/assign.cpp +++ b/ngraph/core/src/op/assign.cpp @@ -53,7 +53,7 @@ void op::v3::Assign::validate_and_infer_types() { set_output_type(0, arg_t, output_shape); } else { - set_output_type(0, arg_t, PartialShape::dynamic()); + set_output_type(0, arg_t, ov::Shape::dynamic()); } } diff --git a/ngraph/core/src/op/avg_pool.cpp b/ngraph/core/src/op/avg_pool.cpp index b6cef6ad773fee..3bc5bb54dcb4fb 100644 --- a/ngraph/core/src/op/avg_pool.cpp +++ b/ngraph/core/src/op/avg_pool.cpp @@ -10,19 +10,18 @@ #include "ngraph/validation_util.hpp" using namespace std; -using namespace ngraph; // *** AvgPool OP SET 1 *** -OPENVINO_RTTI_DEFINITION(op::v1::AvgPool, "AvgPool", 1); - -op::v1::AvgPool::AvgPool(const Output& arg, - const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, - bool exclude_pad, - op::RoundingType rounding_type, - const PadType& auto_pad) +OPENVINO_RTTI_DEFINITION(ov::op::v1::AvgPool, "AvgPool", 1); + +ov::op::v1::AvgPool::AvgPool(const Output& arg, + const Strides& strides, + const StaticShape& pads_begin, + const StaticShape& pads_end, + const StaticShape& kernel, + bool exclude_pad, + op::RoundingType rounding_type, + const PadType& auto_pad) : Op({arg}), m_kernel(kernel), m_strides(strides), @@ -34,7 +33,7 @@ op::v1::AvgPool::AvgPool(const Output& arg, constructor_validate_and_infer_types(); } -bool op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) { +bool ov::op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v1_AvgPool_visit_attributes); visitor.on_attribute("kernel", m_kernel); visitor.on_attribute("strides", m_strides); @@ -46,21 +45,21 @@ bool op::v1::AvgPool::visit_attributes(AttributeVisitor& visitor) { return true; } -void op::v1::AvgPool::validate_and_infer_types() { +void ov::op::v1::AvgPool::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_AvgPool_validate_and_infer_types); if (0 == m_strides.size()) { m_strides = Strides(m_kernel.size(), 1); } if (0 == m_pads_begin.size()) { - m_pads_begin = Shape(m_kernel.size(), 0); + m_pads_begin = StaticShape(m_kernel.size(), 0); } if (0 == m_pads_end.size()) { - m_pads_end = Shape(m_kernel.size(), 0); + m_pads_end = StaticShape(m_kernel.size(), 0); } - const PartialShape& arg_shape = get_input_partial_shape(0); + const ov::Shape& arg_shape = get_input_partial_shape(0); NODE_VALIDATION_CHECK( this, @@ -88,7 +87,7 @@ void op::v1::AvgPool::validate_and_infer_types() { m_kernel.size()); } - auto output_shape = PartialShape::dynamic(); + auto output_shape = ov::Shape::dynamic(); if (arg_shape.rank().is_static()) { output_shape = std::vector(arg_shape.rank().get_max_length(), Dimension::dynamic()); if (arg_shape[0].is_static()) { @@ -102,96 +101,96 @@ void op::v1::AvgPool::validate_and_infer_types() { if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER) { CoordinateDiff pads_end; CoordinateDiff pads_begin; - update_auto_padding_succeed = try_apply_auto_padding(arg_shape, - m_kernel, - m_strides, - Strides(m_kernel.size(), 1), // No dilation - m_auto_pad, - pads_end, - pads_begin); - m_pads_end = Shape(pads_end.begin(), pads_end.end()); - 
m_pads_begin = Shape(pads_begin.begin(), pads_begin.end()); + update_auto_padding_succeed = ngraph::try_apply_auto_padding(arg_shape, + m_kernel, + m_strides, + Strides(m_kernel.size(), 1), // No dilation + m_auto_pad, + pads_end, + pads_begin); + m_pads_end = StaticShape(pads_end.begin(), pads_end.end()); + m_pads_begin = StaticShape(pads_begin.begin(), pads_begin.end()); } if (m_auto_pad == PadType::VALID) { - m_pads_end = Shape(m_pads_end.size(), 0); - m_pads_begin = Shape(m_pads_begin.size(), 0); + m_pads_end = StaticShape(m_pads_end.size(), 0); + m_pads_begin = StaticShape(m_pads_begin.size(), 0); } // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for - // now still take Shape (no negative padding). + // now still take StaticShape (no negative padding). CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end()); CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end()); set_output_type(0, get_input_element_type(0), update_auto_padding_succeed - ? infer_batched_pooling_forward(this, - arg_shape, - pads_begin, - pads_end, - m_kernel, - m_strides, - !m_exclude_pad, - m_rounding_type == op::RoundingType::CEIL, - Strides{}) // no dilation of the window + ? ngraph::infer_batched_pooling_forward(this, + arg_shape, + pads_begin, + pads_end, + m_kernel, + m_strides, + !m_exclude_pad, + m_rounding_type == op::RoundingType::CEIL, + Strides{}) // no dilation of the window : output_shape); } -const Shape& op::v1::AvgPool::get_kernel() const { +const ov::StaticShape& ov::op::v1::AvgPool::get_kernel() const { return m_kernel; } -void op::v1::AvgPool::set_kernel(const Shape& kernel) { +void ov::op::v1::AvgPool::set_kernel(const StaticShape& kernel) { m_kernel = kernel; } -const Strides& op::v1::AvgPool::get_strides() const { +const ov::Strides& ov::op::v1::AvgPool::get_strides() const { return m_strides; } -void op::v1::AvgPool::set_strides(const Strides& strides) { +void ov::op::v1::AvgPool::set_strides(const Strides& strides) { m_strides = strides; } -const Shape& op::v1::AvgPool::get_pads_begin() const { +const ov::StaticShape& ov::op::v1::AvgPool::get_pads_begin() const { return m_pads_begin; } -void op::v1::AvgPool::set_pads_begin(const Shape& pads_begin) { +void ov::op::v1::AvgPool::set_pads_begin(const StaticShape& pads_begin) { m_pads_begin = pads_begin; } -const Shape& op::v1::AvgPool::get_pads_end() const { +const ov::StaticShape& ov::op::v1::AvgPool::get_pads_end() const { return m_pads_end; } -void op::v1::AvgPool::set_pads_end(const Shape& pads_end) { +void ov::op::v1::AvgPool::set_pads_end(const StaticShape& pads_end) { m_pads_end = pads_end; } -bool op::v1::AvgPool::get_exclude_pad() const { +bool ov::op::v1::AvgPool::get_exclude_pad() const { return m_exclude_pad; } -void op::v1::AvgPool::set_exclude_pad(bool exclude_pad) { +void ov::op::v1::AvgPool::set_exclude_pad(bool exclude_pad) { m_exclude_pad = exclude_pad; } -const op::PadType& op::v1::AvgPool::get_auto_pad() const { +const ov::op::PadType& ov::op::v1::AvgPool::get_auto_pad() const { return m_auto_pad; } -void op::v1::AvgPool::set_auto_pad(const op::PadType& auto_pad) { +void ov::op::v1::AvgPool::set_auto_pad(const op::PadType& auto_pad) { m_auto_pad = auto_pad; } -op::RoundingType op::v1::AvgPool::get_rounding_type() const { +ov::op::RoundingType ov::op::v1::AvgPool::get_rounding_type() const { return m_rounding_type; } -void op::v1::AvgPool::set_rounding_type(op::RoundingType rounding_type) { +void ov::op::v1::AvgPool::set_rounding_type(op::RoundingType rounding_type) { 
m_rounding_type = rounding_type; } -shared_ptr<Node> op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const { +shared_ptr<Node> ov::op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_args) const { NGRAPH_OP_SCOPE(v1_AvgPool_clone_with_new_inputs); check_new_args_count(this, new_args); return make_shared<v1::AvgPool>(new_args.at(0), @@ -204,6 +203,6 @@ shared_ptr<Node> op::v1::AvgPool::clone_with_new_inputs(const OutputVector& new_ m_auto_pad); } -shared_ptr<Node> op::v1::AvgPool::get_default_value() const { +shared_ptr<Node> ov::op::v1::AvgPool::get_default_value() const { return op::v0::Constant::create(get_element_type(), get_shape(), {0}); } diff --git a/ngraph/core/src/op/batch_norm.cpp b/ngraph/core/src/op/batch_norm.cpp index c053c2d704e5d3..5c1799d37a4a87 100644 --- a/ngraph/core/src/op/batch_norm.cpp +++ b/ngraph/core/src/op/batch_norm.cpp @@ -35,8 +35,8 @@ bool op::v0::BatchNormInference::visit_attributes(AttributeVisitor& visitor) { void op::v0::BatchNormInference::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_BatchNormInference_validate_and_infer_types); element::Type result_et; - PartialShape result_batch_shape; - PartialShape result_channel_shape; // unused here + ov::Shape result_batch_shape; + ov::Shape result_channel_shape; // unused here NODE_VALIDATION_CHECK(this, m_epsilon >= 0, @@ -93,8 +93,8 @@ bool op::v5::BatchNormInference::visit_attributes(AttributeVisitor& visitor) { void op::v5::BatchNormInference::validate_and_infer_types() { NGRAPH_OP_SCOPE(v5_BatchNormInference_validate_and_infer_types); element::Type result_et; - PartialShape result_batch_shape; - PartialShape result_channel_shape; // unused here + ov::Shape result_batch_shape; + ov::Shape result_channel_shape; // unused here NODE_VALIDATION_CHECK(this, m_epsilon >= 0, diff --git a/ngraph/core/src/op/batch_to_space.cpp b/ngraph/core/src/op/batch_to_space.cpp index 8bbac08abbfb0c..63e1429c962540 100644 --- a/ngraph/core/src/op/batch_to_space.cpp +++ b/ngraph/core/src/op/batch_to_space.cpp @@ -57,16 +57,16 @@ void op::v1::BatchToSpace::validate_and_infer_types() { "block_shape and crops inputs must have integer element type. Got: ", inputs_integer_et); - const PartialShape& data_pshape = get_input_partial_shape(0); - const PartialShape& block_shape_ps = get_input_partial_shape(1); - const PartialShape& crops_begin_ps = get_input_partial_shape(2); - const PartialShape& crops_end_ps = get_input_partial_shape(3); + const ov::Shape& data_pshape = get_input_partial_shape(0); + const ov::Shape& block_shape_ps = get_input_partial_shape(1); + const ov::Shape& crops_begin_ps = get_input_partial_shape(2); + const ov::Shape& crops_end_ps = get_input_partial_shape(3); - PartialShape inputs_same_ps{PartialShape::dynamic()}; + ov::Shape inputs_same_ps{ov::Shape::dynamic()}; NODE_VALIDATION_CHECK(this, - PartialShape::merge_into(inputs_same_ps, crops_begin_ps) && - PartialShape::merge_into(inputs_same_ps, crops_end_ps) && - PartialShape::merge_into(inputs_same_ps, block_shape_ps), + ov::Shape::merge_into(inputs_same_ps, crops_begin_ps) && + ov::Shape::merge_into(inputs_same_ps, crops_end_ps) && + ov::Shape::merge_into(inputs_same_ps, block_shape_ps), "block_shape, crops_begin and crops_end inputs must have the same shape. 
Got: ", block_shape_ps, ", ", @@ -103,7 +103,7 @@ void op::v1::BatchToSpace::validate_and_infer_types() { const auto crops_end_const = get_constant_from_source(input_value(3)); if (block_const && crops_begin_const && crops_end_const && data_pshape.is_static()) { - const Shape& data_sshape = data_pshape.to_shape(); + const ov::StaticShape& data_sshape = data_pshape.to_shape(); auto block_val = block_const->cast_vector(); auto crops_begin_val = crops_begin_const->cast_vector(); @@ -143,7 +143,7 @@ void op::v1::BatchToSpace::validate_and_infer_types() { "block_shape[i] * input_shape[i]"); } - Shape output_sshape = {static_cast(data_sshape[0] / block_prod)}; + ov::StaticShape output_sshape = {static_cast(data_sshape[0] / block_prod)}; for (size_t idx = 1; idx < data_sshape.size(); ++idx) { output_sshape.push_back( static_cast(data_sshape[idx] * block_val[idx] - crops_begin_val[idx] - crops_end_val[idx])); @@ -152,7 +152,7 @@ void op::v1::BatchToSpace::validate_and_infer_types() { set_output_size(1); set_output_type(0, data_et, output_sshape); } else { - set_output_type(0, data_et, PartialShape::dynamic(data_rank)); + set_output_type(0, data_et, ov::Shape::dynamic(data_rank)); } } @@ -219,12 +219,12 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe "Invalid crops values (out of bounds) with respect to the shape of data input"); } - Shape dispersed_shape(1); + ov::StaticShape dispersed_shape(1); dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end()); std::vector axes_order(block_values_size + 1); std::vector plain_axes_order(block_values_size + 1); std::iota(plain_axes_order.begin(), plain_axes_order.end(), 0); - Shape squeezed_shape(data_shape.begin(), data_shape.end()); + ov::StaticShape squeezed_shape(data_shape.begin(), data_shape.end()); if (squeezed_shape.size() > block_values_size) { return false; } @@ -232,7 +232,7 @@ bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVe auto* flat_data = data->get_data_ptr(); std::vector dispersed_data(shape_size(data_shape) * elem_size); - Shape post_transpose_shape(axes_order.size()); + ov::StaticShape post_transpose_shape(axes_order.size()); std::vector post_transpose_data(shape_size(data_shape) * elem_size); for (size_t block_idx = 1; block_idx < block_values_size; ++block_idx) { diff --git a/ngraph/core/src/op/binary_convolution.cpp b/ngraph/core/src/op/binary_convolution.cpp index a3e420a6454b8f..b5831e2e2a164f 100644 --- a/ngraph/core/src/op/binary_convolution.cpp +++ b/ngraph/core/src/op/binary_convolution.cpp @@ -58,9 +58,9 @@ ov::op::v1::BinaryConvolution::BinaryConvolution(const Output& data, void ov::op::v1::BinaryConvolution::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_BinaryConvolution_validate_and_infer_types); - const PartialShape& data_batch_pshape = get_input_partial_shape(0); + const ov::Shape& data_batch_pshape = get_input_partial_shape(0); element::Type data_batch_et = get_input_element_type(0); - const PartialShape& filters_pshape = get_input_partial_shape(1); + const ov::Shape& filters_pshape = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, data_batch_et.is_real() || data_batch_et.is_integral_number(), @@ -78,15 +78,15 @@ void ov::op::v1::BinaryConvolution::validate_and_infer_types() { " and ", filters_pshape); - PartialShape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this, - result_ps_rank, - data_batch_pshape, - filters_pshape, - m_auto_pad, - m_strides, - m_dilations, - m_pads_begin, - 
m_pads_end); + ov::Shape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this, + result_ps_rank, + data_batch_pshape, + filters_pshape, + m_auto_pad, + m_strides, + m_dilations, + m_pads_begin, + m_pads_end); set_output_type(0, data_batch_et, result_shape); } diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index a7ee65bc51d206..39bc23f1683937 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -35,7 +35,8 @@ op::v3::Broadcast::Broadcast(const Output<Node>& arg, } namespace { -std::pair<bool, AxisSet> get_broadcast_axes_bidirectional(const Shape& arg_shape, const Shape& result_shape) { +std::pair<bool, AxisSet> get_broadcast_axes_bidirectional(const ov::StaticShape& arg_shape, + const ov::StaticShape& result_shape) { AxisSet broadcast_axes; bool axes_known = false; const auto start_axis = result_shape.size() - arg_shape.size(); @@ -67,12 +68,14 @@ std::pair<bool, AxisSet> op::v3::Broadcast::get_broadcast_axes() const { } namespace { -PartialShape get_result_shape_bidirectional(const Node* this_ptr, const PartialShape& arg_shape, Shape& target_shape) { +ov::Shape get_result_shape_bidirectional(const Node* this_ptr, + const ov::Shape& arg_shape, + ov::StaticShape& target_shape) { if (arg_shape.rank().is_dynamic()) { - return PartialShape::dynamic(); + return ov::Shape::dynamic(); } auto arg_shape_vec = static_cast<std::vector<Dimension>>(arg_shape); - PartialShape result_shape; + ov::Shape result_shape; // Add left padding to shorter target or argument shape const auto target_padded_rank = std::max(arg_shape_vec.size(), target_shape.size()); while (arg_shape_vec.size() < target_padded_rank) { @@ -109,8 +112,8 @@ PartialShape get_result_shape_bidirectional(const Node* this_ptr, const PartialS bool op::v3::Broadcast::broadcast_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { if (get_broadcast_spec().m_type == op::BroadcastType::BIDIRECTIONAL) { auto arg_shape = inputs[0]->get_shape(); - Shape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]); - PartialShape result_shape = get_result_shape_bidirectional(this, PartialShape{arg_shape}, target_shape); + ov::StaticShape target_shape = op::util::BroadcastBase::get_target_shape(inputs[1]); + ov::Shape result_shape = get_result_shape_bidirectional(this, ov::Shape{arg_shape}, target_shape); auto pair_broadcast_axes = get_broadcast_axes_bidirectional(arg_shape, result_shape.to_shape()); return op::util::BroadcastBase::evaluate_broadcast(inputs[0], outputs[0], @@ -219,7 +222,7 @@ op::v1::Broadcast::Broadcast(const Output<Node>& arg, const AutoBroadcastSpec& broadcast_spec) : util::BroadcastBase{arg, target_shape, - op::v0::Constant::create(element::u8, Shape{}, {0})->output(0), + op::v0::Constant::create(element::u8, ov::StaticShape{}, {0})->output(0), to_broadcast_mode(broadcast_spec)}, m_broadcast_spec{broadcast_spec} { constructor_validate_and_infer_types(); @@ -235,7 +238,7 @@ void op::v1::Broadcast::validate_and_infer_types() { // Mocking axes_mapping input for cases that don't require it if (m_broadcast_spec.m_type == AutoBroadcastType::NUMPY && get_input_size() < 3) { - auto output = op::v0::Constant::create(element::u8, Shape{}, {0})->output(0); + auto output = op::v0::Constant::create(element::u8, ov::StaticShape{}, {0})->output(0); set_argument(2, output); } diff --git a/ngraph/core/src/op/bucketize.cpp b/ngraph/core/src/op/bucketize.cpp index 0ac5682041cbf1..96b6cbb4c771c8 100644 --- a/ngraph/core/src/op/bucketize.cpp +++ b/ngraph/core/src/op/bucketize.cpp @@ -30,8 +30,8 @@ bool 
op::v3::Bucketize::visit_attributes(AttributeVisitor& visitor) { void op::v3::Bucketize::validate_and_infer_types() { NGRAPH_OP_SCOPE(v3_Bucketize_validate_and_infer_types); - const PartialShape& data_pshape = get_input_partial_shape(0); - const PartialShape& buckets_pshape = get_input_partial_shape(1); + const ov::Shape& data_pshape = get_input_partial_shape(0); + const ov::Shape& buckets_pshape = get_input_partial_shape(1); const auto data_et = get_input_element_type(0); const auto buckets_et = get_input_element_type(1); diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp index 0e4ba6f6126875..0cc5a56cdc744d 100644 --- a/ngraph/core/src/op/concat.cpp +++ b/ngraph/core/src/op/concat.cpp @@ -32,7 +32,7 @@ void op::Concat::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_Concat_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required."); - PartialShape inputs_shape_scheme{PartialShape::dynamic()}; + ov::Shape inputs_shape_scheme{ov::Shape::dynamic()}; element::Type inputs_et{element::dynamic}; Dimension concatenation_axis_output_dim{0}; @@ -40,7 +40,7 @@ NODE_VALIDATION_CHECK(this, element::Type::merge(inputs_et, inputs_et, get_input_element_type(i)), "Argument element types are inconsistent."); - PartialShape this_input_shape = get_input_partial_shape(i); + ov::Shape this_input_shape = get_input_partial_shape(i); Dimension this_input_rank = this_input_shape.rank(); if (this_input_rank.is_static()) { if (get_concatenation_axis() < 0) { @@ -66,7 +66,7 @@ this_input_shape[concat_axis] = Dimension::dynamic(); NODE_VALIDATION_CHECK(this, - PartialShape::merge_into(inputs_shape_scheme, this_input_shape), + ov::Shape::merge_into(inputs_shape_scheme, this_input_shape), "Argument shapes are inconsistent; they must have the same rank, and must " "have ", "equal dimension everywhere except on the concatenation axis (axis ", @@ -76,13 +76,13 @@ concatenation_axis_output_dim += Dimension::dynamic(); } } - PartialShape concatenated_shape = inputs_shape_scheme; + ov::Shape concatenated_shape = inputs_shape_scheme; if (concatenated_shape.rank().is_static()) { concatenated_shape[get_concatenation_axis()] = concatenation_axis_output_dim; set_output_type(0, inputs_et, concatenated_shape); } else { - set_output_type(0, inputs_et, PartialShape::dynamic(concatenation_axis_output_dim)); + set_output_type(0, inputs_et, ov::Shape::dynamic(concatenation_axis_output_dim)); } } @@ -95,8 +95,8 @@ shared_ptr<Node> op::Concat::clone_with_new_inputs(const OutputVector& new_args) namespace { bool evaluate_concat(const HostTensorVector& args, const HostTensorPtr& out, int64_t concatenation_axis) { std::vector<const char*> arg_bufs; - std::vector<Shape> arg_shapes; - Shape out_shape(args[0]->get_shape()); + std::vector<ov::StaticShape> arg_shapes; + ov::StaticShape out_shape(args[0]->get_shape()); out_shape[concatenation_axis] = 0; for (auto& input : args) { arg_bufs.push_back(input->get_data_ptr<char>()); diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index 4374b8691a99d1..2280a037d05a82 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -54,7 +54,7 @@ ov::op::v0::Constant::Constant(const shared_ptr<ngraph::runtime::Tensor>& tensor } ov::op::v0::Constant::Constant(const element::Type& type, - const ngraph::Shape& shape, + const ov::StaticShape& shape, const std::vector<std::string>& values) : Constant(type, shape) { 
NGRAPH_SUPPRESS_DEPRECATED_START @@ -187,7 +187,7 @@ ov::op::v0::Constant::Constant(const element::Type& type, NGRAPH_SUPPRESS_DEPRECATED_END } -ov::op::v0::Constant::Constant(const element::Type& type, const ngraph::Shape& shape) +ov::op::v0::Constant::Constant(const element::Type& type, const ov::StaticShape& shape) : m_element_type(type), m_shape(shape) { allocate_buffer(); @@ -199,7 +199,7 @@ void ov::op::v0::Constant::allocate_buffer() { std::memset(m_data->get_ptr(), 0, m_data->size()); } -ov::op::v0::Constant::Constant(const element::Type& type, const ngraph::Shape& shape, const void* data) +ov::op::v0::Constant::Constant(const element::Type& type, const ov::StaticShape& shape, const void* data) : Constant(type, shape) { size_t size = ceil(shape_size(m_shape) * m_element_type.bitwidth() / 8.f); std::memcpy(get_data_ptr_nc(), data, size); @@ -214,9 +214,9 @@ ov::op::v0::Constant::Constant(const Constant& other) { constructor_validate_and_infer_types(); } -ov::op::v0::Constant::Constant(const Constant& other, const ngraph::Shape& new_shape) { +ov::op::v0::Constant::Constant(const Constant& other, const ov::StaticShape& new_shape) { NGRAPH_CHECK(shape_size(other.m_shape) == shape_size(new_shape), - "Shape size " + std::to_string(shape_size(new_shape)) + " is not equal to " + + "ov::StaticShape size " + std::to_string(shape_size(new_shape)) + " is not equal to " + std::to_string(shape_size(other.m_shape))); m_element_type = other.m_element_type; m_shape = new_shape; @@ -391,10 +391,10 @@ vector<string> ov::op::v0::Constant::get_value_strings() const { return rc; } -ngraph::Shape ov::op::v0::Constant::get_shape_val() const { +ov::StaticShape ov::op::v0::Constant::get_shape_val() const { NGRAPH_CHECK(m_element_type.is_integral_number()); std::vector<int64_t> out_shape = cast_vector<int64_t>(); - ngraph::Shape output_shape(shape_size(m_shape)); + ov::StaticShape output_shape(shape_size(m_shape)); std::transform(out_shape.begin(), out_shape.end(), output_shape.begin(), [&](const int64_t& v) { return (v > 0) ? 
v : 0; }); @@ -454,7 +454,7 @@ ov::AxisSet ov::op::v0::Constant::get_axis_set_val() const { return output_axis_set; } -void ov::op::v0::Constant::set_data_shape(const ngraph::Shape& shape) { +void ov::op::v0::Constant::set_data_shape(const ov::StaticShape& shape) { NGRAPH_CHECK(shape_size(shape) == shape_size(m_shape)); m_shape = shape; } @@ -528,7 +528,7 @@ bool ov::op::v0::Constant::are_all_data_elements_bitwise_identical() const { bool ov::op::v0::Constant::visit_attributes(AttributeVisitor& visitor) { NGRAPH_OP_SCOPE(v0_Constant_visit_attributes); - ngraph::Shape prev_shape = m_shape; + ov::StaticShape prev_shape = m_shape; element::Type prev_type = m_element_type; visitor.on_attribute("element_type", m_element_type); visitor.on_attribute("shape", m_shape); diff --git a/ngraph/core/src/op/convolution.cpp b/ngraph/core/src/op/convolution.cpp index 2c8dbb8c4db613..7b9729897a645f 100644 --- a/ngraph/core/src/op/convolution.cpp +++ b/ngraph/core/src/op/convolution.cpp @@ -45,9 +45,9 @@ bool op::v1::Convolution::visit_attributes(AttributeVisitor& visitor) { void op::v1::Convolution::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_Convolution_validate_and_infer_types); - const PartialShape& data_batch_pshape = get_input_partial_shape(0); + const ov::Shape& data_batch_pshape = get_input_partial_shape(0); element::Type data_batch_et = get_input_element_type(0); - const PartialShape& filters_pshape = get_input_partial_shape(1); + const ov::Shape& filters_pshape = get_input_partial_shape(1); element::Type filters_et = get_input_element_type(1); element::Type result_et; @@ -72,15 +72,15 @@ void op::v1::Convolution::validate_and_infer_types() { " and ", filters_pshape); - PartialShape result_shape = validate_and_infer_convolution_forward_output_shape(this, - result_ps_rank, - data_batch_pshape, - filters_pshape, - m_auto_pad, - m_strides, - m_dilations, - m_pads_begin, - m_pads_end); + ov::Shape result_shape = validate_and_infer_convolution_forward_output_shape(this, + result_ps_rank, + data_batch_pshape, + filters_pshape, + m_auto_pad, + m_strides, + m_dilations, + m_pads_begin, + m_pads_end); set_output_type(0, result_et, result_shape); } @@ -161,31 +161,31 @@ bool op::v1::ConvolutionBackpropData::is_dynamic() const { return is_dynamic; } -const PartialShape op::v1::ConvolutionBackpropData::get_output_shape() const { +const ov::Shape op::v1::ConvolutionBackpropData::get_output_shape() const { auto data_pshape = get_input_partial_shape(0); auto filter_pshape = get_input_partial_shape(1); - PartialShape shape; + ov::Shape shape; bool is_output_shape_present = inputs().size() == 3; if (is_output_shape_present) { if (const auto& const_op = get_constant_from_source(input_value(2))) { - return PartialShape{const_op->get_shape_val()}; + return ov::Shape{const_op->get_shape_val()}; } } if (data_pshape.rank().is_static()) { - shape = PartialShape{vector(data_pshape.rank().get_length() - 2)}; + shape = ov::Shape{vector(data_pshape.rank().get_length() - 2)}; } else if (filter_pshape.rank().is_static()) { - shape = PartialShape{vector(filter_pshape.rank().get_length() - 2)}; + shape = ov::Shape{vector(filter_pshape.rank().get_length() - 2)}; } else { - shape = PartialShape::dynamic(); + shape = ov::Shape::dynamic(); } return shape; } -void op::v1::ConvolutionBackpropData::set_output_shape(const Shape& shape) { +void op::v1::ConvolutionBackpropData::set_output_shape(const ov::StaticShape& shape) { this->input(2).replace_source_output( - op::v0::Constant::create(this->get_input_element_type(2), 
Shape{shape.size()}, shape)->output(0)); + op::v0::Constant::create(this->get_input_element_type(2), ov::StaticShape{shape.size()}, shape)->output(0)); } void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( @@ -217,9 +217,9 @@ void op::v1::ConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( void op::v1::ConvolutionBackpropData::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_ConvolutionBackpropData_validate_and_infer_types); - const PartialShape& data_pshape = get_input_partial_shape(0); + const ov::Shape& data_pshape = get_input_partial_shape(0); element::Type delta_et = get_input_element_type(0); - const PartialShape& filters_pshape = get_input_partial_shape(1); + const ov::Shape& filters_pshape = get_input_partial_shape(1); element::Type filters_et = get_input_element_type(1); element::Type result_et; @@ -260,7 +260,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() { bool is_output_shape_present = inputs().size() == 3; if (is_output_shape_present) { - const PartialShape& output_shape_pshape = get_input_partial_shape(2); + const ov::Shape& output_shape_pshape = get_input_partial_shape(2); const element::Type output_shape_et = get_input_element_type(2); NODE_VALIDATION_CHECK(this, @@ -277,7 +277,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() { output_shape_pshape, ")."); } - PartialShape output_spatial_pshape = get_output_shape(); + ov::Shape output_spatial_pshape = get_output_shape(); if (result_ps_rank.is_static()) { const auto num_spatial_dims = result_ps_rank.get_length() - 2; @@ -316,7 +316,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() { "spatial features."); if (is_output_shape_present && output_spatial_pshape.is_static()) { - Shape output_shape = output_spatial_pshape.to_shape(); + ov::StaticShape output_shape = output_spatial_pshape.to_shape(); NODE_VALIDATION_CHECK(this, static_cast(output_shape.size()) == num_spatial_dims, "Output shape should be specified only and for " @@ -324,23 +324,23 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() { } } - PartialShape result_pshape{PartialShape::dynamic()}; + ov::Shape result_pshape{ov::Shape::dynamic()}; // If output shape is provided, ignore current values for padding begin/end // and infer them. if (is_output_shape_present) { if (output_spatial_pshape.rank().is_static()) { if (data_pshape.rank().is_static() && filters_pshape.rank().is_static()) { - const PartialShape data_spatial_shape = [data_pshape]() { + const ov::Shape data_spatial_shape = [data_pshape]() { vector data_dims{data_pshape}; data_dims.erase(data_dims.begin(), data_dims.begin() + 2); // remove {N, C_IN} - return PartialShape{data_dims}; + return ov::Shape{data_dims}; }(); - const PartialShape filters_spatial_shape = [filters_pshape]() { + const ov::Shape filters_spatial_shape = [filters_pshape]() { vector filters_dims{filters_pshape}; filters_dims.erase(filters_dims.begin(), filters_dims.begin() + 2); // remove {C_IN, C_OUT} - return PartialShape{filters_dims}; + return ov::Shape{filters_dims}; }(); // If auto_pad has one of following mode we infer paddings. Otherwise in @@ -366,7 +366,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() { // N auto batches = data_pshape.rank().is_static() ? 
data_pshape[0] : Dimension::dynamic(); output_pshape.insert(output_pshape.begin(), batches); - result_pshape = PartialShape{output_pshape}; + result_pshape = ov::Shape{output_pshape}; } set_input_is_relevant_to_shape(2); } @@ -410,7 +410,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() { // N auto batches = data_pshape.rank().is_static() ? data_pshape[0] : Dimension::dynamic(); output_pshape.insert(output_pshape.begin(), batches); - result_pshape = PartialShape{output_pshape}; + result_pshape = ov::Shape{output_pshape}; } } set_input_is_relevant_to_shape(0); diff --git a/ngraph/core/src/op/ctc_greedy_decoder.cpp b/ngraph/core/src/op/ctc_greedy_decoder.cpp index 63fd3cf88109c0..5f9db48e9970fb 100644 --- a/ngraph/core/src/op/ctc_greedy_decoder.cpp +++ b/ngraph/core/src/op/ctc_greedy_decoder.cpp @@ -27,7 +27,7 @@ void op::CTCGreedyDecoder::validate_and_infer_types() { // output dynamic rank tensor if all inputs are of dynamic rank if (logits_pshape.rank().is_dynamic() && seq_mask_pshape.rank().is_dynamic()) { - set_output_type(0, input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}); + set_output_type(0, input_et, ov::Shape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}); } // check ranks of input tensors @@ -71,7 +71,7 @@ void op::CTCGreedyDecoder::validate_and_infer_types() { batch_size = seq_mask_pshape[1]; } } - set_output_type(0, input_et, PartialShape{batch_size, time_size, 1, 1}); + set_output_type(0, input_et, ov::Shape{batch_size, time_size, 1, 1}); } bool op::CTCGreedyDecoder::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp b/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp index 5d66411b636452..5549640c1e1c01 100644 --- a/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp +++ b/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp @@ -65,7 +65,7 @@ void op::v6::CTCGreedyDecoderSeqLen::validate_and_infer_types() { const auto& blank_index_partial_shape = get_input_partial_shape(2); if (blank_index_partial_shape.is_static()) { - Shape blank_index_shape = blank_index_partial_shape.to_shape(); + ov::StaticShape blank_index_shape = blank_index_partial_shape.to_shape(); NODE_VALIDATION_CHECK( this, ngraph::is_scalar(blank_index_shape) || (is_vector(blank_index_shape) && (blank_index_shape[0] == 1)), @@ -100,8 +100,8 @@ void op::v6::CTCGreedyDecoderSeqLen::validate_and_infer_types() { batch_size = seq_len_pshape[0] & logits_pshape[0]; } - set_output_type(0, m_classes_index_type, PartialShape{batch_size, time_size}); - set_output_type(1, m_sequence_length_type, PartialShape{batch_size}); + set_output_type(0, m_classes_index_type, ov::Shape{batch_size, time_size}); + set_output_type(1, m_sequence_length_type, ov::Shape{batch_size}); } bool op::v6::CTCGreedyDecoderSeqLen::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/ctc_loss.cpp b/ngraph/core/src/op/ctc_loss.cpp index a1ba244fe166cd..60ed0a2f35d478 100644 --- a/ngraph/core/src/op/ctc_loss.cpp +++ b/ngraph/core/src/op/ctc_loss.cpp @@ -186,9 +186,9 @@ void op::v4::CTCLoss::validate_and_infer_types() { // set output shape set_output_size(1); if (is_batch_size_set) { - set_output_type(0, logits_type, Shape{batch_size}); + set_output_type(0, logits_type, ov::StaticShape{batch_size}); } else { - set_output_type(0, logits_type, PartialShape{Dimension::dynamic()}); + set_output_type(0, logits_type, ov::Shape{Dimension::dynamic()}); } } diff --git a/ngraph/core/src/op/cum_sum.cpp b/ngraph/core/src/op/cum_sum.cpp index 
d7995e55dce144..9e0b3d2151b7c8 100644 --- a/ngraph/core/src/op/cum_sum.cpp +++ b/ngraph/core/src/op/cum_sum.cpp @@ -23,7 +23,7 @@ op::v0::CumSum::CumSum(const Output& arg, const Output& axis, const } op::v0::CumSum::CumSum(const Output& arg, const bool exclusive, const bool reverse) - : Op({arg, op::v0::Constant::create(element::i32, Shape{}, {0})}), + : Op({arg, op::v0::Constant::create(element::i32, ov::StaticShape{}, {0})}), m_exclusive(exclusive), m_reverse(reverse) { constructor_validate_and_infer_types(); diff --git a/ngraph/core/src/op/deformable_convolution.cpp b/ngraph/core/src/op/deformable_convolution.cpp index 54ecd9cc3e1475..2a20ad18e37d9e 100644 --- a/ngraph/core/src/op/deformable_convolution.cpp +++ b/ngraph/core/src/op/deformable_convolution.cpp @@ -81,9 +81,9 @@ void op::v8::DeformableConvolution::validate_and_infer_types() { DeformableConvolutionBase::validate_and_infer_types(); if (inputs().size() == 4) { - const PartialShape& data_pshape = get_input_partial_shape(0); - const PartialShape& filters_pshape = get_input_partial_shape(2); - const PartialShape& mask_pshape = get_input_partial_shape(3); + const ov::Shape& data_pshape = get_input_partial_shape(0); + const ov::Shape& filters_pshape = get_input_partial_shape(2); + const ov::Shape& mask_pshape = get_input_partial_shape(3); element::Type mask_et = get_input_element_type(3); NODE_VALIDATION_CHECK(this, @@ -132,7 +132,7 @@ void op::v8::DeformableConvolution::validate_and_infer_types() { } } - PartialShape result_pshape = get_output_partial_shape(0); + ov::Shape result_pshape = get_output_partial_shape(0); if (result_pshape.rank().is_static() && mask_pshape.rank().is_static()) { NODE_VALIDATION_CHECK( this, diff --git a/ngraph/core/src/op/deformable_psroi_pooling.cpp b/ngraph/core/src/op/deformable_psroi_pooling.cpp index ac82f76745b20c..be445027aba700 100644 --- a/ngraph/core/src/op/deformable_psroi_pooling.cpp +++ b/ngraph/core/src/op/deformable_psroi_pooling.cpp @@ -111,7 +111,7 @@ void op::v1::DeformablePSROIPooling::validate_and_infer_types() { output_dim_vec[i] = m_group_size; } - set_output_type(0, input_et, PartialShape(output_dim_vec)); + set_output_type(0, input_et, ov::Shape(output_dim_vec)); } shared_ptr op::v1::DeformablePSROIPooling::clone_with_new_inputs(const OutputVector& new_args) const { diff --git a/ngraph/core/src/op/depth_to_space.cpp b/ngraph/core/src/op/depth_to_space.cpp index 2b24cc8daa08f7..98e8ce3bd85543 100644 --- a/ngraph/core/src/op/depth_to_space.cpp +++ b/ngraph/core/src/op/depth_to_space.cpp @@ -45,7 +45,7 @@ std::shared_ptr op::DepthToSpace::clone_with_new_inputs(const OutputVector void op::DepthToSpace::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_DepthToSpace_validate_and_infer_types); - PartialShape data_pshape = get_input_partial_shape(0); + ov::Shape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); @@ -79,7 +79,7 @@ void op::DepthToSpace::validate_and_infer_types() { set_output_size(1); set_output_type(0, data_type, out_shape); } else { - set_output_type(0, data_type, PartialShape::dynamic(data_pshape.rank())); + set_output_type(0, data_type, ov::Shape::dynamic(data_pshape.rank())); } } diff --git a/ngraph/core/src/op/detection_output.cpp b/ngraph/core/src/op/detection_output.cpp index 5ade2c7ff14796..cf03b5488da6de 100644 --- a/ngraph/core/src/op/detection_output.cpp +++ b/ngraph/core/src/op/detection_output.cpp @@ -56,9 +56,9 @@ void ov::op::v0::DetectionOutput::validate_and_infer_types() { proposals_et.is_real(), "Proposals' data 
type must be floating point. Got " + proposals_et.get_type_name()); - const PartialShape& box_logits_pshape = get_input_partial_shape(0); - const PartialShape& class_preds_pshape = get_input_partial_shape(1); - const PartialShape& proposals_pshape = get_input_partial_shape(2); + const ov::Shape& box_logits_pshape = get_input_partial_shape(0); + const ov::Shape& class_preds_pshape = get_input_partial_shape(1); + const ov::Shape& proposals_pshape = get_input_partial_shape(2); int num_loc_classes = m_attrs.share_location ? 1 : m_attrs.num_classes; int prior_box_size = m_attrs.normalized ? 4 : 5; @@ -174,8 +174,8 @@ void ov::op::v0::DetectionOutput::validate_and_infer_types() { "Additional box predictions' data type must be the same as box logits data type (" + box_logits_et.get_type_name() + "). Got " + aux_box_preds_et.get_type_name()); - const PartialShape& aux_class_preds_pshape = get_input_partial_shape(3); - const PartialShape& aux_box_preds_pshape = get_input_partial_shape(4); + const ov::Shape& aux_class_preds_pshape = get_input_partial_shape(3); + const ov::Shape& aux_box_preds_pshape = get_input_partial_shape(4); if (aux_class_preds_pshape.rank().is_static()) { NODE_VALIDATION_CHECK(this, aux_class_preds_pshape[0].compatible(num_images), diff --git a/ngraph/core/src/op/einsum.cpp b/ngraph/core/src/op/einsum.cpp index 2cc2fd3ef14433..13749b88bac347 100644 --- a/ngraph/core/src/op/einsum.cpp +++ b/ngraph/core/src/op/einsum.cpp @@ -188,7 +188,7 @@ void op::v7::Einsum::validate_and_infer_types() { // create a dictionary with dimension sizes (or ranges in case dynamic shapes) for each label // and check their compatibility in case repeating labels - unordered_map<string, PartialShape> label_to_shape; + unordered_map<string, ov::Shape> label_to_shape; label_to_shape.clear(); for (size_t input_idx = 0; input_idx < num_inputs; ++input_idx) { @@ -210,14 +210,14 @@ if (label.compare("...") == 0) { size_t num_broadcasted_dims = input_rank - labels.size() + 1; auto current_sub_pshape = - PartialShape(std::vector<Dimension>(pshape.begin() + dim_ind, - pshape.begin() + dim_ind + num_broadcasted_dims)); + ov::Shape(std::vector<Dimension>(pshape.begin() + dim_ind, + pshape.begin() + dim_ind + num_broadcasted_dims)); if (label_to_shape.find(label) == label_to_shape.end()) { label_to_shape[label] = current_sub_pshape; } else { - bool is_broadcast_success = PartialShape::broadcast_merge_into(label_to_shape[label], - current_sub_pshape, - op::AutoBroadcastType::NUMPY); + bool is_broadcast_success = ov::Shape::broadcast_merge_into(label_to_shape[label], + current_sub_pshape, + op::AutoBroadcastType::NUMPY); NODE_VALIDATION_CHECK(this, is_broadcast_success, "Input dimensions labeled with ellipsis for Einsum " "must be broadcastable."); dim_ind += num_broadcasted_dims; } else { if (label_to_shape.find(label) == label_to_shape.end()) { - label_to_shape[label] = PartialShape{pshape[dim_ind]}; + label_to_shape[label] = ov::Shape{pshape[dim_ind]}; } else { NODE_VALIDATION_CHECK(this, - label_to_shape[label].compatible(PartialShape{pshape[label_ind]}), + label_to_shape[label].compatible(ov::Shape{pshape[label_ind]}), "Different input dimensions indicated by the same labels for Einsum " "must be compatible."); - PartialShape::merge_into(label_to_shape[label], PartialShape{pshape[dim_ind]}); + ov::Shape::merge_into(label_to_shape[label], ov::Shape{pshape[dim_ind]}); } ++dim_ind; } @@ -245,7 +245,7 @@ "not contain ellipsis."); if 
(label_to_shape.find(label) == label_to_shape.end()) { - label_to_shape[label] = PartialShape{Dimension::dynamic()}; + label_to_shape[label] = ov::Shape{Dimension::dynamic()}; } } } @@ -265,7 +265,7 @@ void op::v7::Einsum::validate_and_infer_types() { label_to_shape[output_label].begin(), label_to_shape[output_label].end()); } - set_output_type(0, input_type_0, PartialShape(output_pshape_vector)); + set_output_type(0, input_type_0, ov::Shape(output_pshape_vector)); } bool op::v7::Einsum::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/embedding_segments_sum.cpp b/ngraph/core/src/op/embedding_segments_sum.cpp index 96ca10ba481259..8345b284d7d8a0 100644 --- a/ngraph/core/src/op/embedding_segments_sum.cpp +++ b/ngraph/core/src/op/embedding_segments_sum.cpp @@ -90,7 +90,7 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { "INDICES and SEGMENT_IDS shape must be same"); NODE_VALIDATION_CHECK(this, - get_input_partial_shape(NUM_SEGMENTS).compatible(PartialShape{}), + get_input_partial_shape(NUM_SEGMENTS).compatible(ov::Shape{}), "NUM_SEGMENTS must be a scalar"); if (get_input_size() >= 5) { @@ -108,7 +108,7 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { ")"); NODE_VALIDATION_CHECK(this, - get_input_partial_shape(DEFAULT_INDEX).compatible(PartialShape{}), + get_input_partial_shape(DEFAULT_INDEX).compatible(ov::Shape{}), "DEFAULT_INDEX must be a scalar"); } @@ -133,9 +133,9 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { element::Type result_et = get_input_element_type(EMB_TABLE); - const PartialShape& emb_table_shape = get_input_partial_shape(EMB_TABLE); + const ov::Shape& emb_table_shape = get_input_partial_shape(EMB_TABLE); - PartialShape result_shape; + ov::Shape result_shape; if (emb_table_shape.rank().is_static()) { result_shape = emb_table_shape; if (const auto& num_segments_const = get_constant_from_source(input_value(NUM_SEGMENTS))) { @@ -145,7 +145,7 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { set_input_is_relevant_to_shape(NUM_SEGMENTS); } } else { - result_shape = PartialShape::dynamic(); + result_shape = ov::Shape::dynamic(); set_input_is_relevant_to_shape(NUM_SEGMENTS); } diff --git a/ngraph/core/src/op/experimental_detectron_detection_output.cpp b/ngraph/core/src/op/experimental_detectron_detection_output.cpp index a0b7717fd4eca6..fbc5775fece70d 100644 --- a/ngraph/core/src/op/experimental_detectron_detection_output.cpp +++ b/ngraph/core/src/op/experimental_detectron_detection_output.cpp @@ -49,9 +49,9 @@ void op::v6::ExperimentalDetectronDetectionOutput::validate_and_infer_types() { auto im_info_shape = get_input_partial_shape(3); set_output_size(3); - set_output_type(0, input_et, Shape{rois_num, 4}); - set_output_type(1, element::Type_t::i32, Shape{rois_num}); - set_output_type(2, input_et, Shape{rois_num}); + set_output_type(0, input_et, ov::StaticShape{rois_num, 4}); + set_output_type(1, element::Type_t::i32, ov::StaticShape{rois_num}); + set_output_type(2, input_et, ov::StaticShape{rois_num}); if (rois_shape.rank().is_static()) { NODE_VALIDATION_CHECK(this, rois_shape.rank().get_length() == 2, "Input rois rank must be equal to 2."); diff --git a/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp b/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp index d27c6bb2b9872c..ddd6d75be83a61 100644 --- a/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp +++ b/ngraph/core/src/op/experimental_detectron_generate_proposals.cpp @@ -49,12 
+49,12 @@ bool op::v6::ExperimentalDetectronGenerateProposalsSingleImage::visit_attributes void op::v6::ExperimentalDetectronGenerateProposalsSingleImage::validate_and_infer_types() { NGRAPH_OP_SCOPE(v6_ExperimentalDetectronGenerateProposalsSingleImage_validate_and_infer_types); - size_t post_nms_count = static_cast<size_t>(m_attrs.post_nms_count); + auto post_nms_count = static_cast<size_t>(m_attrs.post_nms_count); auto input_et = get_input_element_type(0); set_output_size(2); - set_output_type(0, input_et, Shape{post_nms_count, 4}); - set_output_type(1, input_et, Shape{post_nms_count}); + set_output_type(0, input_et, ov::StaticShape{post_nms_count, 4}); + set_output_type(1, input_et, ov::StaticShape{post_nms_count}); auto im_info_shape = get_input_partial_shape(0); auto anchors_shape = get_input_partial_shape(1); diff --git a/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp b/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp index 42ea322e950411..9d579411427cd8 100644 --- a/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp +++ b/ngraph/core/src/op/experimental_detectron_prior_grid_generator.cpp @@ -94,9 +94,9 @@ void op::v6::ExperimentalDetectronPriorGridGenerator::validate_and_infer_types() validate(); set_output_size(1); - PartialShape out_shape = {Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 4}; + ov::Shape out_shape = {Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 4}; if (m_attrs.flatten) { - out_shape = PartialShape{Dimension::dynamic(), 4}; + out_shape = ov::Shape{Dimension::dynamic(), 4}; } if (priors_shape.rank().is_dynamic() || featmap_shape.rank().is_dynamic()) { @@ -109,9 +109,9 @@ void op::v6::ExperimentalDetectronPriorGridGenerator::validate_and_infer_types() auto featmap_width = featmap_shape[3]; if (m_attrs.flatten) { - out_shape = PartialShape{featmap_height * featmap_width * num_priors, 4}; + out_shape = ov::Shape{featmap_height * featmap_width * num_priors, 4}; } else { - out_shape = PartialShape{featmap_height, featmap_width, num_priors, 4}; + out_shape = ov::Shape{featmap_height, featmap_width, num_priors, 4}; } set_output_type(0, input_et, out_shape); } diff --git a/ngraph/core/src/op/experimental_detectron_roi_feature.cpp b/ngraph/core/src/op/experimental_detectron_roi_feature.cpp index bd158e5388fd3c..e4516da6bcabb0 100644 --- a/ngraph/core/src/op/experimental_detectron_roi_feature.cpp +++ b/ngraph/core/src/op/experimental_detectron_roi_feature.cpp @@ -45,9 +45,9 @@ void op::v6::ExperimentalDetectronROIFeatureExtractor::validate_and_infer_types( auto rois_shape = get_input_partial_shape(0); auto input_et = get_input_element_type(0); - PartialShape out_shape = {Dimension::dynamic(), Dimension::dynamic(), m_attrs.output_size, m_attrs.output_size}; + ov::Shape out_shape = {Dimension::dynamic(), Dimension::dynamic(), m_attrs.output_size, m_attrs.output_size}; - PartialShape out_rois_shape = {Dimension::dynamic(), 4}; + ov::Shape out_rois_shape = {Dimension::dynamic(), 4}; if (rois_shape.rank().is_static()) { NODE_VALIDATION_CHECK(this, rois_shape.rank().get_length() == 2, "Input rois rank must be equal to 2."); diff --git a/ngraph/core/src/op/experimental_detectron_topkrois.cpp b/ngraph/core/src/op/experimental_detectron_topkrois.cpp index 6e6c0783862236..599bf69d5809a8 100644 --- a/ngraph/core/src/op/experimental_detectron_topkrois.cpp +++ b/ngraph/core/src/op/experimental_detectron_topkrois.cpp @@ -39,7 +39,7 @@ void op::v6::ExperimentalDetectronTopKROIs::validate_and_infer_types() { const 
auto input_rois_shape = get_input_partial_shape(0); const auto rois_probs_shape = get_input_partial_shape(1); - set_output_type(0, get_input_element_type(0), Shape{m_max_rois, 4}); + set_output_type(0, get_input_element_type(0), ov::StaticShape{m_max_rois, 4}); if (input_rois_shape.rank().is_static()) { NODE_VALIDATION_CHECK(this, diff --git a/ngraph/core/src/op/extractimagepatches.cpp b/ngraph/core/src/op/extractimagepatches.cpp index 362035248271b7..1e3073e689d985 100644 --- a/ngraph/core/src/op/extractimagepatches.cpp +++ b/ngraph/core/src/op/extractimagepatches.cpp @@ -15,9 +15,9 @@ using namespace ngraph; OPENVINO_RTTI_DEFINITION(op::v3::ExtractImagePatches, "ExtractImagePatches", 3); op::v3::ExtractImagePatches::ExtractImagePatches(const Output<Node>& image, - const Shape& sizes, + const ov::StaticShape& sizes, const Strides& strides, - const Shape& rates, + const ov::StaticShape& rates, const PadType& auto_pad) : Op({image}), m_patch_sizes(sizes), @@ -29,7 +29,7 @@ op::v3::ExtractImagePatches::ExtractImagePatches(const Output<Node>& image, void op::v3::ExtractImagePatches::validate_and_infer_types() { NGRAPH_OP_SCOPE(v3_ExtractImagePatches_validate_and_infer_types); - const PartialShape input_pshape = get_input_partial_shape(0); + const ov::Shape input_pshape = get_input_partial_shape(0); NODE_VALIDATION_CHECK(this, input_pshape.rank() == 4, "input tensor must be 4D tensor."); @@ -60,7 +60,7 @@ void op::v3::ExtractImagePatches::validate_and_infer_types() { if (input_pshape[1].is_dynamic() || input_pshape[2].is_dynamic() || input_pshape[3].is_dynamic()) { set_input_is_relevant_to_shape(0); - auto output_pshape = PartialShape::dynamic(4); + auto output_pshape = ov::Shape::dynamic(4); set_output_type(0, get_input_element_type(0), output_pshape); } else { int32_t input_depth = input_pshape[1].get_length(); @@ -93,18 +93,17 @@ void op::v3::ExtractImagePatches::validate_and_infer_types() { if (out_cols < 0) out_cols = 0; - ngraph::Dimension::value_type out_depth_cast = + auto out_depth_cast = static_cast<ngraph::Dimension::value_type>(input_depth * m_patch_sizes[0] * m_patch_sizes[1]); - ngraph::Dimension::value_type out_rows_cast = static_cast<ngraph::Dimension::value_type>(out_rows); - ngraph::Dimension::value_type out_cols_cast = static_cast<ngraph::Dimension::value_type>(out_cols); + auto out_rows_cast = static_cast<ngraph::Dimension::value_type>(out_rows); + auto out_cols_cast = static_cast<ngraph::Dimension::value_type>(out_cols); - PartialShape output_pshape; + ov::Shape output_pshape; if (input_pshape[0].is_dynamic()) { - output_pshape = PartialShape{input_pshape[0], out_depth_cast, out_rows_cast, out_cols_cast}; + output_pshape = ov::Shape{input_pshape[0], out_depth_cast, out_rows_cast, out_cols_cast}; } else { - ngraph::Dimension::value_type input_batch_cast = - static_cast<ngraph::Dimension::value_type>(input_pshape[0].get_length()); - output_pshape = PartialShape{input_batch_cast, out_depth_cast, out_rows_cast, out_cols_cast}; + auto input_batch_cast = static_cast<ngraph::Dimension::value_type>(input_pshape[0].get_length()); + output_pshape = ov::Shape{input_batch_cast, out_depth_cast, out_rows_cast, out_cols_cast}; } if (input_rows == 0 || input_cols == 0) { diff --git a/ngraph/core/src/op/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp index 2282612009fb49..67ddf5b5c2d257 100644 --- a/ngraph/core/src/op/fake_quantize.cpp +++ b/ngraph/core/src/op/fake_quantize.cpp @@ -37,18 +37,18 @@ op::FakeQuantize::FakeQuantize(const Output<Node>& data, void op::FakeQuantize::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_FakeQuantize_validate_and_infer_types); - PartialShape data_pshape = get_input_partial_shape(0); + ov::Shape data_pshape = get_input_partial_shape(0); for (auto i = 1; i <= 4; i++) { if 
(m_auto_broadcast.m_type == op::AutoBroadcastType::NONE) { NODE_VALIDATION_CHECK(this, - PartialShape::merge_into(data_pshape, get_input_partial_shape(i)), + ov::Shape::merge_into(data_pshape, get_input_partial_shape(i)), "Argument shapes are inconsistent."); } else if (m_auto_broadcast.m_type == op::AutoBroadcastType::NUMPY || m_auto_broadcast.m_type == op::AutoBroadcastType::PDPD) { NODE_VALIDATION_CHECK( this, - PartialShape::broadcast_merge_into(data_pshape, get_input_partial_shape(i), m_auto_broadcast), + ov::Shape::broadcast_merge_into(data_pshape, get_input_partial_shape(i), m_auto_broadcast), "Argument shapes are inconsistent."); } else { NODE_VALIDATION_CHECK(this, false, "Unsupported auto broadcast specification"); diff --git a/ngraph/core/src/op/gather_elements.cpp b/ngraph/core/src/op/gather_elements.cpp index be92de695cdc7b..0a181ab86c2f2b 100644 --- a/ngraph/core/src/op/gather_elements.cpp +++ b/ngraph/core/src/op/gather_elements.cpp @@ -54,7 +54,7 @@ void op::v6::GatherElements::validate_and_infer_types() { "indices rank must be >= 1."); if (data_rank.is_static() && indices_rank.is_dynamic()) { - PartialShape out_shape_info(data_pshape); + ov::Shape out_shape_info(data_pshape); out_shape_info[axis] = Dimension::dynamic(); set_output_type(0, data_type, out_shape_info); return; @@ -62,7 +62,7 @@ void op::v6::GatherElements::validate_and_infer_types() { if (data_rank.is_dynamic()) { if (indices_rank.is_dynamic()) - set_output_type(0, data_type, PartialShape::dynamic()); + set_output_type(0, data_type, ov::Shape::dynamic()); return; } @@ -74,7 +74,7 @@ void op::v6::GatherElements::validate_and_infer_types() { " and ", indices_rank.get_length()); - PartialShape output_pshape(indices_pshape); + ov::Shape output_pshape(indices_pshape); for (int i = 0; i < indices_rank.get_length(); i++) { if (i != axis) { // if size of the current dimension of indices is unknown it will be retrieved from data diff --git a/ngraph/core/src/op/gather_nd.cpp b/ngraph/core/src/op/gather_nd.cpp index 61e1c2708f7f3c..870362bf22845e 100644 --- a/ngraph/core/src/op/gather_nd.cpp +++ b/ngraph/core/src/op/gather_nd.cpp @@ -105,9 +105,9 @@ void op::v5::GatherND::validate_and_infer_types() { output_shape[output_indices_length + dim + delta_output_rank] = data_pshape[m_batch_dims + indices_tuple_length + dim]; } - set_output_type(0, data_type, PartialShape(output_shape)); + set_output_type(0, data_type, ov::Shape(output_shape)); } else { - set_output_type(0, data_type, PartialShape::dynamic()); + set_output_type(0, data_type, ov::Shape::dynamic()); } } diff --git a/ngraph/core/src/op/gelu.cpp b/ngraph/core/src/op/gelu.cpp index 0fc27a3373ecb8..195a94e9af46bd 100644 --- a/ngraph/core/src/op/gelu.cpp +++ b/ngraph/core/src/op/gelu.cpp @@ -38,7 +38,7 @@ shared_ptr op::v0::Gelu::clone_with_new_inputs(const OutputVector& new_arg void op::v0::Gelu::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_Gelu_validate_and_infer_types); element::Type input_element_type = get_input_element_type(0); - PartialShape input_pshape = get_input_partial_shape(0); + ov::Shape input_pshape = get_input_partial_shape(0); NODE_VALIDATION_CHECK(this, input_element_type.is_dynamic() || input_element_type.is_real(), @@ -91,7 +91,7 @@ shared_ptr op::v7::Gelu::clone_with_new_inputs(const OutputVector& new_arg void op::v7::Gelu::validate_and_infer_types() { NGRAPH_OP_SCOPE(v7_Gelu_validate_and_infer_types); element::Type input_element_type = get_input_element_type(0); - PartialShape input_pshape = get_input_partial_shape(0); + ov::Shape 
input_pshape = get_input_partial_shape(0); NODE_VALIDATION_CHECK(this, input_element_type.is_dynamic() || input_element_type.is_real(), diff --git a/ngraph/core/src/op/grn.cpp b/ngraph/core/src/op/grn.cpp index 960ea235e3a0f5..eaabb5c31a7962 100644 --- a/ngraph/core/src/op/grn.cpp +++ b/ngraph/core/src/op/grn.cpp @@ -33,7 +33,7 @@ void op::v0::GRN::validate_and_infer_types() { const auto& data_pshape = get_input_partial_shape(0); if (data_pshape.is_static()) { - const Shape& data_shape{data_pshape.to_shape()}; + const ov::StaticShape& data_shape{data_pshape.to_shape()}; // Input data must be 2, 3 or 4D tensor. NODE_VALIDATION_CHECK(this, diff --git a/ngraph/core/src/op/group_conv.cpp b/ngraph/core/src/op/group_conv.cpp index dba5f693adc1fb..0cf967a9bb280d 100644 --- a/ngraph/core/src/op/group_conv.cpp +++ b/ngraph/core/src/op/group_conv.cpp @@ -54,7 +54,7 @@ bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visito return true; } -static Dimension infer_group_from_input_shapes(const PartialShape& data_pshape, const PartialShape& filters_pshape) { +static Dimension infer_group_from_input_shapes(const ov::Shape& data_pshape, const ov::Shape& filters_pshape) { Dimension group_dim = Dimension(); if (data_pshape.rank().is_static() && data_pshape[1].is_static() && filters_pshape.rank().is_static() && filters_pshape[2].is_static()) { @@ -70,8 +70,8 @@ static Dimension infer_group_from_input_shapes(const PartialShape& data_pshape, void op::v1::GroupConvolution::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_GroupConvolution_validate_and_infer_types); - PartialShape data_batch_pshape = get_input_partial_shape(0); - PartialShape filters_pshape = get_input_partial_shape(1); + ov::Shape data_batch_pshape = get_input_partial_shape(0); + ov::Shape filters_pshape = get_input_partial_shape(1); element::Type data_batch_et = get_input_element_type(0); element::Type filters_et = get_input_element_type(1); @@ -99,7 +99,7 @@ void op::v1::GroupConvolution::validate_and_infer_types() { filters_pshape, ")."); - PartialShape result_shape{PartialShape::dynamic()}; + ov::Shape result_shape{ov::Shape::dynamic()}; if (data_batch_pshape.rank().is_static() || filters_pshape.rank().is_static()) { const bool is_data_batch_ps_static = data_batch_pshape.rank().is_static(); const auto output_ps_rank = @@ -165,11 +165,11 @@ void op::v1::GroupConvolution::validate_and_infer_types() { m_pads_begin.clear(); m_pads_end.clear(); - const PartialShape filter_spatial_shape = [filters_pshape]() { + const ov::Shape filter_spatial_shape = [filters_pshape]() { vector filter_dims{filters_pshape}; filter_dims.erase(filter_dims.begin(), filter_dims.begin() + 3); // Remove {GROUP, C_OUT, C_IN} - return PartialShape{filter_dims}; + return ov::Shape{filter_dims}; }(); if (filter_spatial_shape.is_static()) { @@ -189,8 +189,8 @@ void op::v1::GroupConvolution::validate_and_infer_types() { } // we need to adjust channels input dim to reuse helpers for regular convolution - PartialShape data_batch_ps = [&]() { - auto shape = PartialShape{data_batch_pshape}; + ov::Shape data_batch_ps = [&]() { + auto shape = ov::Shape{data_batch_pshape}; auto groups = filters_pshape.rank().is_static() ? 
filters_pshape[0] : Dimension(); if (groups.is_dynamic()) { groups = infer_group_from_input_shapes(data_batch_pshape, filters_pshape); @@ -206,8 +206,8 @@ void op::v1::GroupConvolution::validate_and_infer_types() { }(); // we need to adjust filters shape to reuse helpers for regular convolution - PartialShape filters_ps = [&]() { - auto shape = PartialShape{filters_pshape}; + ov::Shape filters_ps = [&]() { + auto shape = ov::Shape{filters_pshape}; if (shape.rank().is_static() && shape.rank().get_length() > 2) { auto groups = filters_pshape.rank().is_static() ? filters_pshape[0] : Dimension(); if (groups.is_dynamic()) { @@ -216,7 +216,7 @@ void op::v1::GroupConvolution::validate_and_infer_types() { shape[1] = groups * shape[1]; vector dim_vec{shape}; dim_vec.erase(dim_vec.begin()); - shape = PartialShape{dim_vec}; + shape = ov::Shape{dim_vec}; } return shape; }(); @@ -333,8 +333,7 @@ bool op::v1::GroupConvolutionBackpropData::is_dynamic() const { return is_dynamic; } -static Dimension infer_backprop_group_from_input_shapes(const PartialShape& data_pshape, - const PartialShape& filters_pshape) { +static Dimension infer_backprop_group_from_input_shapes(const ov::Shape& data_pshape, const ov::Shape& filters_pshape) { Dimension group_dim = Dimension(); if (data_pshape.rank().is_static() && data_pshape[1].is_static() && filters_pshape.rank().is_static() && filters_pshape[1].is_static()) { @@ -348,30 +347,30 @@ static Dimension infer_backprop_group_from_input_shapes(const PartialShape& data return group_dim; } -const PartialShape op::v1::GroupConvolutionBackpropData::get_convolution_output_shape() const { +const ov::Shape op::v1::GroupConvolutionBackpropData::get_convolution_output_shape() const { auto data_pshape = get_input_partial_shape(0); auto filter_pshape = get_input_partial_shape(1); - PartialShape shape; + ov::Shape shape; if (inputs().size() == 3) { if (const auto& const_op = get_constant_from_source(input_value(2))) { - return PartialShape{const_op->get_shape_val()}; + return ov::Shape{const_op->get_shape_val()}; } } if (data_pshape.rank().is_static()) { - shape = PartialShape{vector(data_pshape.rank().get_length() - 2)}; + shape = ov::Shape{vector(data_pshape.rank().get_length() - 2)}; } else if (filter_pshape.rank().is_static()) { - shape = PartialShape{vector(filter_pshape.rank().get_length() - 3)}; + shape = ov::Shape{vector(filter_pshape.rank().get_length() - 3)}; } else { - shape = PartialShape::dynamic(); + shape = ov::Shape::dynamic(); } return shape; } -void op::v1::GroupConvolutionBackpropData::set_output_shape(const Shape& shape) { +void op::v1::GroupConvolutionBackpropData::set_output_shape(const ov::StaticShape& shape) { this->input(2).replace_source_output( - op::v0::Constant::create(this->get_input_element_type(2), Shape{shape.size()}, shape)->output(0)); + op::v0::Constant::create(this->get_input_element_type(2), ov::StaticShape{shape.size()}, shape)->output(0)); } void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_shape( @@ -402,9 +401,9 @@ void op::v1::GroupConvolutionBackpropData::infer_conv_backprop_output_spatial_sh void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_GroupConvolutionBackpropData_validate_and_infer_types); - const PartialShape& data_pshape = get_input_partial_shape(0); + const ov::Shape& data_pshape = get_input_partial_shape(0); element::Type data_et = get_input_element_type(0); - const PartialShape& filters_pshape = get_input_partial_shape(1); + const ov::Shape& filters_pshape = 
get_input_partial_shape(1); element::Type filters_et = get_input_element_type(1); element::Type result_et; @@ -433,7 +432,7 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { bool is_output_shape_present = inputs().size() == 3; if (is_output_shape_present) { - const PartialShape& output_shape_pshape = get_input_partial_shape(2); + const ov::Shape& output_shape_pshape = get_input_partial_shape(2); const element::Type output_shape_et = get_input_element_type(2); NODE_VALIDATION_CHECK(this, @@ -450,7 +449,7 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { output_shape_pshape, ")."); } - PartialShape output_spatial_pshape = get_convolution_output_shape(); + ov::Shape output_spatial_pshape = get_convolution_output_shape(); if (data_pshape.rank().is_static() || filters_pshape.rank().is_static()) { const bool is_data_ps_static = data_pshape.rank().is_static(); @@ -508,7 +507,7 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { } if (is_output_shape_present && output_spatial_pshape.is_static()) { - Shape output_shape = output_spatial_pshape.to_shape(); + ov::StaticShape output_shape = output_spatial_pshape.to_shape(); NODE_VALIDATION_CHECK(this, output_shape.size() == num_spatial_dims, "Output shape should be specified only and for " @@ -516,23 +515,23 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { } } - PartialShape result_pshape{PartialShape::dynamic()}; + ov::Shape result_pshape{ov::Shape::dynamic()}; // If output shape is provided, ignore current values for padding begin/end // and infer them. if (is_output_shape_present) { if (output_spatial_pshape.rank().is_static()) { if (data_pshape.rank().is_static() && filters_pshape.rank().is_static()) { - const PartialShape data_spatial_shape = [data_pshape]() { + const ov::Shape data_spatial_shape = [data_pshape]() { vector data_dims{data_pshape}; data_dims.erase(data_dims.begin(), data_dims.begin() + 2); // remove {N, C_IN} - return PartialShape{data_dims}; + return ov::Shape{data_dims}; }(); - const PartialShape filters_spatial_shape = [filters_pshape]() { + const ov::Shape filters_spatial_shape = [filters_pshape]() { vector filters_dims{filters_pshape}; filters_dims.erase(filters_dims.begin(), filters_dims.begin() + 3); // remove {GROUPS, C_OUT, C_IN} - return PartialShape{filters_dims}; + return ov::Shape{filters_dims}; }(); // If auto_pad has one of following mode we infer paddings. Otherwise in @@ -566,7 +565,7 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { // N auto batches = data_pshape.rank().is_static() ? data_pshape[0] : Dimension::dynamic(); output_pshape.insert(output_pshape.begin(), batches); - result_pshape = PartialShape{output_pshape}; + result_pshape = ov::Shape{output_pshape}; } set_input_is_relevant_to_shape(2); } @@ -616,7 +615,7 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() { // N auto batches = data_pshape.rank().is_static() ? 
data_pshape[0] : Dimension::dynamic(); output_pshape.insert(output_pshape.begin(), batches); - result_pshape = PartialShape{output_pshape}; + result_pshape = ov::Shape{output_pshape}; } } set_input_is_relevant_to_shape(0); diff --git a/ngraph/core/src/op/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp index aef6257695de7c..22b7910306cf28 100644 --- a/ngraph/core/src/op/gru_cell.cpp +++ b/ngraph/core/src/op/gru_cell.cpp @@ -89,7 +89,7 @@ void op::v3::GRUCell::validate_and_infer_types() { NGRAPH_OP_SCOPE(v3_GRUCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); return; } } @@ -173,7 +173,7 @@ void op::v3::GRUCell::validate_and_infer_types() { void op::v3::GRUCell::add_default_bias_input() { Output B = op::v0::Constant::create(get_input_element_type(0), - Shape{(s_gates_count + m_linear_before_reset) * get_hidden_size()}, + ov::StaticShape{(s_gates_count + m_linear_before_reset) * get_hidden_size()}, vector((s_gates_count + m_linear_before_reset) * get_hidden_size(), 0.f)); set_argument(4, B); } diff --git a/ngraph/core/src/op/gru_sequence.cpp b/ngraph/core/src/op/gru_sequence.cpp index 1f980134ebed3f..f7a7e13b8a0490 100644 --- a/ngraph/core/src/op/gru_sequence.cpp +++ b/ngraph/core/src/op/gru_sequence.cpp @@ -49,8 +49,8 @@ void op::v5::GRUSequence::validate_and_infer_types() { NGRAPH_OP_SCOPE(v5_GRUSequence_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); + set_output_type(1, get_input_element_type(0), ov::Shape::dynamic()); return; } } diff --git a/ngraph/core/src/op/if.cpp b/ngraph/core/src/op/if.cpp index fe4a49c7778ba0..12623ad7ce1c8a 100644 --- a/ngraph/core/src/op/if.cpp +++ b/ngraph/core/src/op/if.cpp @@ -28,8 +28,7 @@ op::v8::If::If(const Output& execution_condition) : If() { // This function tries to calculate the output shape of the if operation by two outputs from two // subgraphs. 
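// ---------------------------------------------------------------------------
// Aside (editor's sketch, not part of the patch): the resolve_shape() helper
// below merges the two branch output shapes of v8::If. Assuming the Dimension
// and ov::Shape (this PR's dynamic-shape type) APIs used throughout these
// sources, the merge rule boils down to the following; the function name is
// illustrative only.
#include <algorithm>
#include <vector>

ov::Shape merge_branch_shapes(const ov::Shape& then_ps, const ov::Shape& else_ps) {
    // Ranks that disagree, or a dynamic rank on either side, give a fully
    // dynamic result.
    if (then_ps.rank().is_dynamic() || else_ps.rank().is_dynamic() ||
        then_ps.rank().get_length() != else_ps.rank().get_length()) {
        return ov::Shape::dynamic();
    }
    // Otherwise each output dimension must cover both branches: take the union
    // of the per-dimension [min, max] bounds.
    std::vector<Dimension> dims;
    for (int64_t i = 0; i < then_ps.rank().get_length(); ++i) {
        if (then_ps[i].is_dynamic() || else_ps[i].is_dynamic()) {
            dims.emplace_back(Dimension::dynamic());
        } else {
            dims.emplace_back(std::min(then_ps[i].get_min_length(), else_ps[i].get_min_length()),
                              std::max(then_ps[i].get_max_length(), else_ps[i].get_max_length()));
        }
    }
    return ov::Shape(dims);
}
// ---------------------------------------------------------------------------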
-static ngraph::PartialShape resolve_shape(const ngraph::PartialShape& then_pshape, - const ngraph::PartialShape& else_pshape) { +static ov::Shape resolve_shape(const ov::Shape& then_pshape, const ov::Shape& else_pshape) { // then_pshape - shape of output from then_body // else_pshape - shape of output from else_body auto then_rank = then_pshape.rank(); @@ -38,7 +37,7 @@ static ngraph::PartialShape resolve_shape(const ngraph::PartialShap // if ranks of the shapes are not equal, or the rank of one of them is dynamic, the function // returns a shape with dynamic rank if (then_rank.is_dynamic() || else_rank.is_dynamic() || then_rank.get_length() != else_rank.get_length()) { - return ngraph::PartialShape::dynamic(ngraph::Rank::dynamic()); + return ov::Shape::dynamic(ngraph::Rank::dynamic()); } std::vector new_dims; @@ -57,7 +56,7 @@ static ngraph::PartialShape resolve_shape(const ngraph::PartialShap } } - return PartialShape(new_dims); + return ov::Shape(new_dims); } bool op::v8::If::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp index 6c725c2b01204b..1cef3a90338620 100644 --- a/ngraph/core/src/op/interpolate.cpp +++ b/ngraph/core/src/op/interpolate.cpp @@ -43,7 +43,7 @@ void op::v0::Interpolate::validate_and_infer_types() { "output shape must be an integral number."); set_input_is_relevant_to_shape(1); - PartialShape output_shape = PartialShape(get_input_partial_shape(0)); + ov::Shape output_shape = ov::Shape(get_input_partial_shape(0)); if (output_shape.rank().is_static()) { for (auto axis : m_attrs.axes) { NGRAPH_CHECK(static_cast(axis) < output_shape.rank().get_length()); @@ -127,7 +127,7 @@ bool op::v4::Interpolate::visit_attributes(AttributeVisitor& visitor) { std::vector op::v4::Interpolate::get_axes() const { auto inputs = input_values(); if (inputs.size() <= 3) { - PartialShape input_shape = PartialShape(get_input_partial_shape(0)); + ov::Shape input_shape = ov::Shape(get_input_partial_shape(0)); NODE_VALIDATION_CHECK(this, input_shape.rank().is_static(), "Could not define axes of interpolation because there are " @@ -157,10 +157,10 @@ int64_t multiply_bound_and_scale(int64_t bound, float scale) { } } // namespace -void op::v4::Interpolate::infer_using_scales(PartialShape& output_shape, +void op::v4::Interpolate::infer_using_scales(ov::Shape& output_shape, const std::vector& axes, const std::vector& scales, - const PartialShape& padded_input_shape) const { + const ov::Shape& padded_input_shape) const { size_t i = 0; for (auto axis : axes) { const auto& current_dim = padded_input_shape[axis]; @@ -174,7 +174,7 @@ void op::v4::Interpolate::infer_using_scales(PartialShape& output_shape, } } -void op::v4::Interpolate::infer_using_shapes(PartialShape& output_shape, +void op::v4::Interpolate::infer_using_shapes(ov::Shape& output_shape, const std::vector& axes, const std::vector& sizes) const { size_t i = 0; @@ -183,10 +183,10 @@ void op::v4::Interpolate::infer_using_shapes(PartialShape& output_shape, } } -PartialShape op::v4::Interpolate::get_padded_input_shape(const PartialShape& input_shape) const { +ov::Shape op::v4::Interpolate::get_padded_input_shape(const ov::Shape& input_shape) const { const auto input_rank = input_shape.rank().get_length(); - PartialShape padded_input_shape = input_shape; + ov::Shape padded_input_shape = input_shape; for (int64_t i = 0; i < input_rank; ++i) { if (input_shape[i].is_static()) { @@ -225,7 +225,7 @@ void op::v4::Interpolate::validate_and_infer_types() { "Axes
element type must be i32, i64, u32 or u64"); } - PartialShape input_shape = PartialShape(get_input_partial_shape(0)); + ov::Shape input_shape = ov::Shape(get_input_partial_shape(0)); if (!input_shape.rank().is_static()) { set_output_type(0, get_input_element_type(0), input_shape); @@ -237,7 +237,7 @@ void op::v4::Interpolate::validate_and_infer_types() { // If the input 'axes' is given and this input is not Constant, we cannot infer any elements // of the output shape. Hence, all components of the output shape should be dynamic. if (input_values().size() == 4 && !has_and_set_equal_bounds(input_value(3))) { - PartialShape output_shape = std::vector(input_rank, Dimension::dynamic()); + ov::Shape output_shape = std::vector(input_rank, Dimension::dynamic()); set_output_type(0, get_input_element_type(0), output_shape); return; } @@ -245,8 +245,8 @@ void op::v4::Interpolate::validate_and_infer_types() { auto axes = get_axes(); correct_pads(); - PartialShape padded_input_shape = get_padded_input_shape(input_shape); - PartialShape output_shape = padded_input_shape; + ov::Shape padded_input_shape = get_padded_input_shape(input_shape); + ov::Shape output_shape = padded_input_shape; if (output_shape.rank().is_static()) { for (auto axis : axes) { @@ -287,7 +287,7 @@ static constexpr size_t axes_port = 3; static constexpr size_t max_num_of_ports = 4; std::vector get_axes_vector(const HostTensorVector& args) { - Shape input_shape{args[data_port]->get_shape()}; + ov::StaticShape input_shape{args[data_port]->get_shape()}; size_t input_rank = input_shape.size(); size_t num_of_inputs = args.size(); @@ -316,7 +316,7 @@ std::vector get_target_shape_vector(const HostTensorVector& args, size_ } std::vector get_scales_vector(const HostTensorVector& args, - const Shape& input_shape, + const ov::StaticShape& input_shape, const op::v4::Interpolate::InterpolateAttrs& attrs, std::vector axes) { using ShapeCalcMode = ngraph::op::v4::Interpolate::ShapeCalcMode; @@ -358,7 +358,7 @@ std::vector correct_pad(const std::vector& p, size_t rank) { } // namespace void op::v4::Interpolate::correct_pads() { - PartialShape input_shape = PartialShape(get_input_partial_shape(0)); + ov::Shape input_shape = ov::Shape(get_input_partial_shape(0)); if (input_shape.rank().is_dynamic()) { return; } @@ -371,8 +371,8 @@ void op::v4::Interpolate::correct_pads() { static void pad_input_data(const uint8_t* data_ptr, uint8_t* padded_data_ptr, size_t type_size, - const Shape& input_shape, - const Shape& padded_input_shape, + const ov::StaticShape& input_shape, + const ov::StaticShape& padded_input_shape, const std::vector& pads_begin) { NGRAPH_SUPPRESS_DEPRECATED_START CoordinateTransform input_transform(input_shape); @@ -396,15 +396,15 @@ bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs, element::Type input_et = get_input_element_type(0); size_t type_size = input_et.size(); - Shape input_shape{inputs[data_port]->get_shape()}; - Shape padded_input_shape = get_padded_input_shape(input_shape).to_shape(); + ov::StaticShape input_shape{inputs[data_port]->get_shape()}; + ov::StaticShape padded_input_shape = get_padded_input_shape(input_shape).to_shape(); auto axes = get_axes_vector(inputs); size_t num_of_axes = axes.size(); auto scales = get_scales_vector(inputs, padded_input_shape, m_attrs, axes); - PartialShape output_shape{padded_input_shape}; + ov::Shape output_shape{padded_input_shape}; if (m_attrs.shape_calculation_mode == ShapeCalcMode::SCALES) { infer_using_scales(output_shape, axes, scales, padded_input_shape); 
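// ---------------------------------------------------------------------------
// Aside (editor's sketch): the SCALES branch taken above defers to
// infer_using_scales(), which leans on the multiply_bound_and_scale() helper
// shown earlier in this file. For a single extent the arithmetic is just a
// truncating multiply; -1 (an unbounded dimension bound) appears to pass
// through unchanged. The wrapper name below is illustrative only.
int64_t scale_extent(int64_t extent, float scale) {
    if (extent == -1)  // unbounded bound stays unbounded
        return extent;
    return static_cast<int64_t>(static_cast<double>(extent) * scale);
}
// e.g. scale_extent(10, 0.5f) == 5, scale_extent(7, 1.5f) == 10
// ---------------------------------------------------------------------------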
@@ -413,7 +413,7 @@ bool op::v4::Interpolate::evaluate_interpolate(const HostTensorVector& outputs, infer_using_shapes(output_shape, axes, sizes); } - Shape out_shape = output_shape.to_shape(); + ov::StaticShape out_shape = output_shape.to_shape(); outputs[0]->set_element_type(inputs[0]->get_element_type()); outputs[0]->set_shape(out_shape); diff --git a/ngraph/core/src/op/log_softmax.cpp b/ngraph/core/src/op/log_softmax.cpp index 4561cf0b4de524..ca719fdb800301 100644 --- a/ngraph/core/src/op/log_softmax.cpp +++ b/ngraph/core/src/op/log_softmax.cpp @@ -24,7 +24,7 @@ bool op::v5::LogSoftmax::visit_attributes(AttributeVisitor& visitor) { void op::v5::LogSoftmax::validate_and_infer_types() { NGRAPH_OP_SCOPE(v5_LogSoftmax_validate_and_infer_types); - const PartialShape& input_shape = get_input_partial_shape(0); + const ov::Shape& input_shape = get_input_partial_shape(0); if (input_shape.rank().is_static()) NODE_VALIDATION_CHECK(this, m_axis < input_shape.rank().get_length() && m_axis >= -input_shape.rank().get_length(), diff --git a/ngraph/core/src/op/logical_not.cpp b/ngraph/core/src/op/logical_not.cpp index c96a4834ffb16f..a0e2ed22515192 100644 --- a/ngraph/core/src/op/logical_not.cpp +++ b/ngraph/core/src/op/logical_not.cpp @@ -24,12 +24,11 @@ bool ngraph::op::v1::LogicalNot::visit_attributes(AttributeVisitor& visitor) { return true; } -// TODO(amprocte): Update this to allow only boolean, for consistency with logical binops. void op::v1::LogicalNot::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_LogicalNot_validate_and_infer_types); auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this); element::Type& args_et = std::get<0>(args_et_pshape); - PartialShape& args_pshape = std::get<1>(args_et_pshape); + ov::Shape& args_pshape = std::get<1>(args_et_pshape); set_output_type(0, args_et, args_pshape); } diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp index 0bd101dd2c4ad7..31cffb813ab85c 100644 --- a/ngraph/core/src/op/loop.cpp +++ b/ngraph/core/src/op/loop.cpp @@ -160,7 +160,7 @@ void op::v5::Loop::validate_and_infer_types() { auto body_parameter = m_bodies[0]->get_parameters().at(slice_input_description->m_body_parameter_index); const auto& input_partial_shape = inputs().at(index).get_source_output().get_partial_shape(); if (input_partial_shape.rank().is_dynamic()) { - body_parameter->set_partial_shape(PartialShape::dynamic()); + body_parameter->set_partial_shape(ov::Shape::dynamic()); } else { auto out_shape = input_partial_shape; const auto axis = @@ -202,12 +202,12 @@ void op::v5::Loop::validate_and_infer_types() { const auto& body_value_partial_shape = body_value.get_partial_shape(); auto out_shape = body_value_partial_shape; if (zero_number_of_iter) { - out_shape = PartialShape{0}; + out_shape = ov::Shape{0}; } else if (out_shape.rank().is_static()) { const auto axis = ngraph::normalize_axis(this, concat_output_description->m_axis, out_shape.rank()); const auto rank = out_shape.rank().get_length(); if (rank == 0) { - out_shape = PartialShape{1}; + out_shape = ov::Shape{1}; } if (out_shape[axis].is_static() && m_num_iterations != -1) { @@ -221,7 +221,7 @@ void op::v5::Loop::validate_and_infer_types() { else if (auto body_output_description = ov::as_type_ptr(output_description)) { - const PartialShape& ps = body_value.get_partial_shape(); + const ov::Shape& ps = body_value.get_partial_shape(); if (ps.is_dynamic()) { set_output_type(index, body_value.get_element_type(), ps); } else { diff --git a/ngraph/core/src/op/lrn.cpp b/ngraph/core/src/op/lrn.cpp 
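// ---------------------------------------------------------------------------
// Aside (editor's sketch): the LogSoftmax bounds check above, like the
// ngraph::normalize_axis() calls in Loop, follows the usual negative-axis
// convention for a static rank. Stripped of validation, the normalization is:
int64_t normalized_axis(int64_t axis, int64_t rank) {
    return axis < 0 ? axis + rank : axis;  // e.g. axis = -1, rank = 4  ->  3
}
// ---------------------------------------------------------------------------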
index 828fd154bc23ed..f3999593ce66d8 100644 --- a/ngraph/core/src/op/lrn.cpp +++ b/ngraph/core/src/op/lrn.cpp @@ -17,7 +17,7 @@ using namespace ngraph; OPENVINO_RTTI_DEFINITION(op::v0::LRN, "LRN", 0); op::LRN::LRN(const Output& arg, double alpha, double beta, double bias, size_t size) - : LRN(arg, op::v0::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) { + : LRN(arg, op::v0::Constant::create(element::i64, ov::StaticShape{1}, {1}), alpha, beta, bias, size) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); } @@ -41,13 +41,13 @@ AxisSet op::LRN::get_reduction_axes() const { void op::LRN::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_LRN_validate_and_infer_types); element::Type arg_type = get_input_element_type(0); - PartialShape arg_shape = get_input_partial_shape(0); + ov::Shape arg_shape = get_input_partial_shape(0); set_output_type(0, arg_type, arg_shape); - const PartialShape& input_shape = get_input_partial_shape(0); + const ov::Shape& input_shape = get_input_partial_shape(0); const auto input_shape_rank = input_shape.rank(); - PartialShape axes_shape{PartialShape::dynamic()}; + ov::Shape axes_shape{ov::Shape::dynamic()}; if (get_input_partial_shape(1).is_static()) { axes_shape = get_input_partial_shape(1); } diff --git a/ngraph/core/src/op/lstm_cell.cpp b/ngraph/core/src/op/lstm_cell.cpp index 724dd267994bca..c90eac687fda37 100644 --- a/ngraph/core/src/op/lstm_cell.cpp +++ b/ngraph/core/src/op/lstm_cell.cpp @@ -141,13 +141,13 @@ void op::v0::LSTMCell::validate_and_infer_types() { for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); + set_output_type(1, get_input_element_type(0), ov::Shape::dynamic()); return; } } - std::vector input_param{}; + std::vector input_param{}; auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); @@ -274,13 +274,13 @@ void op::v0::LSTMCell::validate_and_infer_types() { Output op::v0::LSTMCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_gates_count * get_hidden_size()}, + StaticShape{s_gates_count * get_hidden_size()}, vector{0.f})}; } Output op::v0::LSTMCell::get_default_peepholes_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_peepholes_count * get_hidden_size()}, + StaticShape{s_peepholes_count * get_hidden_size()}, vector{0.f})}; } @@ -416,8 +416,8 @@ void op::v4::LSTMCell::validate_and_infer_types() { NGRAPH_OP_SCOPE(v4_LSTMCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); + set_output_type(1, get_input_element_type(0), ov::Shape::dynamic()); return; } } @@ -513,7 +513,7 @@ void op::v4::LSTMCell::validate_and_infer_types() { Output op::v4::LSTMCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_gates_count * get_hidden_size()}, + StaticShape{s_gates_count * get_hidden_size()}, vector{0.f})}; } diff --git a/ngraph/core/src/op/lstm_sequence.cpp 
b/ngraph/core/src/op/lstm_sequence.cpp index 81a0e58f5fbfb6..23f962b9c35f96 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -82,8 +82,8 @@ op::v0::LSTMSequence::LSTMSequence(const Output& X, R, B, Constant::create(element::f32, - Shape{(lstm_direction == LSTMSequence::direction::BIDIRECTIONAL ? 2UL : 1UL), - 3UL * static_cast(hidden_size)}, + StaticShape{(lstm_direction == LSTMSequence::direction::BIDIRECTIONAL ? 2UL : 1UL), + 3UL * static_cast(hidden_size)}, std::vector{0.f}), hidden_size, lstm_direction, @@ -291,7 +291,7 @@ shared_ptr op::v0::LSTMSequence::prepare_input(Output node, void op::v0::LSTMSequence::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_LSTMSequence_validate_and_infer_types); - std::vector input_param{}; + std::vector input_param{}; auto lstm_seq_gates_count = 4; auto lstm_seq_peepholes_count = 3; @@ -464,13 +464,13 @@ void op::v5::LSTMSequence::validate_and_infer_types() { NGRAPH_OP_SCOPE(v5_LSTMSequence_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); - set_output_type(2, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); + set_output_type(1, get_input_element_type(0), ov::Shape::dynamic()); + set_output_type(2, get_input_element_type(0), ov::Shape::dynamic()); return; } } - std::vector input_param{}; + std::vector input_param{}; auto lstm_seq_gates_count = 4; auto merged_batch_size = Dimension::dynamic(); diff --git a/ngraph/core/src/op/matmul.cpp b/ngraph/core/src/op/matmul.cpp index 432d5c73099cbb..7756c540e04645 100644 --- a/ngraph/core/src/op/matmul.cpp +++ b/ngraph/core/src/op/matmul.cpp @@ -38,10 +38,10 @@ shared_ptr op::MatMul::clone_with_new_inputs(const OutputVector& new_args) } namespace matmul { -PartialShape validate_matmul_output_shape(const PartialShape& arg0_shape, - const PartialShape& arg1_shape, - bool transpose_a, - bool transpose_b) { +ov::Shape validate_matmul_output_shape(const ov::Shape& arg0_shape, + const ov::Shape& arg1_shape, + bool transpose_a, + bool transpose_b) { auto arg0_rank = arg0_shape.rank().get_length(); auto arg1_rank = arg1_shape.rank().get_length(); @@ -169,7 +169,7 @@ PartialShape validate_matmul_output_shape(const PartialShape& arg0_shape, output_shape.erase(output_shape.begin() + output_shape.size() - 1); } - return PartialShape(output_shape); + return ov::Shape(output_shape); } template @@ -180,12 +180,12 @@ bool evaluate(const HostTensorPtr& arg0, bool transpose_b) { using T = typename element_type_traits::value_type; - Shape arg0_shape = arg0->get_shape(); - Shape arg1_shape = arg1->get_shape(); + ov::StaticShape arg0_shape = arg0->get_shape(); + ov::StaticShape arg1_shape = arg1->get_shape(); - PartialShape output_partial_shape = - validate_matmul_output_shape(PartialShape(arg0_shape), PartialShape(arg1_shape), transpose_a, transpose_b); - Shape output_shape = output_partial_shape.to_shape(); + ov::Shape output_partial_shape = + validate_matmul_output_shape(ov::Shape(arg0_shape), ov::Shape(arg1_shape), transpose_a, transpose_b); + ov::StaticShape output_shape = output_partial_shape.to_shape(); output->set_element_type(arg0->get_element_type()); output->set_shape(output_shape); @@ -259,7 +259,7 @@ void ngraph::op::v0::MatMul::validate_and_infer_types() { const auto& B_partial_shape = 
get_input_partial_shape(1); if (A_partial_shape.rank().is_static() && B_partial_shape.rank().is_static()) { - PartialShape output_shape; + ov::Shape output_shape; const bool transpose_a = get_transpose_a(); const bool transpose_b = get_transpose_b(); @@ -268,6 +268,6 @@ void ngraph::op::v0::MatMul::validate_and_infer_types() { set_output_type(0, result_et, output_shape); } else { - set_output_type(0, result_et, PartialShape::dynamic()); + set_output_type(0, result_et, ov::Shape::dynamic()); } } diff --git a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp index 675d0bc8e6f54f..adb9721c3280da 100644 --- a/ngraph/core/src/op/max_pool.cpp +++ b/ngraph/core/src/op/max_pool.cpp @@ -19,9 +19,9 @@ OPENVINO_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1, op::util::MaxPoolBase); op::v1::MaxPool::MaxPool(const Output& arg, const Strides& strides, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, + const ov::StaticShape& pads_begin, + const ov::StaticShape& pads_end, + const ov::StaticShape& kernel, const op::RoundingType rounding_type, const PadType auto_pad) : op::util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad) { @@ -44,7 +44,7 @@ void op::v1::MaxPool::validate_and_infer_types() { MaxPoolBase::validate_and_infer_types(); - const PartialShape output_shape = infer_output_shape(Strides{}); // no dilations of the filter window + const ov::Shape output_shape = infer_output_shape(Strides{}); // no dilations of the filter window set_output_type(0, get_input_element_type(0), output_shape); } @@ -69,11 +69,11 @@ namespace maxpool { template inline bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, - const Shape& out_shape, - const Shape& window_shape, + const ov::StaticShape& out_shape, + const ov::StaticShape& window_shape, const Strides& window_movement_strides, - const Shape& padding_below, - const Shape& padding_above) { + const ov::StaticShape& padding_below, + const ov::StaticShape& padding_above) { using T = typename element_type_traits::value_type; out->set_shape(out_shape); runtime::reference::max_pool(arg->get_data_ptr(), @@ -89,11 +89,11 @@ inline bool evaluate(const HostTensorPtr& arg, bool evaluate_maxpool(const HostTensorPtr& arg, const HostTensorPtr& out, - const Shape& out_shape, - const Shape& kernel, + const ov::StaticShape& out_shape, + const ov::StaticShape& kernel, const Strides& strides, - const Shape& pad_begin, - const Shape& pad_end) { + const ov::StaticShape& pad_begin, + const ov::StaticShape& pad_end) { bool rc = true; auto arg_shape = arg->get_shape(); @@ -165,12 +165,12 @@ template inline bool evaluate(const HostTensorPtr& data, const HostTensorPtr& values, const HostTensorPtr& indices, - const Shape& out_shape, - const Shape& kernel, + const ov::StaticShape& out_shape, + const ov::StaticShape& kernel, const Strides& strides, const Strides& dilations, - const Shape& pads_begin, - const Shape& pads_end, + const ov::StaticShape& pads_begin, + const ov::StaticShape& pads_end, const int64_t axis) { using Values_t = typename element_type_traits::value_type; using Indices_t = typename element_type_traits::value_type; @@ -191,12 +191,12 @@ inline bool evaluate(const HostTensorPtr& data, bool evaluate_maxpool(const HostTensorPtr& data, const HostTensorPtr& values, const HostTensorPtr& indices, - const Shape& out_shape, - const Shape& kernel, + const ov::StaticShape& out_shape, + const ov::StaticShape& kernel, const Strides& strides, const Strides& dilations, - const Shape& pads_begin, - const 
Shape& pads_end, + const ov::StaticShape& pads_begin, + const ov::StaticShape& pads_end, const int64_t axis) { #define EVAL_MAX_POOL_8(data_et, index_et) \ NGRAPH_2_TYPES_CASE(maxpool_v8::evaluate_maxpool, \ @@ -255,9 +255,9 @@ OPENVINO_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase); op::v8::MaxPool::MaxPool(const Output& arg, const Strides& strides, const Strides& dilations, - const Shape& pads_begin, - const Shape& pads_end, - const Shape& kernel, + const ov::StaticShape& pads_begin, + const ov::StaticShape& pads_end, + const ov::StaticShape& kernel, const op::RoundingType rounding_type, const PadType auto_pad, const element::Type index_element_type, @@ -293,7 +293,7 @@ void op::v8::MaxPool::validate_and_infer_types() { m_axis = ngraph::normalize_axis(this, m_axis, input_shape.rank()); } - const PartialShape output_shape = infer_output_shape(m_dilations); + const ov::Shape output_shape = infer_output_shape(m_dilations); set_output_type(0, get_input_element_type(0), output_shape); set_output_type(1, m_index_element_type, output_shape); diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index 143b89d60ef0b1..f2dbdff9728ca2 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -41,9 +41,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const bool sort_result_descending) : Op({boxes, scores, - op::v0::Constant::create(element::i64, Shape{}, {0}), - op::v0::Constant::create(element::f32, Shape{}, {.0f}), - op::v0::Constant::create(element::f32, Shape{}, {.0f})}), + op::v0::Constant::create(element::i64, ov::StaticShape{}, {0}), + op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}), + op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f})}), m_box_encoding{box_encoding}, m_sort_result_descending{sort_result_descending} { constructor_validate_and_infer_types(); @@ -55,11 +55,11 @@ std::shared_ptr op::v1::NonMaxSuppression::clone_with_new_inputs(const Out NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); const auto& arg2 = - new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, ov::StaticShape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? 
new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -90,7 +90,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() { // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] - PartialShape out_shape = {Dimension::dynamic(), 3}; + ov::Shape out_shape = {Dimension::dynamic(), 3}; if (boxes_ps.is_dynamic() || scores_ps.is_dynamic()) { set_output_type(0, output_element_type, out_shape); @@ -219,9 +219,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const element::Type& output_type) : Op({boxes, scores, - op::v0::Constant::create(element::i64, Shape{}, {0}), - op::v0::Constant::create(element::f32, Shape{}, {.0f}), - op::v0::Constant::create(element::f32, Shape{}, {.0f})}), + op::v0::Constant::create(element::i64, ov::StaticShape{}, {0}), + op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}), + op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f})}), m_box_encoding{box_encoding}, m_sort_result_descending{sort_result_descending}, m_output_type{output_type} { @@ -234,11 +234,11 @@ std::shared_ptr op::v3::NonMaxSuppression::clone_with_new_inputs(const Out NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); const auto& arg2 = - new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, ov::StaticShape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -336,7 +336,7 @@ void op::v3::NonMaxSuppression::validate_and_infer_types() { // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] - PartialShape out_shape = {Dimension::dynamic(), 3}; + ov::Shape out_shape = {Dimension::dynamic(), 3}; validate(); @@ -412,9 +412,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression(const Output& boxes, const element::Type& output_type) : op::v3::NonMaxSuppression(boxes, scores, - op::v0::Constant::create(element::i64, Shape{}, {0}), - op::v0::Constant::create(element::f32, Shape{}, {.0f}), - op::v0::Constant::create(element::f32, Shape{}, {.0f}), + op::v0::Constant::create(element::i64, ov::StaticShape{}, {0}), + op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}), + op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}), box_encoding, sort_result_descending, output_type) { @@ -427,11 +427,11 @@ std::shared_ptr op::v4::NonMaxSuppression::clone_with_new_inputs(const Out NODE_VALIDATION_CHECK(this, new_args.size() >= 2 && new_args.size() <= 5, "Number of inputs must be 2, 3, 4 or 5"); const auto& arg2 = - new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, Shape{}, {0}); + new_args.size() > 2 ? new_args.at(2) : ngraph::op::v0::Constant::create(element::i32, ov::StaticShape{}, {0}); const auto& arg3 = - new_args.size() > 3 ? 
new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 3 ? new_args.at(3) : ngraph::op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}); const auto& arg4 = - new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, Shape{}, {.0f}); + new_args.size() > 4 ? new_args.at(4) : ngraph::op::v0::Constant::create(element::f32, ov::StaticShape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -450,7 +450,7 @@ void op::v4::NonMaxSuppression::validate_and_infer_types() { // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] - PartialShape out_shape = {Dimension::dynamic(), 3}; + ov::Shape out_shape = {Dimension::dynamic(), 3}; op::v3::NonMaxSuppression::validate(); @@ -609,12 +609,12 @@ inline bool is_float_type_admissible(const element::Type& t) { return t == element::f32 || t == element::f16 || t == element::bf16; } -inline bool is_scalar_or_1d_tensor_with_1_element(const PartialShape& p) { +inline bool is_scalar_or_1d_tensor_with_1_element(const ov::Shape& p) { if (p.is_dynamic()) { return false; } - Shape shape = p.to_shape(); + ov::StaticShape shape = p.to_shape(); return ngraph::is_scalar(shape) || (is_vector(shape) && (shape[0] == 1)); } @@ -797,7 +797,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types() { // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] - PartialShape out_shape = {Dimension::dynamic(), 3}; + ov::Shape out_shape = {Dimension::dynamic(), 3}; validate(); @@ -816,7 +816,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types() { set_output_type(0, m_output_type, out_shape); set_output_type(1, element::f32, out_shape); - set_output_type(2, m_output_type, Shape{1}); + set_output_type(2, m_output_type, ov::StaticShape{1}); } std::ostream& ov::operator<<(std::ostream& s, const op::v5::NonMaxSuppression::BoxEncodingType& type) { diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index e16d134a556d64..277c44cf065fb1 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -47,13 +47,13 @@ void op::v3::NonZero::validate_and_infer_types() { m_output_type == element::i64 || m_output_type == element::i32, "Output type must be i32 or i64"); // For scalar non-zero value case, onnx test case expects output shape {1, 1} - const PartialShape& input_shape = get_input_partial_shape(0); + const ov::Shape& input_shape = get_input_partial_shape(0); if (input_shape.rank().compatible(0)) { - set_output_type(0, m_output_type, PartialShape{Dimension::dynamic(), Dimension::dynamic()}); + set_output_type(0, m_output_type, ov::Shape{Dimension::dynamic(), Dimension::dynamic()}); } else { const Dimension dim = std::accumulate(begin(input_shape), end(input_shape), Dimension(0, 1), std::multiplies()); - set_output_type(0, m_output_type, PartialShape{input_shape.rank(), dim}); + set_output_type(0, m_output_type, ov::Shape{input_shape.rank(), dim}); } set_input_is_relevant_to_shape(0); @@ -82,16 +82,16 @@ bool evaluate_nonzero_execute(const HostTensorPtr& input, const HostTensorPtr& o using IN_T = typename element_type_traits::value_type; using OUT_T = typename element_type_traits::value_type; - Shape input_shape = input->get_shape(); + ov::StaticShape input_shape = input->get_shape(); size_t input_rank = input_shape.size(); size_t non_zero_count = runtime::reference::non_zero_get_count(input->get_data_ptr(), 
input_shape); - Shape out_shape; + ov::StaticShape out_shape; if (input_rank == 0 && non_zero_count > 0) { - out_shape = Shape{1, 1}; + out_shape = ov::StaticShape{1, 1}; } else { - out_shape = Shape{input_rank, non_zero_count}; + out_shape = ov::StaticShape{input_rank, non_zero_count}; } output->set_shape(out_shape); diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index a97751381a68f4..cebed042fedf0c 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -7,6 +7,7 @@ #include "itt.hpp" #include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/reference/not_equal.hpp" +#include "ngraph/validation_util.hpp" using namespace std; using namespace ngraph; @@ -65,6 +66,7 @@ shared_ptr op::v1::NotEqual::clone_with_new_inputs(const OutputVector& new bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v1_NotEqual_evaluate); + NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1) && validate_host_tensor_vector(inputs, 2)); return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob()); } diff --git a/ngraph/core/src/op/one_hot.cpp b/ngraph/core/src/op/one_hot.cpp index 7e3443ec0fe4d7..1860626d7db173 100644 --- a/ngraph/core/src/op/one_hot.cpp +++ b/ngraph/core/src/op/one_hot.cpp @@ -61,7 +61,7 @@ void op::v1::OneHot::validate_and_infer_types() { off_value_shape.is_dynamic() || ngraph::is_scalar(off_value_shape.to_shape()), "off_value input must be scalar."); - PartialShape result_shape{PartialShape::dynamic()}; + ov::Shape result_shape{ov::Shape::dynamic()}; const auto& depth = input_value(1).get_node_shared_ptr(); const auto& depth_constant = get_constant_from_source(input_value(1)); if (indices_shape.rank().is_static() && depth_constant) { diff --git a/ngraph/core/src/op/pad.cpp b/ngraph/core/src/op/pad.cpp index 4e2126524c3c2b..999c52ba9d797c 100644 --- a/ngraph/core/src/op/pad.cpp +++ b/ngraph/core/src/op/pad.cpp @@ -33,7 +33,7 @@ op::v1::Pad::Pad(const Output& arg, const Output& pads_begin, const Output& pads_end, PadMode pad_mode) - : Op({arg, pads_begin, pads_end, op::v0::Constant::create(arg.get_element_type(), Shape{}, {0})}), + : Op({arg, pads_begin, pads_end, op::v0::Constant::create(arg.get_element_type(), ov::StaticShape{}, {0})}), m_pad_mode{pad_mode} { constructor_validate_and_infer_types(); } @@ -80,7 +80,7 @@ void op::v1::Pad::validate_and_infer_types() { ")."); NODE_VALIDATION_CHECK(this, - arg_pad_shape.compatible(PartialShape{}), + arg_pad_shape.compatible(ov::Shape{}), "Argument for padding value is not a scalar (shape: ", arg_pad_shape, ")."); @@ -158,7 +158,7 @@ void op::v1::Pad::validate_and_infer_types() { } set_output_type(0, get_input_element_type(0), result_dims); } else { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic(arg_shape_rank)); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic(arg_shape_rank)); } } diff --git a/ngraph/core/src/op/parameter.cpp b/ngraph/core/src/op/parameter.cpp index 415c1dbda002ac..a6a4536b454b06 100644 --- a/ngraph/core/src/op/parameter.cpp +++ b/ngraph/core/src/op/parameter.cpp @@ -14,7 +14,7 @@ using namespace ngraph; OPENVINO_RTTI_DEFINITION(op::v0::Parameter, "Parameter", 0); -op::Parameter::Parameter(const element::Type& element_type, const PartialShape& pshape) +op::Parameter::Parameter(const element::Type& element_type, const ov::Shape& pshape) : m_partial_shape(pshape), m_element_type(element_type), m_is_relevant_to_shapes(false) { 
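// ---------------------------------------------------------------------------
// Aside (editor's sketch): the NonZero inference above is a compact example of
// the interval arithmetic these dynamic-shape types support. The non-zero
// count is known only at runtime, so the static output shape is
// {input_rank, [0, total]}: folding the input dimensions starting from
// Dimension(0, 1) pins the lower bound at 0 while the upper bound accumulates
// the element count. Helper name is illustrative; ov::Shape and Dimension as
// used in the sources above.
ov::Shape nonzero_static_shape(const ov::Shape& input_shape) {
    Dimension count(0, 1);
    for (const auto& d : input_shape)
        count = count * d;  // interval multiply: [0, 1] * [min, max] = [0, max]
    return ov::Shape{input_shape.rank(), count};
}
// ---------------------------------------------------------------------------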
diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index a782cf08bdd7ac..8085405518e09b 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -58,11 +58,12 @@ void op::PriorBox::validate_and_infer_types() { auto layer_shape = const_shape->get_shape_val(); - set_output_type(0, - element::f32, - Shape{2, 4 * layer_shape[0] * layer_shape[1] * static_cast(number_of_priors(m_attrs))}); + set_output_type( + 0, + element::f32, + ov::StaticShape{2, 4 * layer_shape[0] * layer_shape[1] * static_cast(number_of_priors(m_attrs))}); } else { - set_output_type(0, element::f32, PartialShape{2, Dimension::dynamic()}); + set_output_type(0, element::f32, ov::Shape{2, Dimension::dynamic()}); } } diff --git a/ngraph/core/src/op/prior_box_clustered.cpp b/ngraph/core/src/op/prior_box_clustered.cpp index 6cb8cc8e02c2c2..890db04f30f8ba 100644 --- a/ngraph/core/src/op/prior_box_clustered.cpp +++ b/ngraph/core/src/op/prior_box_clustered.cpp @@ -66,9 +66,9 @@ void ov::op::v0::PriorBoxClustered::validate_and_infer_types() { auto layer_shape = const_shape->get_shape_val(); // {Prior boxes, variances-adjusted prior boxes} const auto num_priors = m_attrs.widths.size(); - set_output_type(0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors}); + set_output_type(0, element::f32, ov::StaticShape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::f32, ov::Shape::dynamic()); } } diff --git a/ngraph/core/src/op/proposal.cpp b/ngraph/core/src/op/proposal.cpp index fe4c95257c5ba8..8998e677243f4f 100644 --- a/ngraph/core/src/op/proposal.cpp +++ b/ngraph/core/src/op/proposal.cpp @@ -100,7 +100,7 @@ void op::v0::Proposal::validate_and_infer_types() { } // intersect the batch size - set_output_type(0, get_input_element_type(0), PartialShape{out_dim * m_attrs.post_nms_topn, 5}); + set_output_type(0, get_input_element_type(0), ov::Shape{out_dim * m_attrs.post_nms_topn, 5}); } shared_ptr op::v0::Proposal::clone_with_new_inputs(const OutputVector& new_args) const { @@ -143,9 +143,9 @@ void op::v4::Proposal::validate_and_infer_types() { v0::Proposal::validate_and_infer_types(); // Output shape was inferred in v0's validate_and_infer_types const auto proposals_ps = get_output_partial_shape(0); - auto out_ps = PartialShape{Dimension::dynamic()}; + auto out_ps = ov::Shape{Dimension::dynamic()}; if (proposals_ps.rank().is_static() && proposals_ps.rank().compatible(2)) { - out_ps = PartialShape{proposals_ps[0]}; + out_ps = ov::Shape{proposals_ps[0]}; } set_output_type(1, get_input_element_type(0), out_ps); } diff --git a/ngraph/core/src/op/psroi_pooling.cpp b/ngraph/core/src/op/psroi_pooling.cpp index becc9425adac65..70a07f8a4ce41b 100644 --- a/ngraph/core/src/op/psroi_pooling.cpp +++ b/ngraph/core/src/op/psroi_pooling.cpp @@ -60,10 +60,10 @@ void ov::op::v0::PSROIPooling::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, m_spatial_bins_y > 0, "spatial_bins_y has to be greater than 0"); } - const PartialShape& feat_map_pshape = get_input_partial_shape(0); - const PartialShape& coords_pshape = get_input_partial_shape(1); + const ov::Shape& feat_map_pshape = get_input_partial_shape(0); + const ov::Shape& coords_pshape = get_input_partial_shape(1); if (feat_map_pshape.rank().is_dynamic() || coords_pshape.rank().is_dynamic()) { - set_output_type(0, feat_maps_et, PartialShape::dynamic()); + set_output_type(0, feat_maps_et, ov::Shape::dynamic()); } else { 
NODE_VALIDATION_CHECK(this, feat_map_pshape.rank().get_length() == 4, diff --git a/ngraph/core/src/op/random_uniform.cpp b/ngraph/core/src/op/random_uniform.cpp index c343c408c4b01c..e973f91f4f5c5e 100644 --- a/ngraph/core/src/op/random_uniform.cpp +++ b/ngraph/core/src/op/random_uniform.cpp @@ -35,14 +35,14 @@ void op::v8::RandomUniform::validate_and_infer_types() { shape_et.is_dynamic() || shape_et == element::i32 || shape_et == element::i64, "Type of the input should be int32 or int64."); - PartialShape output_shape = PartialShape::dynamic(); + ov::Shape output_shape = ov::Shape::dynamic(); const auto& input_shape = get_input_partial_shape(0); if (input_shape.rank().is_static()) { NODE_VALIDATION_CHECK(this, input_shape.rank() == 1, "The rank of the tensor defining output shape must be equal to 1."); if (const auto& const_shape = get_constant_from_source(input_value(0))) { - output_shape = PartialShape(const_shape->cast_vector()); + output_shape = ov::Shape(const_shape->cast_vector()); } } @@ -53,7 +53,7 @@ void op::v8::RandomUniform::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, min_rank <= 1, "Min value must be a scalar or 1D tensor."); if (min_rank == 1) { - NODE_VALIDATION_CHECK(this, min_pshape.compatible(Shape{1}), "'min_val' should have 1 element."); + NODE_VALIDATION_CHECK(this, min_pshape.compatible(ov::StaticShape{1}), "'min_val' should have 1 element."); } } @@ -62,7 +62,7 @@ void op::v8::RandomUniform::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, max_rank <= 1, "Max value must be a scalar or 1D tensor."); if (max_rank == 1) { - NODE_VALIDATION_CHECK(this, max_pshape.compatible(Shape{1}), "'max_val' should have 1 element."); + NODE_VALIDATION_CHECK(this, max_pshape.compatible(ov::StaticShape{1}), "'max_val' should have 1 element."); } } diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index 7bf31bdf81f325..c58d14de74abab 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -71,9 +71,15 @@ void op::v4::Range::validate_and_infer_types() { set_input_is_relevant_to_shape(1); set_input_is_relevant_to_shape(2); - NODE_VALIDATION_CHECK(this, get_input_partial_shape(0).compatible(Shape{}), "'start' input is not a scalar"); - NODE_VALIDATION_CHECK(this, get_input_partial_shape(1).compatible(Shape{}), "'stop' input is not a scalar"); - NODE_VALIDATION_CHECK(this, get_input_partial_shape(2).compatible(Shape{}), "'step' input is not a scalar"); + NODE_VALIDATION_CHECK(this, + get_input_partial_shape(0).compatible(ov::StaticShape{}), + "'start' input is not a scalar"); + NODE_VALIDATION_CHECK(this, + get_input_partial_shape(1).compatible(ov::StaticShape{}), + "'stop' input is not a scalar"); + NODE_VALIDATION_CHECK(this, + get_input_partial_shape(2).compatible(ov::StaticShape{}), + "'step' input is not a scalar"); NODE_VALIDATION_CHECK(this, get_input_element_type(0).is_integral_number() || get_input_element_type(0).is_real(), @@ -117,7 +123,7 @@ void op::v4::Range::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, std::isfinite(step) && !std::isnan(step), "'step' cannot be nan or infinite."); } - PartialShape result{PartialShape::dynamic(1)}; + ov::Shape result{ov::Shape::dynamic(1)}; if (const_start != nullptr && const_stop != nullptr && const_step != nullptr) { // all inputs must be casted to output_type before @@ -142,7 +148,7 @@ void op::v4::Range::validate_and_infer_types() { double strided = ceil(fabs(span) / fabs(step)); - result = PartialShape{Dimension(static_cast(strided))}; + result = 
ov::Shape{Dimension(static_cast(strided))}; } set_output_type(0, m_output_type, result); } @@ -221,7 +227,7 @@ bool evaluate(const HostTensorPtr& out, if (steps > 0) { out_size = steps; } - Shape out_shape = Shape({static_cast(out_size)}); + ov::StaticShape out_shape = ov::StaticShape({static_cast(out_size)}); out->set_shape(out_shape); runtime::reference::range(&start_val, &step_val, shape_size(out_shape), out->get_data_ptr()); return true; @@ -324,7 +330,7 @@ adjust_for_step_and_sign(T span, T step) { } template -static PartialShape infer_output_shape(const op::v0::Range* node, const element::Type& /* et */) { +static ov::Shape infer_output_shape(const op::v0::Range* node, const element::Type& /* et */) { auto const_start = get_constant_from_source(node->input_value(0)); auto const_stop = get_constant_from_source(node->input_value(1)); auto const_step = get_constant_from_source(node->input_value(2)); @@ -354,7 +360,7 @@ static PartialShape infer_output_shape(const op::v0::Range* node, const element: check_step(node, step); } - PartialShape result{PartialShape::dynamic(1)}; + ov::Shape result{ov::Shape::dynamic(1)}; if (const_start != nullptr && const_stop != nullptr && const_step != nullptr) { T span; @@ -369,7 +375,7 @@ static PartialShape infer_output_shape(const op::v0::Range* node, const element: T strided = adjust_for_step_and_sign(span, step); - result = PartialShape{Dimension(static_cast(strided))}; + result = ov::Shape{Dimension(static_cast(strided))}; } return result; @@ -398,11 +404,17 @@ void op::v0::Range::validate_and_infer_types() { result_et != element::boolean, "Element type for start, stop, and step, must not be boolean."); - NODE_VALIDATION_CHECK(this, get_input_partial_shape(0).compatible(Shape{}), "'start' input is not a scalar"); - NODE_VALIDATION_CHECK(this, get_input_partial_shape(1).compatible(Shape{}), "'stop' input is not a scalar"); - NODE_VALIDATION_CHECK(this, get_input_partial_shape(2).compatible(Shape{}), "'step' input is not a scalar"); + NODE_VALIDATION_CHECK(this, + get_input_partial_shape(0).compatible(ov::StaticShape{}), + "'start' input is not a scalar"); + NODE_VALIDATION_CHECK(this, + get_input_partial_shape(1).compatible(ov::StaticShape{}), + "'stop' input is not a scalar"); + NODE_VALIDATION_CHECK(this, + get_input_partial_shape(2).compatible(ov::StaticShape{}), + "'step' input is not a scalar"); - PartialShape result_shape; + ov::Shape result_shape; #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) # pragma GCC diagnostic push @@ -447,7 +459,7 @@ void op::v0::Range::validate_and_infer_types() { result_shape = infer_output_shape(this, result_et); break; case element::Type_t::dynamic: - result_shape = PartialShape::dynamic(1); + result_shape = ov::Shape::dynamic(1); break; case element::Type_t::u1: case element::Type_t::i4: diff --git a/ngraph/core/src/op/read_value.cpp b/ngraph/core/src/op/read_value.cpp index 41c2242b2e1ae5..43bc6277c1b703 100644 --- a/ngraph/core/src/op/read_value.cpp +++ b/ngraph/core/src/op/read_value.cpp @@ -61,7 +61,7 @@ void op::v6::ReadValue::validate_and_infer_types() { element::Type::merge(var_info.data_type, m_variable->get_info().data_type, arg_t), "Variables types are inconsistent."); NODE_VALIDATION_CHECK(this, - PartialShape::merge_into(var_info.data_shape, m_variable->get_info().data_shape), + ov::Shape::merge_into(var_info.data_shape, m_variable->get_info().data_shape), "Variable shape and output shape are inconsistent."); m_variable->update(var_info); set_output_type(0, arg_t, output_shape); @@ 
-80,7 +80,7 @@ bool op::v6::ReadValue::visit_attributes(AttributeVisitor& visitor) { } void op::v6::ReadValue::revalidate_and_infer_types() { - VariableInfo var_info{PartialShape::dynamic(), element::dynamic, m_variable->get_info().variable_id}; + VariableInfo var_info{ov::Shape::dynamic(), element::dynamic, m_variable->get_info().variable_id}; m_variable->update(var_info); Node::revalidate_and_infer_types(); } diff --git a/ngraph/core/src/op/region_yolo.cpp b/ngraph/core/src/op/region_yolo.cpp index e8cf031ff3cbae..758e40133f4e50 100644 --- a/ngraph/core/src/op/region_yolo.cpp +++ b/ngraph/core/src/op/region_yolo.cpp @@ -56,8 +56,8 @@ void op::RegionYolo::validate_and_infer_types() { input_et); if (get_input_partial_shape(0).is_static()) { - Shape input_shape = get_input_partial_shape(0).to_shape(); - Shape output_shape; + ov::StaticShape input_shape = get_input_partial_shape(0).to_shape(); + ov::StaticShape output_shape; int end_axis = m_end_axis; if (m_end_axis < 0) { m_end_axis += input_shape.size(); @@ -83,7 +83,7 @@ void op::RegionYolo::validate_and_infer_types() { } set_output_type(0, input_et, output_shape); } else { - set_output_type(0, input_et, PartialShape::dynamic()); + set_output_type(0, input_et, ov::Shape::dynamic()); } } diff --git a/ngraph/core/src/op/reorg_yolo.cpp b/ngraph/core/src/op/reorg_yolo.cpp index f90b07baa0e51a..97fba8fa3a25a0 100644 --- a/ngraph/core/src/op/reorg_yolo.cpp +++ b/ngraph/core/src/op/reorg_yolo.cpp @@ -43,14 +43,14 @@ void op::ReorgYolo::validate_and_infer_types() { input_shape[1] >= (m_strides[0] * m_strides[0]), "For [N, C, H, W] input shape, C >= (stride*stride) is required."); - Shape output_shape{input_shape[0], input_shape[1]}; + ov::StaticShape output_shape{input_shape[0], input_shape[1]}; for (size_t i = 2; i < input_shape.size(); i++) { output_shape.push_back(input_shape[i] / m_strides[0]); output_shape[1] *= m_strides[0]; } set_output_type(0, input_et, output_shape); } else { - set_output_type(0, input_et, PartialShape::dynamic()); + set_output_type(0, input_et, ov::Shape::dynamic()); } } diff --git a/ngraph/core/src/op/reshape.cpp b/ngraph/core/src/op/reshape.cpp index 6593a7b2fd40f8..8218f2693a4299 100644 --- a/ngraph/core/src/op/reshape.cpp +++ b/ngraph/core/src/op/reshape.cpp @@ -57,8 +57,8 @@ void op::v1::Reshape::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, shape_pattern_et.is_integral_number(), "Shape pattern must be an integral number."); // check shapes - const PartialShape& input_pshape = get_input_partial_shape(0); - const PartialShape& shape_pattern_shape = get_input_partial_shape(1); + const ov::Shape& input_pshape = get_input_partial_shape(0); + const ov::Shape& shape_pattern_shape = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, shape_pattern_shape.rank().compatible(1) || (shape_pattern_shape.rank().is_static() && shape_pattern_shape.rank().get_length() == 0), @@ -68,7 +68,7 @@ void op::v1::Reshape::validate_and_infer_types() { Rank output_rank = shape_pattern_shape.rank().is_dynamic() ? Rank::dynamic() : shape_pattern_shape.rank().get_length() == 0 ? 
0 : shape_pattern_shape[0]; - set_output_type(0, get_input_element_type(0), PartialShape::dynamic(output_rank)); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic(output_rank)); set_input_is_relevant_to_shape(1); std::vector<Dimension> reshape_pattern; @@ -155,8 +155,8 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, const Ho std::vector<Dimension> output_shape(out_shape_val.size()); calculate_output_shape(reshape_pattern, minus_one_idx, inputs[0]->get_partial_shape(), output_shape); - NGRAPH_CHECK(PartialShape(output_shape).is_static()); - outputs[0]->set_shape(PartialShape(output_shape).to_shape()); + NGRAPH_CHECK(ov::Shape(output_shape).is_static()); + outputs[0]->set_shape(ov::Shape(output_shape).to_shape()); const AxisVector order = get_default_order(inputs[0]->get_shape()); return reshapeop::evaluate_reshape(inputs[0], outputs[0], order); @@ -215,7 +215,7 @@ bool op::v1::Reshape::constant_fold(OutputVector& output_values, const OutputVec void op::v1::Reshape::calculate_output_shape(vector<Dimension>& reshape_pattern, const int64_t& minus_one_idx, - const PartialShape& input_pshape, + const ov::Shape& input_pshape, vector<Dimension>& output_shape) const { Dimension output_product(1); for (int64_t i = 0; i < static_cast<int64_t>(reshape_pattern.size()); ++i) { @@ -303,7 +303,7 @@ void op::v1::Reshape::calculate_output_shape(vector<Dimension>& reshape_pattern, } } } - PartialShape output_pshape(output_shape); + ov::Shape output_pshape(output_shape); if (input_pshape.is_static() && output_pshape.is_static()) { size_t zero_dims = std::count_if(reshape_pattern.begin(), reshape_pattern.end(), [](Dimension dim) { return dim.get_max_length() == 0 && dim.get_min_length() == 0; diff --git a/ngraph/core/src/op/reverse_sequence.cpp b/ngraph/core/src/op/reverse_sequence.cpp index efc380fefaa74a..cdb68a2af9b363 100644 --- a/ngraph/core/src/op/reverse_sequence.cpp +++ b/ngraph/core/src/op/reverse_sequence.cpp @@ -63,7 +63,7 @@ void op::ReverseSequence::validate_and_infer_types() { "Sequence lengths rank must be equal to 1. 
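(Aside: calculate_output_shape, whose signature changes above, resolves a lone -1 in the reshape pattern from the element-count balance. A simplified sketch assuming fully static shapes and ignoring the zero-dimension special case handled later in that function; the helper below is hypothetical:)

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Replace a single -1 in 'pattern' so the element counts match.
    // Precondition (checked by the real op): the known dims divide evenly.
    std::vector<int64_t> resolve_reshape_pattern(const std::vector<int64_t>& input,
                                                 std::vector<int64_t> pattern) {
        int64_t in_product = 1, known_product = 1, minus_one_idx = -1;
        for (int64_t d : input) in_product *= d;
        for (size_t i = 0; i < pattern.size(); ++i) {
            if (pattern[i] == -1) minus_one_idx = static_cast<int64_t>(i);
            else known_product *= pattern[i];
        }
        if (minus_one_idx >= 0)
            pattern[minus_one_idx] = in_product / known_product;
        return pattern;
    }
    // resolve_reshape_pattern({2, 3, 4}, {4, -1}) -> {4, 6}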
Got: ", seq_lengths_pshape); - PartialShape output_pshape{data_pshape}; + ov::Shape output_pshape{data_pshape}; if (data_rank.is_static() && seq_lengths_rank.is_static()) { Dimension merged_sequence_length; NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp index c67007d483ed65..b631c6211062ed 100644 --- a/ngraph/core/src/op/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -66,7 +66,7 @@ void op::v0::RNNCell::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_RNNCell_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); return; } } @@ -148,7 +148,7 @@ void op::v0::RNNCell::validate_and_infer_types() { Output op::v0::RNNCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_gates_count * get_hidden_size()}, + ov::StaticShape{s_gates_count * get_hidden_size()}, vector(s_gates_count * get_hidden_size(), 0.f))}; } diff --git a/ngraph/core/src/op/rnn_sequence.cpp b/ngraph/core/src/op/rnn_sequence.cpp index 10f6f382608d38..234cd57c65f17d 100644 --- a/ngraph/core/src/op/rnn_sequence.cpp +++ b/ngraph/core/src/op/rnn_sequence.cpp @@ -45,8 +45,8 @@ void op::v5::RNNSequence::validate_and_infer_types() { NGRAPH_OP_SCOPE(v5_RNNSequence_validate_and_infer_types); for (const auto& input : inputs()) { if (input.get_partial_shape().rank().is_dynamic()) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - set_output_type(1, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); + set_output_type(1, get_input_element_type(0), ov::Shape::dynamic()); return; } } diff --git a/ngraph/core/src/op/roi_align.cpp b/ngraph/core/src/op/roi_align.cpp index 4d4faa8b45d2fd..4b2fd3e9f28285 100644 --- a/ngraph/core/src/op/roi_align.cpp +++ b/ngraph/core/src/op/roi_align.cpp @@ -107,10 +107,10 @@ void op::v3::ROIAlign::validate_and_infer_types() { } // the output shape should have the following format [NUM_ROIS, C, pooled_h, pooled_w] - auto output_shape = PartialShape{{Dimension::dynamic(), - input_ps[1], - Dimension{static_cast(m_pooled_h)}, - Dimension{static_cast(m_pooled_w)}}}; + auto output_shape = ov::Shape{{Dimension::dynamic(), + input_ps[1], + Dimension{static_cast(m_pooled_h)}, + Dimension{static_cast(m_pooled_w)}}}; // if either of those 2 dimensions is static its value will be used // for the first dimension of the output shape - 'NUM_ROIS' @@ -189,7 +189,7 @@ bool evaluate(const HostTensorPtr& feature_maps, const int sampling_ratio, const float spatial_scale, const op::v3::ROIAlign::PoolingMode& pooling_mode, - const Shape& batch_indices_shape) { + const ov::StaticShape& batch_indices_shape) { using T = typename element_type_traits::value_type; runtime::reference::roi_align(feature_maps->get_data_ptr(), rois->get_data_ptr(), diff --git a/ngraph/core/src/op/roi_pooling.cpp b/ngraph/core/src/op/roi_pooling.cpp index cd349783b322df..3d1f7466fa0ac9 100644 --- a/ngraph/core/src/op/roi_pooling.cpp +++ b/ngraph/core/src/op/roi_pooling.cpp @@ -13,7 +13,7 @@ OPENVINO_RTTI_DEFINITION(op::v0::ROIPooling, "ROIPooling", 0); op::ROIPooling::ROIPooling(const Output& input, const Output& coords, - const Shape& output_size, + const ov::StaticShape& output_size, const float spatial_scale, const string& method) : 
Op({input, coords}), @@ -87,10 +87,10 @@ void op::ROIPooling::validate_and_infer_types() { } // output shape should be {NUM_ROIS, C, pooled_h, pooled_w} - auto output_shape = PartialShape{{Dimension::dynamic(), - Dimension::dynamic(), - Dimension{static_cast<int64_t>(m_output_size[0])}, - Dimension{static_cast<int64_t>(m_output_size[1])}}}; + auto output_shape = ov::Shape{{Dimension::dynamic(), + Dimension::dynamic(), + Dimension{static_cast<int64_t>(m_output_size[0])}, + Dimension{static_cast<int64_t>(m_output_size[1])}}}; if (coords_ps.rank().is_static() && coords_ps[0].is_static()) { output_shape[0] = coords_ps[0]; diff --git a/ngraph/core/src/op/scatter_elements_update.cpp b/ngraph/core/src/op/scatter_elements_update.cpp index b6bcb9c1a0e8cc..ced157954119ae 100644 --- a/ngraph/core/src/op/scatter_elements_update.cpp +++ b/ngraph/core/src/op/scatter_elements_update.cpp @@ -35,10 +35,10 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types() { element::Type updates_et = get_input_element_type(2); element::Type axis_et = get_input_element_type(3); - const PartialShape& data_shape = get_input_partial_shape(0); - const PartialShape& indices_shape = get_input_partial_shape(1); - const PartialShape& updates_shape = get_input_partial_shape(2); - const PartialShape& axis_shape = get_input_partial_shape(3); + const ov::Shape& data_shape = get_input_partial_shape(0); + const ov::Shape& indices_shape = get_input_partial_shape(1); + const ov::Shape& updates_shape = get_input_partial_shape(2); + const ov::Shape& axis_shape = get_input_partial_shape(3); NODE_VALIDATION_CHECK(this, indices_et.is_integral(), @@ -56,7 +56,7 @@ void op::v3::ScatterElementsUpdate::validate_and_infer_types() { updates_et); NODE_VALIDATION_CHECK(this, - axis_shape.compatible(PartialShape{}) || axis_shape.compatible(PartialShape{1}), + axis_shape.compatible(ov::Shape{}) || axis_shape.compatible(ov::Shape{1}), "Axis input shape are required to be scalar or 1D tensor. 
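(Aside: the select.cpp hunks just below fold the three input shapes together with Shape::broadcast_merge_into under NUMPY rules. A minimal sketch of that merging, with dimensions aligned from the right and 1 stretching to the other side; static dims only, dynamic dimensions omitted for simplicity, helper name hypothetical:)

    #include <cstdint>
    #include <optional>
    #include <utility>
    #include <vector>

    std::optional<std::vector<int64_t>> numpy_broadcast(std::vector<int64_t> a,
                                                        std::vector<int64_t> b) {
        if (a.size() < b.size()) std::swap(a, b);
        b.insert(b.begin(), a.size() - b.size(), 1);  // left-pad the shorter shape
        for (size_t i = 0; i < a.size(); ++i) {
            if (b[i] == 1 || a[i] == b[i]) continue;
            if (a[i] == 1) a[i] = b[i];
            else return std::nullopt;  // incompatible pair, validation fails
        }
        return a;
    }
    // numpy_broadcast({8, 1, 6}, {7, 1}) -> {8, 7, 6}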
", "Got: ", axis_shape); diff --git a/ngraph/core/src/op/scatter_nd_update.cpp b/ngraph/core/src/op/scatter_nd_update.cpp index a1544822a3dec7..ea1c80c28fec09 100644 --- a/ngraph/core/src/op/scatter_nd_update.cpp +++ b/ngraph/core/src/op/scatter_nd_update.cpp @@ -29,10 +29,10 @@ bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg2, const HostTensorPtr& out) { using T = typename element_type_traits::value_type; - Shape params_shape = arg0->get_shape(); - Shape indices_shape = arg1->get_shape(); - Shape updates_shape = arg1->get_shape(); - Shape out_shape(params_shape); + ov::StaticShape params_shape = arg0->get_shape(); + ov::StaticShape indices_shape = arg1->get_shape(); + ov::StaticShape updates_shape = arg1->get_shape(); + const ov::StaticShape& out_shape(params_shape); out->set_shape(out_shape); if (arg1->get_element_type() == element::i64) { diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index 9a55c76dfd1a72..ccd3506ba7546c 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -42,28 +42,28 @@ void op::v1::Select::validate_and_infer_types() { element::Type::merge(result_et, get_input_element_type(1), get_input_element_type(2)), "Argument 1 and 2 element types must match."); - PartialShape result_shape; + ov::Shape result_shape; if (get_auto_broadcast().m_type == op::AutoBroadcastType::PDPD) { result_shape = get_input_partial_shape(1); // 'then' tensor NODE_VALIDATION_CHECK( this, - PartialShape::broadcast_merge_into(result_shape, get_input_partial_shape(2), get_auto_broadcast()), + ov::Shape::broadcast_merge_into(result_shape, get_input_partial_shape(2), get_auto_broadcast()), "'Else' tensor shape is not broadcastable."); NODE_VALIDATION_CHECK( this, - PartialShape::broadcast_merge_into(result_shape, get_input_partial_shape(0), get_auto_broadcast()), + ov::Shape::broadcast_merge_into(result_shape, get_input_partial_shape(0), get_auto_broadcast()), "'Cond' tensor shape is not broadcastable."); } else { result_shape = get_input_partial_shape(2); for (int i = 1; i >= 0; i--) { if (get_auto_broadcast().m_type == op::AutoBroadcastType::NONE) { NODE_VALIDATION_CHECK(this, - PartialShape::merge_into(result_shape, get_input_partial_shape(i)), + ov::Shape::merge_into(result_shape, get_input_partial_shape(i)), "Argument shapes are inconsistent."); } else if (get_auto_broadcast().m_type == op::AutoBroadcastType::NUMPY) { NODE_VALIDATION_CHECK( this, - PartialShape::broadcast_merge_into(result_shape, get_input_partial_shape(i), get_auto_broadcast()), + ov::Shape::broadcast_merge_into(result_shape, get_input_partial_shape(i), get_auto_broadcast()), "Argument shapes are inconsistent."); } else { NODE_VALIDATION_CHECK(this, false, "Unsupported auto broadcast specification"); diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index e4eb8f370217ca..b873c01b43bbed 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -33,7 +33,7 @@ void op::v3::ShapeOf::validate_and_infer_types() { "Output type must be i32 or i64"); set_input_is_relevant_to_value(0, false); const auto input_partial_shape = get_input_partial_shape(0); - set_output_type(0, m_output_type, PartialShape{input_partial_shape.rank()}); + set_output_type(0, m_output_type, ov::Shape{input_partial_shape.rank()}); } bool ngraph::op::v3::ShapeOf::visit_attributes(AttributeVisitor& visitor) { @@ -51,15 +51,15 @@ shared_ptr op::v3::ShapeOf::clone_with_new_inputs(const OutputVector& new_ namespace shape_of { template -inline 
bool evaluate(const Shape& shape, const HostTensorPtr& output_value) { +inline bool evaluate(const ov::StaticShape& shape, const HostTensorPtr& output_value) { runtime::reference::shape_of(shape, output_value->get_data_ptr<ET>()); return true; } bool evaluate_shape_of(const HostTensorPtr& output_value, const HostTensorPtr& input_value) { bool rc = true; - Shape shape = input_value->get_shape(); - output_value->set_shape(Shape{shape.size()}); + ov::StaticShape shape = input_value->get_shape(); + output_value->set_shape(ov::StaticShape{shape.size()}); switch (output_value->get_element_type()) { NGRAPH_TYPE_CASE(evaluate_shape_of, i32, shape, output_value); NGRAPH_TYPE_CASE(evaluate_shape_of, i64, shape, output_value); @@ -93,7 +93,7 @@ bool evaluate_bound_shape(const Node* shape_of_node, const HostTensorVector& out if (input_partial_shape.rank().is_dynamic()) return false; const auto rank = input_partial_shape.rank().get_length(); - auto pshape_low = PartialShape::dynamic(rank), pshape_up = PartialShape::dynamic(rank); + auto pshape_low = ov::Shape::dynamic(rank), pshape_up = ov::Shape::dynamic(rank); for (Dimension::value_type i = 0; i < rank; ++i) { Interval interval = input_partial_shape[i].get_interval(); pshape_low[i] = interval.get_min_val(); @@ -109,16 +109,16 @@ bool evaluate_bound_shape(const Node* shape_of_node, const HostTensorVector& out shape_of_node->get_output_tensor(0).set_upper_value(output_values[0]); } else { HostTensorVector upper = - is_upper ? output_values - : HostTensorVector{ - std::make_shared<HostTensor>(output_et, PartialShape{pshape_up.rank().get_length()})}; + is_upper + ? output_values + : HostTensorVector{std::make_shared<HostTensor>(output_et, ov::Shape{pshape_up.rank().get_length()})}; shape_of_node->evaluate(upper, {std::make_shared<HostTensor>(input_et, pshape_up)}); shape_of_node->get_output_tensor(0).set_upper_value(upper[0]); HostTensorVector lower = - !is_upper ? output_values - : HostTensorVector{ - std::make_shared<HostTensor>(output_et, PartialShape{pshape_low.rank().get_length()})}; + !is_upper + ? 
output_values + : HostTensorVector{std::make_shared(output_et, ov::Shape{pshape_low.rank().get_length()})}; shape_of_node->evaluate(lower, {std::make_shared(input_et, pshape_low)}); shape_of_node->get_output_tensor(0).set_lower_value(lower[0]); @@ -189,7 +189,7 @@ op::v0::ShapeOf::ShapeOf(const Output& arg) : Op({arg}) { void op::v0::ShapeOf::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_ShapeOf_validate_and_infer_types); set_input_is_relevant_to_value(0, false); - set_output_type(0, element::i64, PartialShape{get_input_partial_shape(0).rank()}); + set_output_type(0, element::i64, ov::Shape{get_input_partial_shape(0).rank()}); } bool ngraph::op::v0::ShapeOf::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/softmax.cpp b/ngraph/core/src/op/softmax.cpp index d7c2858a0de1cd..56f5ff40a28fd6 100644 --- a/ngraph/core/src/op/softmax.cpp +++ b/ngraph/core/src/op/softmax.cpp @@ -23,7 +23,10 @@ using namespace ngraph; namespace { template -inline bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const Shape& shape, const AxisSet& axes) { +inline bool evaluate(const HostTensorPtr& arg, + const HostTensorPtr& out, + const ov::StaticShape& shape, + const AxisSet& axes) { runtime::reference::softmax(arg->get_data_ptr(), out->get_data_ptr(), shape, axes); return true; } @@ -60,7 +63,7 @@ bool ngraph::op::v1::Softmax::visit_attributes(AttributeVisitor& visitor) { void op::v1::Softmax::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_Softmax_validate_and_infer_types); - const PartialShape& input_shape = get_input_partial_shape(0); + const ov::Shape& input_shape = get_input_partial_shape(0); if (input_shape.rank().is_static()) NODE_VALIDATION_CHECK(this, m_axis < static_cast(input_shape.rank().get_length()), diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index 0bd707e689c0dc..8603ea5f62663c 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -33,7 +33,7 @@ ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output& d void op::v1::SpaceToBatch::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_SpaceToBatch_validate_and_infer_types); - PartialShape data_pshape = get_input_partial_shape(0); + ov::Shape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); const auto& block_shape_type = get_input_element_type(1); const auto& pads_begin_type = get_input_element_type(2); @@ -83,7 +83,7 @@ void op::v1::SpaceToBatch::validate_and_infer_types() { for (long idx : block_val) block_prod *= idx; - Shape output_shape = {static_cast(data_shape[0] * block_prod)}; + ov::StaticShape output_shape = {static_cast(data_shape[0] * block_prod)}; for (size_t idx = 1; idx < data_shape.size(); ++idx) { NODE_VALIDATION_CHECK(this, block_val.at(idx) > 0, "block_shape values must be greater than 0"); NODE_VALIDATION_CHECK( @@ -102,7 +102,7 @@ void op::v1::SpaceToBatch::validate_and_infer_types() { set_output_size(1); set_output_type(0, data_type, output_shape); } else { - set_output_type(0, data_type, PartialShape::dynamic(data_pshape.rank())); + set_output_type(0, data_type, ov::Shape::dynamic(data_pshape.rank())); } } @@ -149,7 +149,7 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto CoordinateDiff pads_end_vec(shape_size(inputs[2]->get_shape())); pads_end_vec.assign(pads_end, pads_end + shape_size(inputs[2]->get_shape())); - Shape padded_shape(data_shape.size()); + ov::StaticShape padded_shape(data_shape.size()); for (size_t 
i = 0; i < data_shape.size(); ++i) { padded_shape[i] = data_shape[i] + pads_begin_vec[i] + pads_end_vec[i]; } @@ -166,9 +166,9 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto ngraph::op::PadMode::CONSTANT); data_shape = padded_shape; - Shape dispersed_shape(block_values_size + 1); + ov::StaticShape dispersed_shape(block_values_size + 1); std::vector axes_order(block_values_size + 1); - Shape squeezed_shape(data_shape.begin(), data_shape.end()); + ov::StaticShape squeezed_shape(data_shape.begin(), data_shape.end()); std::vector plain_axes_order(block_values_size + 1); std::iota(plain_axes_order.begin(), plain_axes_order.end(), 0); @@ -202,7 +202,7 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto plain_axes_order, dispersed_shape, elem_size); - Shape post_transpose_shape(axes_order.size()); + ov::StaticShape post_transpose_shape(axes_order.size()); for (size_t i = 0; i < axes_order.size(); ++i) { post_transpose_shape[i] = dispersed_shape[axes_order[i]]; } diff --git a/ngraph/core/src/op/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp index f4a3436f4d860c..71e0e23279afcb 100644 --- a/ngraph/core/src/op/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -46,7 +46,7 @@ std::shared_ptr ov::op::v0::SpaceToDepth::clone_with_new_inputs(const Outp void ngraph::op::v0::SpaceToDepth::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_SpaceToDepth_validate_and_infer_types); - PartialShape data_pshape = get_input_partial_shape(0); + ov::Shape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); @@ -81,7 +81,7 @@ void ngraph::op::v0::SpaceToDepth::validate_and_infer_types() { set_output_size(1); set_output_type(0, data_type, out_shape); } else { - set_output_type(0, data_type, PartialShape::dynamic(data_pshape.rank())); + set_output_type(0, data_type, ov::Shape::dynamic(data_pshape.rank())); } } diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp index 2d70a0be5f1d8e..67afd0a7ef01c0 100644 --- a/ngraph/core/src/op/split.cpp +++ b/ngraph/core/src/op/split.cpp @@ -34,8 +34,8 @@ bool ngraph::op::v1::Split::visit_attributes(AttributeVisitor& visitor) { void op::v1::Split::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_Split_validate_and_infer_types); - const PartialShape& data_ps = get_input_partial_shape(0); - const PartialShape& axis_ps = get_input_partial_shape(1); + const ov::Shape& data_ps = get_input_partial_shape(0); + const ov::Shape& axis_ps = get_input_partial_shape(1); const element::Type& axis_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, axis_ps.rank().compatible(0), "'axis' input must be a scalar. Got: ", axis_ps); @@ -50,7 +50,7 @@ void op::v1::Split::validate_and_infer_types() { "Attribute 'num_splits' must be greater than zero. 
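(Aside: the split.cpp hunks here validate that 'num_splits' evenly divides the dimension at 'axis'; each of the num_splits outputs keeps every other dimension, exactly as the evaluate() further on does with output_shape.at(axis) /= num_splits. Static-shape sketch, hypothetical helper:)

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Shape of each Split output; divisibility is a precondition the op checks.
    std::vector<int64_t> split_output_shape(std::vector<int64_t> data_shape,
                                            size_t axis, int64_t num_splits) {
        data_shape[axis] /= num_splits;
        return data_shape;
    }
    // split_output_shape({2, 6, 4}, 1, 3) -> {2, 2, 4} for each of the 3 outputs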
Got: ", m_num_splits); - PartialShape each_output_shape{data_ps}; + ov::Shape each_output_shape{data_ps}; const Rank data_rank = data_ps.rank(); const auto axis_input = get_constant_from_source(input_value(1)); if (axis_input && data_rank.is_static()) { @@ -87,7 +87,7 @@ void op::v1::Split::validate_and_infer_types() { each_output_shape[axis] = Dimension(dim_interval_at_axis_min, dim_interval_at_axis_max); } } else { - each_output_shape = PartialShape::dynamic(data_ps.rank()); + each_output_shape = ov::Shape::dynamic(data_ps.rank()); } for (size_t i = 0; i < m_num_splits; ++i) { @@ -108,7 +108,7 @@ inline bool evaluate(const HostTensorPtr& data_tensor, const HostTensorVector& outputs, const int64_t axis, const int64_t num_splits) { - Shape output_shape = data_tensor->get_shape(); + ov::StaticShape output_shape = data_tensor->get_shape(); std::vector outputs_data(num_splits); output_shape.at(axis) /= num_splits; for (size_t i = 0; i < outputs.size(); ++i) { diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index 0d04d6a08bd3b6..c25939cfcc3f48 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -42,7 +42,7 @@ void op::Squeeze::validate_and_infer_types() { if (get_input_size() == 1) { // Handling the case when Squeeze op is created with a single input - data. // This way the following code (validation, shape inference) can be used in both cases. - axes_constant = make_shared(element::i64, Shape{0}, vector{}); + axes_constant = make_shared(element::i64, ov::StaticShape{0}, vector{}); } else { auto axes_node = input_value(1).get_node_shared_ptr(); auto axes_pshape = get_input_partial_shape(1); @@ -78,7 +78,7 @@ void op::Squeeze::validate_and_infer_types() { } } - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); return; } @@ -116,7 +116,7 @@ void op::Squeeze::validate_and_infer_types() { output_data_shape.push_back(data_partial_shape[idx]); } } - set_output_type(0, get_input_element_type(0), PartialShape(output_data_shape)); + set_output_type(0, get_input_element_type(0), ov::Shape(output_data_shape)); } bool ngraph::op::v0::Squeeze::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index c7ef03d52ec9b2..67fe52686643c5 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ b/ngraph/core/src/op/strided_slice.cpp @@ -61,7 +61,7 @@ shared_ptr calculate_default_strides(const Output& begin, const Outp std::make_shared(begin)); } - return op::Constant::create(element::i64, Shape{strides_length}, vector(strides_length, 1)); + return op::Constant::create(element::i64, ov::StaticShape{strides_length}, vector(strides_length, 1)); } } // namespace @@ -173,7 +173,7 @@ void op::v1::StridedSlice::validate_and_infer_types() { convert_mask_to_axis_set(get_shrink_axis_mask()), convert_mask_to_axis_set(get_ellipsis_mask()))); } else { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic(data_rank)); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic(data_rank)); } } diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp index 2f1760111f632d..59501dc492fdd4 100644 --- a/ngraph/core/src/op/tensor_iterator.cpp +++ b/ngraph/core/src/op/tensor_iterator.cpp @@ -99,11 +99,11 @@ void op::v0::TensorIterator::validate_and_infer_types() { // +1 because the left and right borders are included [start, end] m_num_iterations 
= (abs(end - start) + 1) / part_size; // infer type for m_body_parameter - Shape out_shape{input_shape}; + ov::StaticShape out_shape{input_shape}; out_shape[axis] = part_size; body_parameter->set_partial_shape(out_shape); } else { - body_parameter->set_partial_shape(PartialShape::dynamic(input_partial_shape.rank())); + body_parameter->set_partial_shape(ov::Shape::dynamic(input_partial_shape.rank())); } } else if (auto merged_input_description = ov::as_type_ptr(input_description)) { auto body_value = m_bodies[0]->get_results().at(merged_input_description->m_body_value_index)->input(0); @@ -135,14 +135,14 @@ void op::v0::TensorIterator::validate_and_infer_types() { auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0); if (auto concat_output_description = ov::as_type_ptr(output_description)) { - auto body_value_partial_shape = body_value.get_partial_shape(); - set_output_type(index, body_value.get_element_type(), PartialShape::dynamic()); + const auto& body_value_partial_shape = body_value.get_partial_shape(); + set_output_type(index, body_value.get_element_type(), ov::Shape::dynamic()); if (body_value_partial_shape.is_static()) { auto body_value_shape = body_value_partial_shape.to_shape(); auto part_size = concat_output_description->m_part_size; auto axis = concat_output_description->m_axis; - Shape out_shape{body_value_shape}; + ov::StaticShape out_shape{body_value_shape}; if (body_value_shape.empty()) { NODE_VALIDATION_CHECK(this, @@ -151,7 +151,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { "tensor slices are scalars. " "TensorIterator output index: ", index); - out_shape = Shape(1); + out_shape = ov::StaticShape(1); } if (m_num_iterations != -1) { @@ -162,7 +162,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { } else { set_output_type(index, body_value.get_element_type(), - PartialShape::dynamic(body_value.get_partial_shape().rank())); + ov::Shape::dynamic(body_value.get_partial_shape().rank())); } } else if (auto body_output_description = ov::as_type_ptr(output_description)) { set_output_type(index, body_value.get_element_type(), body_value.get_partial_shape()); @@ -204,7 +204,7 @@ std::shared_ptr op::v0::TensorIterator::clone_with_new_inputs(const Output op->set_output_size(m_output_descriptions[0].size()); std::vector<::ngraph::element::Type> types(m_bodies[0]->get_parameters().size()); - std::vector<::ngraph::PartialShape> new_shapes(m_bodies[0]->get_parameters().size()); + std::vector new_shapes(m_bodies[0]->get_parameters().size()); for (size_t input_index = 0; input_index < new_args.size(); ++input_index) { for (auto& input_description : m_input_descriptions[0]) { diff --git a/ngraph/core/src/op/tile.cpp b/ngraph/core/src/op/tile.cpp index 210018d06a63bb..49cf2530ab8bad 100644 --- a/ngraph/core/src/op/tile.cpp +++ b/ngraph/core/src/op/tile.cpp @@ -38,7 +38,7 @@ void op::v0::Tile::validate_and_infer_types() { auto arg_shape = get_input_partial_shape(0); auto repeats_shape = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, repeats_shape.rank().compatible(1), "Shape of repeats must be of rank 1"); - PartialShape repeats_as_pshape; + ov::Shape repeats_as_pshape; bool repeats_are_known = evaluate_as_partial_shape(get_input_source_output(1), repeats_as_pshape); std::vector repeats_value(repeats_as_pshape); if (repeats_are_known && !repeats_value.empty() && arg_shape.rank().is_static()) { @@ -51,12 +51,12 @@ void op::v0::Tile::validate_and_infer_types() { data_shape.insert(data_shape.begin(), 
output_rank - data_rank, 1); repeats_value.insert(repeats_value.begin(), output_rank - repeats_rank, 1); - auto output_shape = PartialShape::dynamic(output_rank); + auto output_shape = ov::Shape::dynamic(output_rank); for (size_t i = 0; i < output_rank; i++) output_shape[i] = data_shape[i] * repeats_value[i]; set_output_type(0, arg_et, output_shape); } else { - set_output_type(0, arg_et, PartialShape::dynamic()); + set_output_type(0, arg_et, ov::Shape::dynamic()); } set_input_is_relevant_to_shape(0); @@ -75,7 +75,7 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTens auto& output = outputs[0]; auto repeats_val = read_vector(axis); auto repeats_rank = repeats_val.size(); - Shape data_shape = data->get_shape(); + ov::StaticShape data_shape = data->get_shape(); auto data_rank = data_shape.size(); auto output_rank = std::max(data_rank, repeats_rank); @@ -83,7 +83,7 @@ bool op::v0::Tile::evaluate_tile(const HostTensorVector& outputs, const HostTens data_shape.insert(data_shape.begin(), output_rank - data_rank, 1); repeats_val.insert(repeats_val.begin(), output_rank - repeats_rank, 1); - Shape output_shape(output_rank); + ov::StaticShape output_shape(output_rank); for (size_t i = 0; i < output_rank; i++) { output_shape[i] = data_shape[i] * repeats_val[i]; } diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index ecbc42f5e43e35..9dd81f2f604d83 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -24,14 +24,14 @@ template inline bool evaluate_execute(const HostTensorPtr& arg0, const HostTensorPtr& out_indices, const HostTensorPtr& out_values, - const Shape out_shape, + const ov::StaticShape out_shape, const size_t axis, const size_t k, const bool compute_max, const op::v1::TopK::SortType sort) { using T = typename element_type_traits::value_type; using U = typename element_type_traits::value_type; - const Shape in_shape = arg0->get_shape(); + const ov::StaticShape in_shape = arg0->get_shape(); out_indices->set_shape(out_shape); out_indices->set_element_type(INDEX_ET); @@ -60,7 +60,7 @@ template bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out_indices, const HostTensorPtr& out_values, - const Shape out_shape, + const ov::StaticShape out_shape, const size_t axis, const size_t k, const bool max, @@ -80,7 +80,7 @@ bool evaluate(const HostTensorPtr& arg, bool evaluate_topk(const HostTensorPtr& arg, const HostTensorPtr& out_indices, const HostTensorPtr& out_values, - const Shape out_shape, + const ov::StaticShape out_shape, const size_t axis, const size_t k, const bool max, @@ -201,12 +201,12 @@ void op::v1::TopK::validate_and_infer_types() { read_k_from_constant_node(input_value(1).get_node_shared_ptr(), get_input_element_type(1)); } - PartialShape output_shape{input_partial_shape}; + ov::Shape output_shape{input_partial_shape}; if (output_shape.rank().is_static()) { m_normalized_axis = ngraph::normalize_axis(this, m_axis, output_shape.rank()); - PartialShape k_as_shape; + ov::Shape k_as_shape; if (evaluate_as_partial_shape(input_value(1), k_as_shape)) { if (k_as_shape.is_static()) { output_shape[m_normalized_axis] = k_as_shape[0]; @@ -232,10 +232,10 @@ void op::v1::TopK::validate_and_infer_types() { set_output_type(1, m_index_element_type, output_shape); } -Shape op::v1::TopK::compute_output_shape(const std::string& node_description, - const PartialShape input_partial_shape, - const int64_t k) const { - PartialShape output_shape{input_partial_shape}; +ov::StaticShape op::v1::TopK::compute_output_shape(const 
std::string& node_description, + const ov::Shape input_partial_shape, + const int64_t k) const { + ov::Shape output_shape{input_partial_shape}; auto normalized_axis = ngraph::normalize_axis(node_description, m_axis, output_shape.rank()); if (k != 0) { @@ -345,12 +345,12 @@ size_t op::v1::TopK::get_k() const { } void op::v1::TopK::set_k(size_t k) { - this->input(1).replace_source_output(op::v0::Constant::create(element::i64, Shape{}, {k})->output(0)); + this->input(1).replace_source_output(op::v0::Constant::create(element::i64, ov::StaticShape{}, {k})->output(0)); } bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { NGRAPH_OP_SCOPE(v1_TopK_evaluate); - Shape arg_shape = inputs[0]->get_shape(); + ov::StaticShape arg_shape = inputs[0]->get_shape(); // 1. get axis, mode ( max/min), sort_type size_t axis = ngraph::normalize_axis(this, m_axis, arg_shape.size()); bool compute_max = get_mode() == TopKMode::MAX ? true : false; diff --git a/ngraph/core/src/op/transpose.cpp b/ngraph/core/src/op/transpose.cpp index 2b68e94bd3f02c..df47d75c145de9 100644 --- a/ngraph/core/src/op/transpose.cpp +++ b/ngraph/core/src/op/transpose.cpp @@ -36,7 +36,7 @@ void op::v1::Transpose::validate_and_infer_types() { const auto& arg_shape = get_input_partial_shape(0); NODE_VALIDATION_CHECK( this, - input_order_shape.compatible(PartialShape{arg_shape.rank()}) || + input_order_shape.compatible(ov::Shape{arg_shape.rank()}) || (input_order_shape.is_static() && input_order_shape.rank() == 1 && input_order_shape[0] == 0), "Input order must have shape [n], where n is the rank of arg."); @@ -57,7 +57,7 @@ void op::v1::Transpose::validate_and_infer_types() { arg_shape); set_output_type(0, get_input_element_type(0), ngraph::apply_permutation(arg_shape, permutation)); } else { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic(arg_shape.rank())); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic(arg_shape.rank())); } NGRAPH_SUPPRESS_DEPRECATED_END } @@ -74,7 +74,7 @@ bool evaluate_transpose(const HostTensorPtr& arg1, const HostTensorPtr& arg2, co "Transpose axis element type has to be integral data type."); std::vector axes_order = host_tensor_2_vector(arg2); - Shape in_shape = arg1->get_shape(); + ov::StaticShape in_shape = arg1->get_shape(); if (shape_size(arg2->get_shape()) == 0) { axes_order.resize(in_shape.size()); std::iota(axes_order.begin(), axes_order.end(), 0); @@ -85,7 +85,7 @@ bool evaluate_transpose(const HostTensorPtr& arg1, const HostTensorPtr& arg2, co NGRAPH_CHECK(is_unique_order, "Transpose axes order values must be unique."); } - Shape out_shape(in_shape.size()); + ov::StaticShape out_shape(in_shape.size()); std::transform(axes_order.begin(), axes_order.end(), out_shape.begin(), [&](const int64_t& v) { NGRAPH_CHECK(v >= 0, "Negative values for transpose axes order are not supported."); NGRAPH_CHECK(v < int64_t(in_shape.size()), "Transpose axis ", v, " is out of shape range."); diff --git a/ngraph/core/src/op/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp index 5d186ba587358c..27fa4774cde72a 100644 --- a/ngraph/core/src/op/unsqueeze.cpp +++ b/ngraph/core/src/op/unsqueeze.cpp @@ -40,7 +40,7 @@ void op::v0::Unsqueeze::validate_and_infer_types() { axes_pshape.rank().get_length()); if (data_rank.is_dynamic() || !axes_constant) { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + set_output_type(0, get_input_element_type(0), ov::Shape::dynamic()); return; } @@ -58,7 +58,7 @@ void 
op::v0::Unsqueeze::validate_and_infer_types() { output_shape.insert(next(begin(output_shape), axis), 1); } - set_output_type(0, get_input_element_type(0), PartialShape{output_shape}); + set_output_type(0, get_input_element_type(0), ov::Shape{output_shape}); } bool op::v0::Unsqueeze::visit_attributes(AttributeVisitor& visitor) { diff --git a/ngraph/core/src/op/util/arithmetic_reduction.cpp b/ngraph/core/src/op/util/arithmetic_reduction.cpp index 91e23760dd650b..9d75e0f2d23bd3 100644 --- a/ngraph/core/src/op/util/arithmetic_reduction.cpp +++ b/ngraph/core/src/op/util/arithmetic_reduction.cpp @@ -41,14 +41,14 @@ void ov::op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduct void ov::op::util::ArithmeticReduction::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_ArithmeticReduction_validate_and_infer_types); - const PartialShape& axes_shape = get_input_partial_shape(1); + const Shape& axes_shape = get_input_partial_shape(1); const Rank axes_rank = axes_shape.rank(); NODE_VALIDATION_CHECK(this, axes_rank.compatible(0) || axes_rank.compatible(1), "Axes input must be a scalar or 1D input. Got: ", axes_shape); - PartialShape result_shape = infer_reduction_output_shape(false); + Shape result_shape = infer_reduction_output_shape(false); set_input_is_relevant_to_shape(1); set_output_type(0, get_input_element_type(0), result_shape); } diff --git a/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp b/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp index 522f0561508115..953f35cdc75578 100644 --- a/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp +++ b/ngraph/core/src/op/util/arithmetic_reductions_keep_dims.cpp @@ -30,7 +30,7 @@ void ov::op::util::ArithmeticReductionKeepDims::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_util_ArithmeticReductionKeepDims_validate_and_infer_types); const element::Type& data_et = get_input_element_type(0); - const PartialShape& axes_shape = get_input_partial_shape(1); + const Shape& axes_shape = get_input_partial_shape(1); const element::Type& axes_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, @@ -49,7 +49,7 @@ void ov::op::util::ArithmeticReductionKeepDims::validate_and_infer_types() { "Axes input must be a scalar or 1D input. 
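(Aside: infer_reduction_output_shape, invoked in these reduction hunks and in the keep-dims variants that follow, either collapses each reduced axis to 1 or drops it. Sketch for static shapes; the helper name below is hypothetical:)

    #include <cstddef>
    #include <cstdint>
    #include <set>
    #include <vector>

    std::vector<int64_t> reduce_shape(const std::vector<int64_t>& in,
                                      const std::set<size_t>& axes,
                                      bool keep_dims) {
        std::vector<int64_t> out;
        for (size_t i = 0; i < in.size(); ++i) {
            if (!axes.count(i))
                out.push_back(in[i]);       // untouched axis survives as-is
            else if (keep_dims)
                out.push_back(1);           // reduced axis collapses to 1
        }
        return out;
    }
    // reduce_shape({2, 3, 4}, {1}, true) -> {2, 1, 4}; keep_dims=false -> {2, 4}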
Got: ", axes_shape); - PartialShape result_shape = infer_reduction_output_shape(m_keep_dims); + Shape result_shape = infer_reduction_output_shape(m_keep_dims); set_input_is_relevant_to_shape(1); set_output_type(0, data_et, result_shape); } diff --git a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp index b09b87977dbbdf..5c505e445e2487 100644 --- a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp @@ -27,7 +27,7 @@ void ov::op::util::BinaryElementwiseArithmetic::validate_and_infer_elementwise_a const op::AutoBroadcastSpec& autob) { auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, autob); element::Type& args_et = std::get<0>(args_et_pshape); - PartialShape& args_pshape = std::get<1>(args_et_pshape); + Shape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, args_et.is_dynamic() || args_et != element::boolean, diff --git a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp index 7a6e15a0cb8495..f2c44ba92425dc 100644 --- a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp @@ -24,7 +24,7 @@ ov::op::util::BinaryElementwiseComparison::BinaryElementwiseComparison(const Out void ov::op::util::BinaryElementwiseComparison::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_util_BinaryElementwiseComparison_validate_and_infer_types); auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob); - PartialShape& args_pshape = std::get<1>(args_et_pshape); + Shape& args_pshape = std::get<1>(args_et_pshape); set_output_type(0, element::boolean, args_pshape); } diff --git a/ngraph/core/src/op/util/binary_elementwise_logical.cpp b/ngraph/core/src/op/util/binary_elementwise_logical.cpp index fc9125e63cebd3..39644a863f1ba0 100644 --- a/ngraph/core/src/op/util/binary_elementwise_logical.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_logical.cpp @@ -25,7 +25,7 @@ void ov::op::util::BinaryElementwiseLogical::validate_and_infer_types() { auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob); element::Type& args_et = std::get<0>(args_et_pshape); - PartialShape& args_pshape = std::get<1>(args_et_pshape); + Shape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, args_et.is_dynamic() || args_et == element::boolean, diff --git a/ngraph/core/src/op/util/broadcast_base.cpp b/ngraph/core/src/op/util/broadcast_base.cpp index 34b2809e8295e6..a68c630a23b5f9 100644 --- a/ngraph/core/src/op/util/broadcast_base.cpp +++ b/ngraph/core/src/op/util/broadcast_base.cpp @@ -32,17 +32,17 @@ ov::op::util::BroadcastBase::BroadcastBase(const Output& arg, : Op({arg, target_shape}), m_mode{broadcast_mode} {} -ov::PartialShape ov::op::util::BroadcastBase::get_result_shape_pdpd(const PartialShape& arg0_shape, - const PartialShape& target_pshape, - const op::BroadcastModeSpec& broadcast_spec) const { +ov::Shape ov::op::util::BroadcastBase::get_result_shape_pdpd(const Shape& arg0_shape, + const Shape& target_pshape, + const op::BroadcastModeSpec& broadcast_spec) const { if (target_pshape.is_dynamic()) - return PartialShape::dynamic(target_pshape.rank()); - ngraph::Shape target_shape = target_pshape.to_shape(); + return Shape::dynamic(target_pshape.rank()); + StaticShape target_shape = target_pshape.to_shape(); if (arg0_shape.rank().is_dynamic()) { - 
return PartialShape::dynamic(target_shape.size()); + return Shape::dynamic(target_shape.size()); } const auto arg_rank_length = arg0_shape.rank().get_length(); - PartialShape result_shape = target_shape; + Shape result_shape = target_shape; auto start_axis = broadcast_spec.m_axis; NODE_VALIDATION_CHECK(this, @@ -68,8 +68,7 @@ ov::PartialShape ov::op::util::BroadcastBase::get_result_shape_pdpd(const Partia return result_shape; } -void ov::op::util::BroadcastBase::validate_target_shape_numpy(const PartialShape& arg_shape, - const PartialShape& target_shape) const { +void ov::op::util::BroadcastBase::validate_target_shape_numpy(const Shape& arg_shape, const Shape& target_shape) const { if (arg_shape.rank().is_dynamic() || target_shape.rank().is_dynamic()) { return; } @@ -97,9 +96,9 @@ void ov::op::util::BroadcastBase::validate_target_shape_numpy(const PartialShape } } -void ov::op::util::BroadcastBase::validate_target_shape_none(const PartialShape& arg_shape, +void ov::op::util::BroadcastBase::validate_target_shape_none(const Shape& arg_shape, const AxisVector& axes_mapping_val, - const PartialShape& target_shape) const { + const Shape& target_shape) const { if (arg_shape.rank().is_dynamic() || target_shape.rank().is_dynamic()) { return; } @@ -173,7 +172,7 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() { axes_shape_rank); } - PartialShape result_shape{PartialShape::dynamic()}; + Shape result_shape{Shape::dynamic()}; const auto& input_shape = get_input_partial_shape(0); const auto input_rank = input_shape.rank(); const auto& target_shape = input_value(1).get_partial_shape(); @@ -181,15 +180,15 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() { if (m_mode.m_type == BroadcastType::BIDIRECTIONAL) { if (input_rank.is_static() && is_target_shape_known) { - result_shape = PartialShape::dynamic(std::max(input_rank.get_length(), target_shape[0].get_length())); + result_shape = Shape::dynamic(std::max(input_rank.get_length(), target_shape[0].get_length())); } } else { if (is_target_shape_known) { - result_shape = PartialShape::dynamic(target_shape[0].get_length()); + result_shape = Shape::dynamic(target_shape[0].get_length()); } } - PartialShape output_shape; + Shape output_shape; bool output_shape_defined = ngraph::evaluate_as_partial_shape(get_input_source_output(1), output_shape); if (auto concat = ov::as_type_ptr(input_value(1).get_node_shared_ptr())) { @@ -207,7 +206,7 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() { } } output_shape_defined = true; - output_shape = PartialShape(output_partial_shape); + output_shape = Shape(output_partial_shape); } } @@ -249,8 +248,8 @@ void ov::op::util::BroadcastBase::validate_and_infer_types() { } std::pair ov::op::util::BroadcastBase::get_broadcast_axes_numpy_pdpd( - const ngraph::Shape& arg_shape, - const ngraph::Shape& result_shape, + const StaticShape& arg_shape, + const StaticShape& result_shape, const op::BroadcastModeSpec& broadcast_spec) { AxisSet broadcast_axes; bool axes_known = false; @@ -313,7 +312,7 @@ bool ov::op::util::BroadcastBase::evaluate_broadcast(const HostTensorPtr& arg0, NGRAPH_OP_SCOPE(util_BroadcastBase_evaluate_axes); auto arg0_shape = arg0->get_shape(); if (arg0_shape.size() == 0) { - arg0_shape = ngraph::Shape{1}; + arg0_shape = StaticShape{1}; } ngraph::runtime::reference::broadcast(arg0->get_data_ptr(), out->get_data_ptr(), @@ -340,7 +339,7 @@ void get_axis_vector_from_hosttensor(const ngraph::HostTensorPtr& arg, ov::AxisV void get_axis_vector_from_ht(const ngraph::HostTensorPtr& 
arg, ov::AxisVector& axis_vector, - const ngraph::Shape& arg_shape) { + const ov::StaticShape& arg_shape) { switch (arg->get_element_type()) { GET_AXIS_VECTOR(i8)(arg, axis_vector); break; @@ -371,20 +370,20 @@ void get_axis_vector_from_ht(const ngraph::HostTensorPtr& arg, } template -void get_shape_from_hosttensor(const ngraph::HostTensorPtr& input1, ngraph::Shape& target_shape) { +void get_shape_from_hosttensor(const ngraph::HostTensorPtr& input1, ov::StaticShape& target_shape) { using T = typename ov::element_type_traits::value_type; auto rank = input1->get_shape().at(0); std::vector target_shape_vec(rank); input1->read(target_shape_vec.data(), rank * sizeof(T)); - target_shape = ngraph::Shape(target_shape_vec.begin(), target_shape_vec.end()); + target_shape = ov::StaticShape(target_shape_vec.begin(), target_shape_vec.end()); } #define CASE_GET_SHAPE(a) \ case ov::element::Type_t::a: \ get_shape_from_hosttensor -ngraph::Shape get_target_shape_from_ht(const ngraph::HostTensorPtr& input1) { - ngraph::Shape target_shape; +ov::StaticShape get_target_shape_from_ht(const ngraph::HostTensorPtr& input1) { + ov::StaticShape target_shape; switch (input1->get_element_type()) { CASE_GET_SHAPE(i8)(input1, target_shape); break; @@ -413,20 +412,20 @@ ngraph::Shape get_target_shape_from_ht(const ngraph::HostTensorPtr& input1) { bool ov::op::util::BroadcastBase::evaluate_broadcast(const HostTensorPtr& arg0, const HostTensorPtr& out, const std::pair& pair_broadcast_axes, - const ngraph::Shape& output_shape) const { + const StaticShape& output_shape) const { if (!pair_broadcast_axes.first) { // broadcast_axes not known deterministically return false; } - ngraph::Shape in_shape = arg0->get_shape(); + StaticShape in_shape = arg0->get_shape(); out->set_shape(output_shape); out->set_element_type(arg0->get_element_type()); return evaluate_broadcast(arg0, out, pair_broadcast_axes.second); } -ngraph::Shape ov::op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) const { - ngraph::Shape target_shape; +ov::StaticShape ov::op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) const { + StaticShape target_shape; const auto shape_constant = ov::as_type_ptr(input_value(1).get_node_shared_ptr()); if (shape_constant) { target_shape = shape_constant->get_shape_val(); @@ -441,9 +440,9 @@ bool ov::op::util::BroadcastBase::evaluate(const HostTensorVector& outputs, cons NGRAPH_CHECK(ngraph::validate_host_tensor_vector(inputs, 2) || ngraph::validate_host_tensor_vector(inputs, 3)); NGRAPH_CHECK(ngraph::validate_host_tensor_vector(outputs, 1)); - ngraph::Shape target_shape = get_target_shape(inputs[1]); + StaticShape target_shape = get_target_shape(inputs[1]); - PartialShape result_shape; + Shape result_shape; std::pair pair_broadcast_axes; auto arg_shape = inputs[0]->get_shape(); diff --git a/ngraph/core/src/op/util/deformable_convolution_base.cpp b/ngraph/core/src/op/util/deformable_convolution_base.cpp index c1343f6f402336..62946abaae9e11 100644 --- a/ngraph/core/src/op/util/deformable_convolution_base.cpp +++ b/ngraph/core/src/op/util/deformable_convolution_base.cpp @@ -46,9 +46,9 @@ bool ov::op::util::DeformableConvolutionBase::visit_attributes(AttributeVisitor& void ov::op::util::DeformableConvolutionBase::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_DeformableConvolutionBase_validate_and_infer_types); - const PartialShape& data_batch_pshape = get_input_partial_shape(0); - const PartialShape& offsets_pshape = get_input_partial_shape(1); - const PartialShape& filters_pshape = 
get_input_partial_shape(2); + const Shape& data_batch_pshape = get_input_partial_shape(0); + const Shape& offsets_pshape = get_input_partial_shape(1); + const Shape& filters_pshape = get_input_partial_shape(2); element::Type data_batch_et = get_input_element_type(0); element::Type offsets_et = get_input_element_type(1); @@ -158,27 +158,27 @@ void ov::op::util::DeformableConvolutionBase::validate_and_infer_types() { } return new_shape; }(m_group); - PartialShape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this, - result_ps_rank, - data_batch_pshape, - new_filters_pshape, - m_auto_pad, - m_strides, - m_dilations, - m_pads_begin, - m_pads_end); + Shape result_shape = ngraph::validate_and_infer_convolution_forward_output_shape(this, + result_ps_rank, + data_batch_pshape, + new_filters_pshape, + m_auto_pad, + m_strides, + m_dilations, + m_pads_begin, + m_pads_end); if (result_shape.rank().is_static() && offsets_pshape.rank().is_static()) { - PartialShape result_spatial_shape = [&result_shape]() { + Shape result_spatial_shape = [&result_shape]() { vector result_spatial_dims{result_shape}; result_spatial_dims.erase(result_spatial_dims.begin(), result_spatial_dims.begin() + 2); - return PartialShape{result_spatial_dims}; + return Shape{result_spatial_dims}; }(); - PartialShape offsets_spatial_shape = [&offsets_pshape]() { + Shape offsets_spatial_shape = [&offsets_pshape]() { vector offsets_spatial_dims{offsets_pshape}; offsets_spatial_dims.erase(offsets_spatial_dims.begin(), offsets_spatial_dims.begin() + 2); - return PartialShape{offsets_spatial_dims}; + return Shape{offsets_spatial_dims}; }(); NODE_VALIDATION_CHECK(this, diff --git a/ngraph/core/src/op/util/elementwise_args.cpp b/ngraph/core/src/op/util/elementwise_args.cpp index 9a290f7fb8a96f..db71a2b8775423 100644 --- a/ngraph/core/src/op/util/elementwise_args.cpp +++ b/ngraph/core/src/op/util/elementwise_args.cpp @@ -6,12 +6,12 @@ #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -std::tuple ov::op::util::validate_and_infer_elementwise_args( +std::tuple ov::op::util::validate_and_infer_elementwise_args( Node* node, const op::AutoBroadcastSpec& autob) { NGRAPH_CHECK(node != nullptr, "nGraph node is empty! 
Cannot validate eltwise arguments."); element::Type element_type = node->get_input_element_type(0); - PartialShape pshape = node->get_input_partial_shape(0); + Shape pshape = node->get_input_partial_shape(0); if (node->get_input_size() > 1) { for (size_t i = 1; i < node->get_input_size(); ++i) { @@ -21,13 +21,12 @@ std::tuple ov::op::util::validate_and_infer if (autob.m_type == op::AutoBroadcastType::NONE) { NODE_VALIDATION_CHECK(node, - PartialShape::merge_into(pshape, node->get_input_partial_shape(i)), + Shape::merge_into(pshape, node->get_input_partial_shape(i)), "Argument shapes are inconsistent."); } else if (autob.m_type == op::AutoBroadcastType::NUMPY || autob.m_type == op::AutoBroadcastType::PDPD) { - NODE_VALIDATION_CHECK( - node, - PartialShape::broadcast_merge_into(pshape, node->get_input_partial_shape(i), autob), - "Argument shapes are inconsistent."); + NODE_VALIDATION_CHECK(node, + Shape::broadcast_merge_into(pshape, node->get_input_partial_shape(i), autob), + "Argument shapes are inconsistent."); } else { NODE_VALIDATION_CHECK(node, false, "Unsupported auto broadcast specification"); } diff --git a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp index 358ba88c04352d..603a9379394695 100644 --- a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp @@ -80,7 +80,7 @@ void ov::op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() { ")"); NODE_VALIDATION_CHECK(this, - get_input_partial_shape(DEFAULT_INDEX).compatible(PartialShape{}), + get_input_partial_shape(DEFAULT_INDEX).compatible(Shape{}), "DEFAULT_INDEX must be a scalar"); } @@ -105,15 +105,15 @@ void ov::op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() { element::Type result_et = get_input_element_type(EMB_TABLE); - const PartialShape& emb_table_shape = get_input_partial_shape(EMB_TABLE); - const PartialShape& offsets_shape = get_input_partial_shape(OFFSETS); + const Shape& emb_table_shape = get_input_partial_shape(EMB_TABLE); + const Shape& offsets_shape = get_input_partial_shape(OFFSETS); - PartialShape result_shape; + Shape result_shape; if (emb_table_shape.rank().is_static()) { result_shape = emb_table_shape; result_shape[0] = offsets_shape.rank().is_static() ? offsets_shape[0] : Dimension::dynamic(); } else { - result_shape = PartialShape::dynamic(); + result_shape = Shape::dynamic(); } set_output_type(0, result_et, result_shape); diff --git a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp index 734fbd5ff1d1ed..9932658930c16a 100644 --- a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp @@ -56,15 +56,15 @@ void ov::op::util::EmbeddingBagPackedBase::validate_and_infer_types() { element::Type result_et = get_input_element_type(EMB_TABLE); - const PartialShape& emb_table_shape = get_input_partial_shape(EMB_TABLE); - const PartialShape& indices_shape = get_input_partial_shape(INDICES); + const Shape& emb_table_shape = get_input_partial_shape(EMB_TABLE); + const Shape& indices_shape = get_input_partial_shape(INDICES); - PartialShape result_shape; + Shape result_shape; if (emb_table_shape.rank().is_static()) { result_shape = emb_table_shape; result_shape[0] = indices_shape.rank().is_static() ? 
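(Aside: validate_and_infer_elementwise_args, retyped above, folds every input shape into one result: AutoBroadcastType::NONE demands dimension-wise agreement via merge_into, while NUMPY and PDPD go through broadcast_merge_into. A sketch of the NONE-mode merge with -1 standing in for a dynamic Dimension; hypothetical helper:)

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    std::optional<std::vector<int64_t>> merge_shapes(std::vector<int64_t> a,
                                                     const std::vector<int64_t>& b) {
        if (a.size() != b.size()) return std::nullopt;  // ranks must match
        for (size_t i = 0; i < a.size(); ++i) {
            if (a[i] == -1) a[i] = b[i];                // dynamic merges freely
            else if (b[i] != -1 && a[i] != b[i]) return std::nullopt;
        }
        return a;
    }
    // merge_shapes({2, -1, 4}, {2, 3, 4}) -> {2, 3, 4}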
indices_shape[0] : Dimension::dynamic(); } else { - result_shape = PartialShape::dynamic(); + result_shape = Shape::dynamic(); } set_output_type(0, result_et, result_shape); diff --git a/ngraph/core/src/op/util/fft_base.cpp b/ngraph/core/src/op/util/fft_base.cpp index d6551d4527583a..bdaf4fdad6af48 100644 --- a/ngraph/core/src/op/util/fft_base.cpp +++ b/ngraph/core/src/op/util/fft_base.cpp @@ -38,7 +38,7 @@ void ov::op::util::FFTBase::validate() { axes_et == element::i64 || axes_et == element::i32, "FFT op axes element type must be i32 or i64"); - const auto& input_shape = PartialShape(get_input_partial_shape(0)); + const auto& input_shape = Shape(get_input_partial_shape(0)); if (input_shape.rank().is_static()) { const auto input_rank = input_shape.rank().get_length(); NODE_VALIDATION_CHECK(this, @@ -53,7 +53,7 @@ void ov::op::util::FFTBase::validate() { input_shape[input_rank - 1]); } - const auto& axes_shape = PartialShape(get_input_partial_shape(1)); + const auto& axes_shape = Shape(get_input_partial_shape(1)); if (axes_shape.rank().is_static()) { NODE_VALIDATION_CHECK(this, axes_shape.rank().get_length() == 1, @@ -110,7 +110,7 @@ void ov::op::util::FFTBase::validate() { signal_size_et == element::i64 || signal_size_et == element::i32, "FFT op signal_size element type must be i32 or i64"); - const auto& signal_size_shape = PartialShape(get_input_partial_shape(2)); + const auto& signal_size_shape = Shape(get_input_partial_shape(2)); if (signal_size_shape.rank().is_static()) { NODE_VALIDATION_CHECK(this, signal_size_shape.rank().get_length() == 1, @@ -135,9 +135,9 @@ void ov::op::util::FFTBase::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_FFTBase_validate_and_infer_types); validate(); - const auto& input_shape = PartialShape(get_input_partial_shape(0)); - const auto& axes_shape = PartialShape(get_input_partial_shape(1)); - PartialShape output_shape = input_shape; + const auto& input_shape = Shape(get_input_partial_shape(0)); + const auto& axes_shape = Shape(get_input_partial_shape(1)); + Shape output_shape = input_shape; if (input_shape.rank().is_dynamic()) { set_output_type(0, get_input_element_type(0), output_shape); return; @@ -158,7 +158,7 @@ void ov::op::util::FFTBase::validate_and_infer_types() { return; } - const auto& signal_size_shape = PartialShape(get_input_partial_shape(2)); + const auto& signal_size_shape = Shape(get_input_partial_shape(2)); if (signal_size_shape.rank().is_dynamic()) { set_output_type(0, get_input_element_type(0), output_shape); return; diff --git a/ngraph/core/src/op/util/gather_base.cpp b/ngraph/core/src/op/util/gather_base.cpp index 31cc86232ac630..21dd8331aeb919 100644 --- a/ngraph/core/src/op/util/gather_base.cpp +++ b/ngraph/core/src/op/util/gather_base.cpp @@ -88,7 +88,7 @@ void ov::op::util::GatherBase::validate_and_infer_types() { if (data_rank.is_static() && indices_rank.is_static()) { auto out_rank = data_rank.get_length() + indices_rank.get_length() - 1 - batch_dims; - PartialShape output_pshape = PartialShape::dynamic(out_rank); + Shape output_pshape = Shape::dynamic(out_rank); // implementation of out_shape formula // data.shape[:batch_dims] + data.shape[batch_dims:axis] + indices.shape[batch_dims:] + @@ -124,7 +124,7 @@ void ov::op::util::GatherBase::validate_and_infer_types() { Rank out_rank = data_rank + indices_rank - 1 - batch_dims; if (batch_dims < 0) out_rank = out_rank - indices_rank.get_max_length(); - set_output_type(0, data_type, PartialShape::dynamic(out_rank)); + set_output_type(0, data_type, Shape::dynamic(out_rank)); } } @@ 
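(Aside: the gather_base.cpp hunk above assembles the output as data.shape[:batch_dims] + data.shape[batch_dims:axis] + indices.shape[batch_dims:] + data.shape[axis+1:], giving rank data_rank + indices_rank - 1 - batch_dims. Static-shape sketch assuming 0 <= batch_dims <= axis; hypothetical helper:)

    #include <cstdint>
    #include <vector>

    std::vector<int64_t> gather_output_shape(const std::vector<int64_t>& data,
                                             const std::vector<int64_t>& indices,
                                             int64_t axis, int64_t batch_dims) {
        std::vector<int64_t> out(data.begin(), data.begin() + axis);
        out.insert(out.end(), indices.begin() + batch_dims, indices.end());
        out.insert(out.end(), data.begin() + axis + 1, data.end());
        return out;
    }
    // gather_output_shape({3, 4, 5}, {3, 2}, /*axis=*/1, /*batch_dims=*/1) -> {3, 2, 5}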
-218,7 +218,7 @@ bool evaluate_gather(const ngraph::HostTensorPtr& arg0, bool cf_gather_with_subgraph(ov::OutputVector& output_values, const ov::OutputVector& input_values, - const ov::PartialShape& gather_ps) { + const ov::Shape& gather_ps) { if (gather_ps.is_dynamic() || input_values.size() != 3) { return false; } diff --git a/ngraph/core/src/op/util/index_reduction.cpp b/ngraph/core/src/op/util/index_reduction.cpp index 7e14a8cbb3aeb0..9d85c269ef8ac4 100644 --- a/ngraph/core/src/op/util/index_reduction.cpp +++ b/ngraph/core/src/op/util/index_reduction.cpp @@ -37,7 +37,7 @@ void ov::op::util::IndexReduction::set_index_element_type(const element::Type& i void ov::op::util::IndexReduction::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_IndexReduction_validate_and_infer_types); // TODO(amprocte): Should reject if size of reduction axis is zero. - const PartialShape& arg_shape = get_input_partial_shape(0); + const Shape& arg_shape = get_input_partial_shape(0); Rank rank = arg_shape.rank(); NODE_VALIDATION_CHECK(this, rank.is_dynamic() || rank.get_length() >= 1, "Argument rank is zero."); @@ -52,7 +52,7 @@ void ov::op::util::IndexReduction::validate_and_infer_types() { m_index_element_type == element::i32 || m_index_element_type == element::i64, "Index element is neither i64 or i32."); - PartialShape output_shape{PartialShape::dynamic()}; + Shape output_shape{Shape::dynamic()}; if (rank.is_static()) { Dimension d = arg_shape[m_axis]; @@ -73,7 +73,7 @@ void ov::op::util::IndexReduction::validate_and_infer_types() { output_dims[i] = arg_shape[j++]; } - output_shape = PartialShape(output_dims); + output_shape = Shape(output_dims); } set_output_type(0, m_index_element_type, output_shape); diff --git a/ngraph/core/src/op/util/logical_reduction.cpp b/ngraph/core/src/op/util/logical_reduction.cpp index 89f818c379a42d..d4dd5dd3ac1fbf 100644 --- a/ngraph/core/src/op/util/logical_reduction.cpp +++ b/ngraph/core/src/op/util/logical_reduction.cpp @@ -48,7 +48,7 @@ void op::util::LogicalReduction::validate_and_infer_types() { NGRAPH_OP_SCOPE(util_LogicalReduction_validate_and_infer_types); const element::Type& data_et = get_input_element_type(0); - const PartialShape& axes_shape = get_input_partial_shape(1); + const Shape& axes_shape = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, data_et.compatible(element::boolean), "Element type of data input must be boolean."); @@ -58,7 +58,7 @@ void op::util::LogicalReduction::validate_and_infer_types() { "Axes input must be a scalar or 1D input. 
Got: ", axes_shape); - PartialShape result_shape = infer_reduction_output_shape(false); + Shape result_shape = infer_reduction_output_shape(false); set_input_is_relevant_to_shape(1); set_output_type(0, data_et, result_shape); } diff --git a/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp b/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp index 014289eddfc4ad..5c45bdfc735d0d 100644 --- a/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp +++ b/ngraph/core/src/op/util/logical_reduction_keep_dims.cpp @@ -29,7 +29,7 @@ void ov::op::util::LogicalReductionKeepDims::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_util_LogicalReductionKeepDims_validate_and_infer_types); const element::Type& data_et = get_input_element_type(0); - const PartialShape& axes_shape = get_input_partial_shape(1); + const Shape& axes_shape = get_input_partial_shape(1); const element::Type& axes_et = get_input_element_type(1); NODE_VALIDATION_CHECK(this, data_et.compatible(element::boolean), "Element type of data input must be boolean."); @@ -45,7 +45,7 @@ void ov::op::util::LogicalReductionKeepDims::validate_and_infer_types() { "Axes input must be a scalar or 1D input. Got: ", axes_shape); - PartialShape result_shape = infer_reduction_output_shape(m_keep_dims); + Shape result_shape = infer_reduction_output_shape(m_keep_dims); set_input_is_relevant_to_shape(1); set_output_type(0, data_et, result_shape); } diff --git a/ngraph/core/src/op/util/max_pool_base.cpp b/ngraph/core/src/op/util/max_pool_base.cpp index 0e2a78e57322e6..60791ee0869cee 100644 --- a/ngraph/core/src/op/util/max_pool_base.cpp +++ b/ngraph/core/src/op/util/max_pool_base.cpp @@ -45,7 +45,7 @@ void ov::op::util::MaxPoolBase::validate_and_infer_types() { m_pads_end = ngraph::Shape(m_kernel.size(), 0); } - const PartialShape& arg_shape = get_input_partial_shape(0); + const Shape& arg_shape = get_input_partial_shape(0); NODE_VALIDATION_CHECK( this, @@ -74,7 +74,7 @@ void ov::op::util::MaxPoolBase::validate_and_infer_types() { } } -ov::PartialShape ov::op::util::MaxPoolBase::infer_output_shape(const Strides& dilations) { +ov::Shape ov::op::util::MaxPoolBase::infer_output_shape(const Strides& dilations) { NGRAPH_OP_SCOPE(util_MaxPoolBase_infer_output_shape); const auto& arg_shape = get_input_partial_shape(0); @@ -90,19 +90,19 @@ ov::PartialShape ov::op::util::MaxPoolBase::infer_output_shape(const Strides& di m_pads_begin = ngraph::Shape(m_pads_begin.size(), 0); } - auto output_shape = PartialShape::dynamic(); + auto output_shape = Shape::dynamic(); if (update_auto_padding_succeed) { CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end()); CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end()); - output_shape = infer_batched_pooling_forward(this, - get_input_partial_shape(0), - pads_begin, - pads_end, - m_kernel, - m_strides, - true, - m_rounding_type == op::RoundingType::CEIL, - dilations); + output_shape = ngraph::infer_batched_pooling_forward(this, + get_input_partial_shape(0), + pads_begin, + pads_end, + m_kernel, + m_strides, + true, + m_rounding_type == op::RoundingType::CEIL, + dilations); } else { if (arg_shape.rank().is_static()) { output_shape = std::vector(arg_shape.rank().get_max_length(), Dimension::dynamic()); @@ -118,15 +118,20 @@ ov::PartialShape ov::op::util::MaxPoolBase::infer_output_shape(const Strides& di return output_shape; } -bool ov::op::util::MaxPoolBase::update_auto_padding(const PartialShape& in_shape, +bool ov::op::util::MaxPoolBase::update_auto_padding(const Shape& in_shape, const Strides& 
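
// A sketch of what infer_reduction_output_shape(keep_dims), used by the
// logical-reduction hunks above, produces for a fully static case. The
// ov::opset8 class names are assumed here; they match the opset tests added
// later in this patch.
auto data = std::make_shared<ov::opset8::Parameter>(ov::element::boolean, ov::Shape{2, 3, 4});
auto axes = ov::opset8::Constant::create(ov::element::i64, {1}, {1});
auto keep = std::make_shared<ov::opset8::ReduceLogicalAnd>(data, axes, /*keep_dims=*/true);
// keep->get_output_partial_shape(0)  ->  {2, 1, 4}
auto drop = std::make_shared<ov::opset8::ReduceLogicalAnd>(data, axes, /*keep_dims=*/false);
// drop->get_output_partial_shape(0)  ->  {2, 4}
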
filter_dilations, ngraph::Shape& new_pads_end, ngraph::Shape& new_pads_begin) const { bool update_auto_padding_succeed = true; if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER) { CoordinateDiff pads_end, pads_begin; - update_auto_padding_succeed = - try_apply_auto_padding(in_shape, m_kernel, m_strides, filter_dilations, m_auto_pad, pads_end, pads_begin); + update_auto_padding_succeed = ngraph::try_apply_auto_padding(in_shape, + m_kernel, + m_strides, + filter_dilations, + m_auto_pad, + pads_end, + pads_begin); new_pads_end = ngraph::Shape(pads_end.begin(), pads_end.end()); new_pads_begin = ngraph::Shape(pads_begin.begin(), pads_begin.end()); } diff --git a/ngraph/core/src/op/util/reduction_base.cpp b/ngraph/core/src/op/util/reduction_base.cpp index 16419edc6efb1b..fb013e530b27f3 100644 --- a/ngraph/core/src/op/util/reduction_base.cpp +++ b/ngraph/core/src/op/util/reduction_base.cpp @@ -17,13 +17,13 @@ ov::op::util::ReductionBase::ReductionBase() = default; ov::op::util::ReductionBase::ReductionBase(const Output& arg, const Output& reduction_axes) : Op({arg, reduction_axes}) {} -ov::PartialShape ov::op::util::ReductionBase::infer_reduction_output_shape(const bool keep_dims) { - const PartialShape& data_ps = get_input_partial_shape(0); - PartialShape result_ps{PartialShape::dynamic()}; +ov::Shape ov::op::util::ReductionBase::infer_reduction_output_shape(const bool keep_dims) { + const Shape& data_ps = get_input_partial_shape(0); + Shape result_ps{Shape::dynamic()}; Rank data_rank = data_ps.rank(); if (data_rank.is_static() && keep_dims) { - result_ps = PartialShape::dynamic(data_rank); + result_ps = Shape::dynamic(data_rank); } const auto& axes = get_constant_from_source(input_value(1)); @@ -55,7 +55,7 @@ ov::PartialShape ov::op::util::ReductionBase::infer_reduction_output_shape(const dims.emplace_back(Dimension{1}); } } - result_ps = PartialShape(dims); + result_ps = Shape(dims); } return result_ps; } diff --git a/ngraph/core/src/op/util/scatter_base.cpp b/ngraph/core/src/op/util/scatter_base.cpp index 219fecb832cd35..a83f966ff41e59 100644 --- a/ngraph/core/src/op/util/scatter_base.cpp +++ b/ngraph/core/src/op/util/scatter_base.cpp @@ -49,7 +49,7 @@ void ov::op::util::ScatterBase::validate_and_infer_types() { const auto& axis_shape = get_input_partial_shape(AXIS); NODE_VALIDATION_CHECK(this, - axis_shape.compatible(PartialShape{}) || axis_shape.compatible(PartialShape{1}), + axis_shape.compatible(Shape{}) || axis_shape.compatible(Shape{1}), "Axis input shape is required to be scalar or 1D tensor. 
", "Got: ", axis_shape); diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp index 0c2b6a2f52e5ca..b0f23e11214ec2 100644 --- a/ngraph/core/src/op/util/scatter_nd_base.cpp +++ b/ngraph/core/src/op/util/scatter_nd_base.cpp @@ -33,9 +33,9 @@ void ov::op::util::ScatterNDBase::validate_and_infer_types() { element::Type indices_et = get_input_element_type(INDICES); element::Type updates_et = get_input_element_type(UPDATES); - const PartialShape& inputs_shape = get_input_partial_shape(INPUTS); - const PartialShape& indices_shape = get_input_partial_shape(INDICES); - const PartialShape& updates_shape = get_input_partial_shape(UPDATES); + const Shape& inputs_shape = get_input_partial_shape(INPUTS); + const Shape& indices_shape = get_input_partial_shape(INDICES); + const Shape& updates_shape = get_input_partial_shape(UPDATES); const auto& inputs_rank = inputs_shape.rank(); const auto& indices_rank = indices_shape.rank(); diff --git a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp index 0d02f92f097209..06394ae9481208 100644 --- a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp @@ -16,7 +16,7 @@ ov::op::util::UnaryElementwiseArithmetic::UnaryElementwiseArithmetic(const Outpu void ov::op::util::UnaryElementwiseArithmetic::validate_and_infer_elementwise_arithmetic() { auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this); element::Type& args_et = std::get<0>(args_et_pshape); - PartialShape& args_pshape = std::get<1>(args_et_pshape); + Shape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, args_et.is_dynamic() || args_et != element::boolean, diff --git a/ngraph/core/src/op/variadic_split.cpp b/ngraph/core/src/op/variadic_split.cpp index 2dd5882614669e..57cca0e9b9eb7c 100644 --- a/ngraph/core/src/op/variadic_split.cpp +++ b/ngraph/core/src/op/variadic_split.cpp @@ -101,11 +101,11 @@ void ngraph::op::v1::VariadicSplit::validate_and_infer_types() { split_lengths.at(output) == -1 ? 
Dimension::dynamic() : split_lengths.at(output); auto tmp_shape = data_shape_dims; tmp_shape.at(axis) = output_split_dim; - set_output_type(output, data_type, PartialShape{tmp_shape}); + set_output_type(output, data_type, ov::Shape{tmp_shape}); } } else { for (int64_t output{0}; output < num_outputs; ++output) { - set_output_type(output, data_type, PartialShape::dynamic()); + set_output_type(output, data_type, ov::Shape::dynamic()); } } } @@ -159,7 +159,7 @@ bool op::v1::VariadicSplit::evaluate_variadic_split(const HostTensorVector& inpu split_lengths[std::distance(std::begin(split_lengths), neg_one)] = data_shape[axis] - sum_of_known_splits; } - Shape output_shape = data_shape; + ov::StaticShape output_shape = data_shape; std::vector lower_bounds(data_shape.size(), 0); std::vector upper_bounds = data_shape; upper_bounds.at(axis) = split_lengths[0]; diff --git a/ngraph/core/src/partial_shape.cpp b/ngraph/core/src/partial_shape.cpp deleted file mode 100644 index da93eb1ba15f0b..00000000000000 --- a/ngraph/core/src/partial_shape.cpp +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/core/partial_shape.hpp" - -#include -#include -#include - -#include "ngraph/check.hpp" - -using namespace ov; - -PartialShape::PartialShape() : PartialShape(std::initializer_list{}) {} - -PartialShape::PartialShape(std::initializer_list init) : PartialShape(true, init) {} - -PartialShape::PartialShape(const std::vector& dimensions) - : m_rank_is_static(true), - m_dimensions(dimensions.begin(), dimensions.end()) {} - -PartialShape::PartialShape(const ngraph::Shape& shape) - : m_rank_is_static(true), - m_shape_type(ShapeType::SHAPE_IS_STATIC), - m_dimensions(shape.begin(), shape.end()) {} - -PartialShape::PartialShape(bool rank_is_static, std::vector dimensions) - : m_rank_is_static(rank_is_static), - m_dimensions(std::move(dimensions)) {} - -PartialShape::PartialShape(std::vector dimensions) - : m_rank_is_static(true), - m_dimensions(std::move(dimensions)) {} - -bool PartialShape::is_static() const { - ShapeType shape_type = m_shape_type; - - if (m_shape_type == ShapeType::SHAPE_IS_UNKNOWN || m_shape_type == ShapeType::SHAPE_IS_UPDATED) { - shape_type = m_rank_is_static && std::all_of(m_dimensions.begin(), - m_dimensions.end(), - [](const Dimension& d) { - return d.is_static(); - }) - ? 
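
// The `-1` rule used by evaluate_variadic_split above, as a standalone sketch
// (assumes <vector>/<cstdint>): the single -1 entry absorbs whatever is left
// of the split axis after the explicit lengths are taken.
std::vector<int64_t> resolve_split_lengths(std::vector<int64_t> lengths, int64_t axis_len) {
    int64_t known = 0;
    for (int64_t l : lengths)
        if (l != -1)
            known += l;
    for (int64_t& l : lengths)
        if (l == -1)
            l = axis_len - known;  // e.g. {2, -1, 3} with axis_len 10 -> {2, 5, 3}
    return lengths;
}
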
ShapeType::SHAPE_IS_STATIC - : ShapeType::SHAPE_IS_DYNAMIC; - - if (m_shape_type == ShapeType::SHAPE_IS_UNKNOWN) - m_shape_type = shape_type; - } - - return shape_type == ShapeType::SHAPE_IS_STATIC; -} - -bool PartialShape::operator==(const PartialShape& partial_shape) const { - if (rank() != partial_shape.rank()) { - return false; - } - if (rank().is_dynamic()) { - return true; - } - for (auto i = 0; i < rank().get_length(); ++i) { - if (m_dimensions[i] != partial_shape.m_dimensions[i]) { - return false; - } - } - return true; -} - -bool PartialShape::operator!=(const PartialShape& partial_shape) const { - return !(*this == partial_shape); -} - -ngraph::Shape PartialShape::get_max_shape() const { - if (rank().is_dynamic()) { - return ngraph::Shape(); - } else { - ngraph::Shape shape; - for (auto dimension : m_dimensions) { - shape.push_back(dimension.get_interval().get_max_val()); - } - return shape; - } -} - -ngraph::Shape PartialShape::get_min_shape() const { - if (rank().is_dynamic()) { - return ngraph::Shape(); - } else { - ngraph::Shape shape; - for (auto dimension : m_dimensions) { - shape.push_back(dimension.get_interval().get_min_val()); - } - return shape; - } -} - -ngraph::Shape PartialShape::get_shape() const { - NGRAPH_CHECK(rank().is_static(), "get_shape() must be called on a static shape"); - ngraph::Shape shape; - for (auto dimension : m_dimensions) { - auto min_val = dimension.get_interval().get_min_val(); - auto max_val = dimension.get_interval().get_max_val(); - NGRAPH_CHECK(min_val == max_val, "get_shape() must be called on a static shape"); - shape.push_back(min_val); - } - return shape; -} - -PartialShape ov::operator+(const PartialShape& s1, const PartialShape& s2) { - if (s1.rank().is_dynamic() || s2.rank().is_dynamic()) { - return PartialShape::dynamic(); - } - - if (!s1.rank().compatible(s2.rank())) { - throw std::invalid_argument("rank mismatch"); - } - - PartialShape result{}; - result.m_rank_is_static = true; - for (size_t i = 0; i < s1.m_dimensions.size(); i++) { - result.m_dimensions.push_back(s1.m_dimensions[i] + s2.m_dimensions[i]); - } - return result; -} - -std::ostream& ov::operator<<(std::ostream& str, const PartialShape& shape) { - if (shape.m_rank_is_static) { - str << "{"; - bool first = true; - for (auto& d : shape.m_dimensions) { - if (!first) { - str << ","; - } - str << d; - first = false; - } - return (str << "}"); - } else { - return (str << "?"); - } -} - -PartialShape PartialShape::dynamic(Rank r) { - return PartialShape(r.is_static(), - std::vector(r.is_static() ? r.get_length() : 0, Dimension::dynamic())); -} - -bool PartialShape::compatible(const PartialShape& s) const { - // If we don't know *this's rank, or we don't know s's rank, they are compatible. - if (!m_rank_is_static || s.rank().is_dynamic()) { - return true; - } - // If we do know *this's rank and s's rank, and they are unequal, they are incompatible. - else if (rank().get_length() != s.rank().get_length()) { - return false; - } - // If we know both the ranks and they are equal, then *this and s are compatible iff they - // are elementwise compatible everywhere. - else { - for (int64_t i = 0; i < rank().get_length(); i++) { - if (!m_dimensions[i].compatible(s.m_dimensions[i])) { - return false; - } - } - // If we are still here, we know that s1 and s2 have the same rank and are elementwise - // compatible everywhere. 
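
// compatible() in this (now deleted) implementation, by example; the same
// semantics carry over unchanged to ov::Shape later in this patch:
//   PartialShape::dynamic().compatible(anything)                        -> true
//   PartialShape{Dimension::dynamic(), 3}.compatible(PartialShape{2, 3}) -> true
//   PartialShape{2, 3}.compatible(PartialShape{2, 4})                    -> false
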
- return true; - } -} - -bool PartialShape::same_scheme(const PartialShape& s) const { - if (rank().is_dynamic() && s.rank().is_dynamic()) { - return true; - } else if (rank().is_static() && s.rank().is_static()) { - if (rank().get_length() != s.rank().get_length()) { - return false; - } - - bool success = true; - - for (int64_t i = 0; i < rank().get_length(); i++) { - success &= (*this)[i].same_scheme(s[i]); - } - - return success; - } else { - return false; - } -} - -bool PartialShape::relaxes(const PartialShape& s) const { - if (rank().is_dynamic()) { - return true; - } else if (s.rank().is_static() && rank().get_length() == s.rank().get_length()) { - bool all_relax = true; - - for (int64_t i = 0; i < rank().get_length(); i++) { - all_relax &= ((*this)[i].relaxes(s[i])); - } - - return all_relax; - } else { - return false; - } -} - -bool PartialShape::refines(const PartialShape& s) const { - if (s.rank().is_dynamic()) { - return true; - } else if (rank().is_static() && rank().get_length() == s.rank().get_length()) { - bool all_refine = true; - - for (int64_t i = 0; i < rank().get_length(); i++) { - all_refine &= ((*this)[i].refines(s[i])); - } - - return all_refine; - } else { - return false; - } -} - -bool PartialShape::merge_rank(Rank r) { - if (r.is_dynamic()) { - return true; - } else if (!m_rank_is_static) { - m_rank_is_static = true; - m_dimensions = std::vector(r.get_length(), Dimension::dynamic()); - m_shape_type = ShapeType::SHAPE_IS_UNKNOWN; - return true; - } else { - return (static_cast(m_dimensions.size()) == r.get_length()); - } -} - -ngraph::Shape PartialShape::to_shape() const { - if (is_dynamic()) { - throw std::invalid_argument("to_shape was called on a dynamic shape."); - } - - std::vector shape_dimensions(m_dimensions.size()); - std::transform(m_dimensions.begin(), m_dimensions.end(), shape_dimensions.begin(), [](const Dimension& d) { - return d.get_length(); - }); - - return shape_dimensions; -} - -bool PartialShape::merge_into(PartialShape& dst, const PartialShape& src) { - if (dst.rank().is_dynamic()) { - dst = src; - return true; - } else if (src.rank().is_dynamic()) { - // No change to dst. - return true; - } else if (dst.rank().get_length() != src.rank().get_length()) { - // Mismatching static ranks, cannot merge. - return false; - } else { - // Ranks are both static, and they match. - bool success = true; - for (int64_t i = 0; i < dst.rank().get_length(); i++) { - success &= Dimension::merge(dst[i], dst[i], src[i]); - } - return success; - } -} - -bool PartialShape::broadcast_merge_into(PartialShape& dst, - const PartialShape& src, - const ngraph::op::AutoBroadcastSpec& autob) { - switch (autob.m_type) { - case ngraph::op::AutoBroadcastType::NONE: - return true; - case ngraph::op::AutoBroadcastType::NUMPY: { - if (dst.rank().is_dynamic() || src.rank().is_dynamic()) { - dst = PartialShape::dynamic(); - return true; - } else { - // Ranks are both static. - auto dst_rank = dst.rank().get_length(); - auto src_rank = src.rank().get_length(); - auto new_rank = std::max(dst_rank, src_rank); - std::vector dims(new_rank); - bool success = true; - for (int64_t i = 0; i < new_rank; i++) { - auto dsti = i < (new_rank - dst_rank) ? Dimension(1) : dst[i - (new_rank - dst_rank)]; - auto srci = i < (new_rank - src_rank) ? 
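
// NUMPY broadcast_merge_into, worked example (a sketch; it relies on
// AutoBroadcastSpec converting implicitly from AutoBroadcastType): the shorter
// shape is left-padded with 1s, then dimensions merge elementwise.
PartialShape dst{3, 1};
PartialShape src{2, 1, 4};
bool ok = PartialShape::broadcast_merge_into(dst, src, ngraph::op::AutoBroadcastType::NUMPY);
// ok == true, dst is now {2, 3, 4}
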
Dimension(1) : src[i - (new_rank - src_rank)]; - success &= Dimension::broadcast_merge(dims[i], dsti, srci); - } - dst = PartialShape(std::move(dims)); - return success; - } - } - case ngraph::op::AutoBroadcastType::PDPD: { - if (dst.rank().is_dynamic() || src.rank().is_dynamic()) { - return true; - } else { - // Ranks are both static. - auto dst_rank = dst.rank().get_length(); - auto src_rank = src.rank().get_length(); - if (dst_rank == src_rank && dst.compatible(src)) - return true; - - int64_t axis = autob.m_axis; - if (axis < -1) { - return false; - } - if (axis == -1) { - axis = dst_rank - src_rank; - } - - size_t len = src_rank; - while (len > 0 && src[len - 1].is_static() && src[len - 1].get_length() == 1) { - --len; - } - - for (size_t i = axis; i < axis + len; ++i) { - if (!(dst[i].compatible(src[i - axis]))) { - return false; - } - } - - return true; - } - } - default: - NGRAPH_CHECK(false, "Unsupported auto broadcast type: ", autob.m_type); - } - - return false; -} - -bool PartialShape::all_non_negative() const { - for (auto& d : m_dimensions) { - if (d.is_static() && d.get_length() < 0) { - return false; - } - } - - return true; -} - -const Dimension& PartialShape::operator[](size_t i) const { - if (i >= m_dimensions.size()) { - throw std::out_of_range("Accessing out-of-range dimension in Dimension[]"); - } - return m_dimensions[i]; -} - -Dimension& PartialShape::operator[](size_t i) { - if (i >= m_dimensions.size()) { - throw std::out_of_range("Accessing out-of-range dimension in Dimension[]"); - } - m_shape_type = ShapeType::SHAPE_IS_UPDATED; // We can't guarantee that the shape remains static or dynamic. - return m_dimensions[i]; -} - -const std::vector& ov::AttributeAdapter::get() { - if (!m_buffer_valid) { - m_buffer.clear(); - if (m_ref.rank().is_dynamic()) { - m_buffer.push_back(-2); - } else { - for (int64_t i = 0; i < m_ref.rank().get_length(); ++i) { - const auto& elt = static_cast(m_ref)[i]; - m_buffer.push_back(elt.is_dynamic() ? -1 : elt.get_length()); - } - } - m_buffer_valid = true; - } - return m_buffer; -} - -void ov::AttributeAdapter::set(const std::vector& value) { - m_ref = PartialShape(); - if (value.size() == 1 && value[0] == -2) { - m_ref = PartialShape::dynamic(); - } else { - std::vector dims; - for (auto elt : value) { - dims.push_back(elt == -1 ? 
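
// Encoding used by this AttributeAdapter, round-tripped through
// std::vector<int64_t>:
//   PartialShape::dynamic()                   <-> {-2}
//   PartialShape{1, Dimension::dynamic(), 3}  <-> {1, -1, 3}
//   PartialShape{}  (static rank 0)           <-> {}
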
Dimension::dynamic() : elt); - } - m_ref = PartialShape(dims); - } - m_buffer_valid = false; -} - -NGRAPH_API constexpr DiscreteTypeInfo ov::AttributeAdapter::type_info; diff --git a/ngraph/core/src/pass/low_latency.cpp b/ngraph/core/src/pass/low_latency.cpp index d4e0abf21433bd..5180949e79ac38 100644 --- a/ngraph/core/src/pass/low_latency.cpp +++ b/ngraph/core/src/pass/low_latency.cpp @@ -63,7 +63,7 @@ ngraph::pass::LowLatency::LowLatency() { func->get_parameters().at(merged_in->m_body_parameter_index)->get_friendly_name(), variable_id)); auto variable = - std::make_shared(VariableInfo{PartialShape::dynamic(), element::dynamic, variable_name}); + std::make_shared(VariableInfo{ov::Shape::dynamic(), element::dynamic, variable_name}); auto read_value = std::make_shared(func->get_parameters().at(merged_in->m_body_parameter_index), variable); @@ -186,7 +186,7 @@ bool ov::pass::LowLatency2::run_on_function(shared_ptr f) { } } - ngraph::VariableInfo var_info{PartialShape::dynamic(), element::dynamic, var_name}; + ngraph::VariableInfo var_info{Shape::dynamic(), element::dynamic, var_name}; auto variable = make_shared(var_info); // insert ReadValue diff --git a/ngraph/core/src/pattern/op/label.cpp b/ngraph/core/src/pattern/op/label.cpp index 025ac805f29716..65239866ff81c5 100644 --- a/ngraph/core/src/pattern/op/label.cpp +++ b/ngraph/core/src/pattern/op/label.cpp @@ -49,5 +49,5 @@ std::shared_ptr ov::pass::pattern::any_input() { } std::shared_ptr ov::pass::pattern::any_input(const ov::pass::pattern::op::ValuePredicate& pred) { - return std::make_shared(element::dynamic, PartialShape::dynamic(), pred); + return std::make_shared(element::dynamic, Shape::dynamic(), pred); } diff --git a/ngraph/core/src/preprocess/pre_post_process.cpp b/ngraph/core/src/preprocess/pre_post_process.cpp new file mode 100644 index 00000000000000..30bbd1f260020f --- /dev/null +++ b/ngraph/core/src/preprocess/pre_post_process.cpp @@ -0,0 +1,266 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/core/preprocess/pre_post_process.hpp" + +#include "ngraph/opsets/opset1.hpp" +#include "openvino/core/function.hpp" + +namespace ov { +namespace preprocess { + +/// \brief InputTensorInfoImpl - internal data structure +struct InputTensorInfo::InputTensorInfoImpl { + InputTensorInfoImpl() = default; + explicit InputTensorInfoImpl(const element::Type& type) : m_type(type) {} + + element::Type m_type = element::dynamic; +}; + +/// \brief PreProcessStepsImpl - internal data structure +struct PreProcessSteps::PreProcessStepsImpl { + void add_scale_impl(const std::vector& values) { + m_actions.emplace_back(std::make_tuple( + [values](const std::shared_ptr& node) { + ngraph::Shape shape; + if (values.size() == 1) { + shape = ngraph::Shape{1}; + } else { + // TODO: implement when Layout API is available + } + auto constant = op::v0::Constant::create(element::f32, shape, values); + constant->set_friendly_name(node->get_friendly_name() + "/scale/Divide_Factor"); + + auto new_op = std::make_shared(node, constant); + new_op->set_friendly_name(node->get_friendly_name() + "/scale/Divide"); + return new_op; + }, + false)); + } + + void add_mean_impl(const std::vector& values) { + m_actions.emplace_back(std::make_tuple( + [values](const std::shared_ptr& node) { + ngraph::Shape shape; + if (values.size() == 1) { + shape = ngraph::Shape{1}; + } else { + // TODO: implement when Layout API is available + } + auto constant = op::v0::Constant::create(element::f32, shape, values); + 
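
// Intended use of the steps being defined here (it mirrors the preprocess
// tests added later in this patch); mean is subtracted, then scale divides:
//   f = PrePostProcessor()
//           .input(InputInfo().preprocess(PreProcessSteps().mean(1.f).scale(2.f)))
//           .build(f);
//   // each element becomes (x - 1.f) / 2.f
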
constant->set_friendly_name(node->get_friendly_name() + "/mean/Mean_Const"); + + auto new_op = std::make_shared(node, constant); + new_op->set_friendly_name(node->get_friendly_name() + "/mean/Subtract"); + return new_op; + }, + false)); + } + + void add_convert_impl(const element::Type& type) { + m_actions.emplace_back(std::make_tuple( + [type](const std::shared_ptr& node) { + if (node->get_element_type().is_dynamic()) { + throw ngraph::ngraph_error("Can't insert 'convert_element_type' for dynamic source tensor type."); + } + auto convert = std::make_shared(node, type); + convert->set_friendly_name(node->get_friendly_name() + "/convert_element_type"); + return convert; + }, + true)); + } + std::list> m_actions; +}; + +/// \brief InputInfoImpl - internal data structure +struct InputInfo::InputInfoImpl { + InputInfoImpl() = default; + explicit InputInfoImpl(size_t idx) : m_has_index(true), m_index(idx) {} + + bool has_index() const { + return m_has_index; + } + + void create_tensor_data(const element::Type& type) { + m_tensor_data = + std::unique_ptr(new InputTensorInfo::InputTensorInfoImpl(type)); + } + + bool m_has_index = false; + size_t m_index = 0; + std::unique_ptr m_tensor_data; + std::unique_ptr m_preprocess; +}; + +//-------------- InputInfo ------------------ +InputInfo::InputInfo() : m_impl(std::unique_ptr(new InputInfoImpl)) {} +InputInfo::InputInfo(size_t input_index) : m_impl(std::unique_ptr(new InputInfoImpl(input_index))) {} +InputInfo::InputInfo(InputInfo&&) noexcept = default; +InputInfo& InputInfo::operator=(InputInfo&&) noexcept = default; +InputInfo::~InputInfo() = default; + +InputInfo& InputInfo::tensor(InputTensorInfo&& builder) & { + m_impl->m_tensor_data = std::move(builder.m_impl); + return *this; +} + +InputInfo&& InputInfo::tensor(InputTensorInfo&& builder) && { + m_impl->m_tensor_data = std::move(builder.m_impl); + return std::move(*this); +} + +InputInfo&& InputInfo::preprocess(PreProcessSteps&& builder) && { + m_impl->m_preprocess = std::move(builder.m_impl); + return std::move(*this); +} + +InputInfo& InputInfo::preprocess(PreProcessSteps&& builder) & { + m_impl->m_preprocess = std::move(builder.m_impl); + return *this; +} + +// ------------------------ PrePostProcessor -------------------- +struct PrePostProcessor::PrePostProcessorImpl { +public: + std::list> in_contexts; +}; + +PrePostProcessor::PrePostProcessor() : m_impl(std::unique_ptr(new PrePostProcessorImpl())) {} +PrePostProcessor::PrePostProcessor(PrePostProcessor&&) noexcept = default; +PrePostProcessor& PrePostProcessor::operator=(PrePostProcessor&&) noexcept = default; +PrePostProcessor::~PrePostProcessor() = default; + +PrePostProcessor& PrePostProcessor::input(InputInfo&& builder) & { + m_impl->in_contexts.push_back(std::move(builder.m_impl)); + return *this; +} + +PrePostProcessor&& PrePostProcessor::input(InputInfo&& builder) && { + m_impl->in_contexts.push_back(std::move(builder.m_impl)); + return std::move(*this); +} + +std::shared_ptr PrePostProcessor::build(const std::shared_ptr& function) { + bool tensor_data_updated = false; + for (const auto& input : m_impl->in_contexts) { + std::shared_ptr param; + OPENVINO_ASSERT(input, "Internal error: Invalid preprocessing input, please report a problem"); + if (input->has_index()) { + param = function->get_parameters().at(input->m_index); + } else { + // Default case + OPENVINO_ASSERT(function->get_parameters().size() == 1, + std::string("Preprocessing info expects having 1 input, however function has ") + + 
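
// Each entry in m_actions is presumably a (callback, tensor_data_updated)
// pair, roughly:
//   std::tuple<std::function<std::shared_ptr<Node>(std::shared_ptr<Node>)>, bool>
// build() threads the input node through the callbacks in order and calls
// validate_nodes_and_infer_types() afterwards if any action set the flag
// (convert_element_type and custom steps do; mean/scale do not).
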
std::to_string(function->get_parameters().size()) + + " inputs. Please use ov::preprocess::InputInfo constructor specifying " + "particular input instead of default one"); + param = function->get_parameters().front(); + } + auto consumers = param->output(0).get_target_inputs(); + if (!input->m_tensor_data) { + input->create_tensor_data(param->get_element_type()); + } + auto new_param_shape = param->get_partial_shape(); + auto new_param = std::make_shared(input->m_tensor_data->m_type, new_param_shape); + // Old param will be removed, so friendly name can be reused + new_param->set_friendly_name(param->get_friendly_name()); + std::shared_ptr node = new_param; + + // 2. Apply preprocessing + for (const auto& action : input->m_preprocess->m_actions) { + node = std::get<0>(action)(node); + tensor_data_updated |= std::get<1>(action); + } + + // Check final type + if (node->get_element_type() != param->get_element_type()) { + throw ngraph::ngraph_error( + std::string("Element type after preprocessing {") + node->get_element_type().c_type_string() + + std::string("} doesn't match with network element type {") + param->get_element_type().c_type_string() + + "}. Please add 'convert_element_type' explicitly"); + } + + // Replace parameter + for (auto consumer : consumers) { + consumer.replace_source_output(node); + } + if (input->has_index()) { + function->replace_parameter(input->m_index, new_param); + } else { + function->replace_parameter(0, new_param); + } + } + if (tensor_data_updated) { + function->validate_nodes_and_infer_types(); + } + return function; +} + +// --------------------- InputTensorInfo ------------------ +InputTensorInfo::InputTensorInfo() : m_impl(std::unique_ptr(new InputTensorInfoImpl())) {} +InputTensorInfo::InputTensorInfo(InputTensorInfo&&) noexcept = default; +InputTensorInfo& InputTensorInfo::operator=(InputTensorInfo&&) noexcept = default; +InputTensorInfo::~InputTensorInfo() = default; + +InputTensorInfo& InputTensorInfo::set_element_type(const element::Type& type) & { + m_impl->m_type = type; + return *this; +} + +InputTensorInfo&& InputTensorInfo::set_element_type(const element::Type& type) && { + m_impl->m_type = type; + return std::move(*this); +} + +// --------------------- PreProcessSteps ------------------ + +PreProcessSteps::PreProcessSteps() : m_impl(std::unique_ptr(new PreProcessStepsImpl())) {} +PreProcessSteps::PreProcessSteps(PreProcessSteps&&) noexcept = default; +PreProcessSteps& PreProcessSteps::operator=(PreProcessSteps&&) noexcept = default; +PreProcessSteps::~PreProcessSteps() = default; + +PreProcessSteps& PreProcessSteps::scale(float value) & { + m_impl->add_scale_impl(std::vector{value}); + return *this; +} + +PreProcessSteps&& PreProcessSteps::scale(float value) && { + m_impl->add_scale_impl(std::vector{value}); + return std::move(*this); +} + +PreProcessSteps& PreProcessSteps::mean(float value) & { + m_impl->add_mean_impl(std::vector{value}); + return *this; +} + +PreProcessSteps&& PreProcessSteps::mean(float value) && { + m_impl->add_mean_impl(std::vector{value}); + return std::move(*this); +} + +PreProcessSteps& PreProcessSteps::convert_element_type(const element::Type& type) & { + m_impl->add_convert_impl(type); + return *this; +} + +PreProcessSteps&& PreProcessSteps::convert_element_type(const element::Type& type) && { + m_impl->add_convert_impl(type); + return std::move(*this); +} + +PreProcessSteps& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb) & { + // 'true' indicates that custom preprocessing step will trigger 
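
// The paired `&` / `&&` overloads on these builders let chained temporaries
// move instead of copy; e.g. PreProcessSteps().mean(1.f).scale(2.f) resolves
// to the && overloads throughout the chain.
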
validate_and_infer_types + m_impl->m_actions.emplace_back(std::make_tuple(preprocess_cb, true)); + return *this; +} + +PreProcessSteps&& PreProcessSteps::custom(const CustomPreprocessOp& preprocess_cb) && { + // 'true' indicates that custom preprocessing step will trigger validate_and_infer_types + m_impl->m_actions.emplace_back(std::make_tuple(preprocess_cb, true)); + return std::move(*this); +} + +} // namespace preprocess +} // namespace ov diff --git a/ngraph/core/src/shape.cpp b/ngraph/core/src/shape.cpp index 3b7b1aab445aa5..40762deb899a1a 100644 --- a/ngraph/core/src/shape.cpp +++ b/ngraph/core/src/shape.cpp @@ -2,40 +2,383 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/shape.hpp" +#include "openvino/core/shape.hpp" -#include "ngraph/util.hpp" +#include +#include +#include -using namespace std; -using namespace ngraph; +#include "ngraph/check.hpp" -std::ostream& ngraph::operator<<(std::ostream& s, const Shape& shape) { - s << "Shape{"; - s << ngraph::join(shape); - s << "}"; - return s; +ov::Shape::Shape() : Shape(std::initializer_list{}) {} + +ov::Shape::Shape(std::initializer_list init) : Shape(true, init) {} + +ov::Shape::Shape(const std::vector& dimensions) + : m_rank_is_static(true), + m_dimensions(dimensions.begin(), dimensions.end()) {} + +ov::Shape::Shape(const StaticShape& shape) + : m_rank_is_static(true), + m_shape_type(ShapeType::SHAPE_IS_STATIC), + m_dimensions(shape.begin(), shape.end()) {} + +ov::Shape::Shape(bool rank_is_static, std::vector dimensions) + : m_rank_is_static(rank_is_static), + m_dimensions(std::move(dimensions)) {} + +ov::Shape::Shape(std::vector dimensions) : m_rank_is_static(true), m_dimensions(std::move(dimensions)) {} + +bool ov::Shape::is_static() const { + ShapeType shape_type = m_shape_type; + + if (m_shape_type == ShapeType::SHAPE_IS_UNKNOWN || m_shape_type == ShapeType::SHAPE_IS_UPDATED) { + shape_type = m_rank_is_static && std::all_of(m_dimensions.begin(), + m_dimensions.end(), + [](const Dimension& d) { + return d.is_static(); + }) + ? 
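
// is_static() memoizes its answer: the scan over m_dimensions runs only while
// m_shape_type is SHAPE_IS_UNKNOWN (never computed) or SHAPE_IS_UPDATED (cache
// invalidated by the non-const operator[] below), and the result is written
// back only in the UNKNOWN case.
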
ShapeType::SHAPE_IS_STATIC + : ShapeType::SHAPE_IS_DYNAMIC; + + if (m_shape_type == ShapeType::SHAPE_IS_UNKNOWN) + m_shape_type = shape_type; + } + + return shape_type == ShapeType::SHAPE_IS_STATIC; +} + +bool ov::Shape::operator==(const Shape& partial_shape) const { + if (rank() != partial_shape.rank()) { + return false; + } + if (rank().is_dynamic()) { + return true; + } + for (auto i = 0; i < rank().get_length(); ++i) { + if (m_dimensions[i] != partial_shape.m_dimensions[i]) { + return false; + } + } + return true; +} + +bool ov::Shape::operator!=(const Shape& partial_shape) const { + return !(*this == partial_shape); +} + +ov::StaticShape ov::Shape::get_max_shape() const { + if (rank().is_dynamic()) { + return StaticShape(); + } else { + StaticShape shape; + for (auto dimension : m_dimensions) { + shape.push_back(dimension.get_interval().get_max_val()); + } + return shape; + } +} + +ov::StaticShape ov::Shape::get_min_shape() const { + if (rank().is_dynamic()) { + return StaticShape(); + } else { + StaticShape shape; + for (auto dimension : m_dimensions) { + shape.push_back(dimension.get_interval().get_min_val()); + } + return shape; + } +} + +ov::StaticShape ov::Shape::get_shape() const { + NGRAPH_CHECK(rank().is_static(), "get_shape() must be called on a static shape"); + StaticShape shape; + for (auto dimension : m_dimensions) { + auto min_val = dimension.get_interval().get_min_val(); + auto max_val = dimension.get_interval().get_max_val(); + NGRAPH_CHECK(min_val == max_val, "get_shape() must be called on a static shape"); + shape.push_back(min_val); + } + return shape; +} + +ov::Shape ov::operator+(const Shape& s1, const Shape& s2) { + if (s1.rank().is_dynamic() || s2.rank().is_dynamic()) { + return Shape::dynamic(); + } + + if (!s1.rank().compatible(s2.rank())) { + throw std::invalid_argument("rank mismatch"); + } + + Shape result{}; + result.m_rank_is_static = true; + for (size_t i = 0; i < s1.m_dimensions.size(); i++) { + result.m_dimensions.push_back(s1.m_dimensions[i] + s2.m_dimensions[i]); + } + return result; +} + +std::ostream& ov::operator<<(std::ostream& str, const Shape& shape) { + if (shape.m_rank_is_static) { + str << "{"; + bool first = true; + for (auto& d : shape.m_dimensions) { + if (!first) { + str << ","; + } + str << d; + first = false; + } + return (str << "}"); + } else { + return (str << "?"); + } +} + +ov::Shape ov::Shape::dynamic(Rank r) { + return Shape(r.is_static(), std::vector(r.is_static() ? r.get_length() : 0, Dimension::dynamic())); +} + +bool ov::Shape::compatible(const Shape& s) const { + // If we don't know *this's rank, or we don't know s's rank, they are compatible. + if (!m_rank_is_static || s.rank().is_dynamic()) { + return true; + } + // If we do know *this's rank and s's rank, and they are unequal, they are incompatible. + else if (rank().get_length() != s.rank().get_length()) { + return false; + } + // If we know both the ranks and they are equal, then *this and s are compatible iff they + // are elementwise compatible everywhere. + else { + for (int64_t i = 0; i < rank().get_length(); i++) { + if (!m_dimensions[i].compatible(s.m_dimensions[i])) { + return false; + } + } + // If we are still here, we know that s1 and s2 have the same rank and are elementwise + // compatible everywhere. 
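
// Naming introduced by this patch, old -> new:
//   ngraph::PartialShape -> ov::Shape        (this file)
//   ngraph::Shape        -> ov::StaticShape  (static_shape.cpp below)
// Dimension and Rank keep their names under ov::.
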
+ return true; + } } -ngraph::Shape::Shape() : std::vector() {} +bool ov::Shape::same_scheme(const Shape& s) const { + if (rank().is_dynamic() && s.rank().is_dynamic()) { + return true; + } else if (rank().is_static() && s.rank().is_static()) { + if (rank().get_length() != s.rank().get_length()) { + return false; + } -ngraph::Shape::Shape(const std::initializer_list& axis_lengths) : std::vector(axis_lengths) {} + bool success = true; -ngraph::Shape::Shape(const std::vector& axis_lengths) : std::vector(axis_lengths) {} + for (int64_t i = 0; i < rank().get_length(); i++) { + success &= (*this)[i].same_scheme(s[i]); + } -ngraph::Shape::Shape(const Shape& axis_lengths) : std::vector(axis_lengths) {} + return success; + } else { + return false; + } +} + +bool ov::Shape::relaxes(const Shape& s) const { + if (rank().is_dynamic()) { + return true; + } else if (s.rank().is_static() && rank().get_length() == s.rank().get_length()) { + bool all_relax = true; + + for (int64_t i = 0; i < rank().get_length(); i++) { + all_relax &= ((*this)[i].relaxes(s[i])); + } + + return all_relax; + } else { + return false; + } +} + +bool ov::Shape::refines(const Shape& s) const { + if (s.rank().is_dynamic()) { + return true; + } else if (rank().is_static() && rank().get_length() == s.rank().get_length()) { + bool all_refine = true; -ngraph::Shape::Shape(size_t n, size_t initial_value) : std::vector(n, initial_value) {} + for (int64_t i = 0; i < rank().get_length(); i++) { + all_refine &= ((*this)[i].refines(s[i])); + } + + return all_refine; + } else { + return false; + } +} -ngraph::Shape::~Shape() {} +bool ov::Shape::merge_rank(Rank r) { + if (r.is_dynamic()) { + return true; + } else if (!m_rank_is_static) { + m_rank_is_static = true; + m_dimensions = std::vector(r.get_length(), Dimension::dynamic()); + m_shape_type = ShapeType::SHAPE_IS_UNKNOWN; + return true; + } else { + return (static_cast(m_dimensions.size()) == r.get_length()); + } +} + +ov::StaticShape ov::Shape::to_shape() const { + if (is_dynamic()) { + throw std::invalid_argument("to_shape was called on a dynamic shape."); + } + + std::vector shape_dimensions(m_dimensions.size()); + std::transform(m_dimensions.begin(), m_dimensions.end(), shape_dimensions.begin(), [](const Dimension& d) { + return d.get_length(); + }); + + return shape_dimensions; +} + +bool ov::Shape::merge_into(Shape& dst, const Shape& src) { + if (dst.rank().is_dynamic()) { + dst = src; + return true; + } else if (src.rank().is_dynamic()) { + // No change to dst. + return true; + } else if (dst.rank().get_length() != src.rank().get_length()) { + // Mismatching static ranks, cannot merge. + return false; + } else { + // Ranks are both static, and they match. + bool success = true; + for (int64_t i = 0; i < dst.rank().get_length(); i++) { + success &= Dimension::merge(dst[i], dst[i], src[i]); + } + return success; + } +} + +bool ov::Shape::broadcast_merge_into(Shape& dst, const Shape& src, const ngraph::op::AutoBroadcastSpec& autob) { + switch (autob.m_type) { + case ngraph::op::AutoBroadcastType::NONE: + return true; + case ngraph::op::AutoBroadcastType::NUMPY: { + if (dst.rank().is_dynamic() || src.rank().is_dynamic()) { + dst = Shape::dynamic(); + return true; + } else { + // Ranks are both static. + auto dst_rank = dst.rank().get_length(); + auto src_rank = src.rank().get_length(); + auto new_rank = std::max(dst_rank, src_rank); + std::vector dims(new_rank); + bool success = true; + for (int64_t i = 0; i < new_rank; i++) { + auto dsti = i < (new_rank - dst_rank) ? 
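
// relaxes()/refines() by example (a sketch using the ov::Shape API above):
ov::Shape general{ov::Dimension::dynamic(), 3};
ov::Shape specific{2, 3};
// general.relaxes(specific) == true   -- general claims nothing dim 0 violates
// specific.refines(general) == true   -- specific satisfies every claim general makes
// specific.relaxes(general) == false
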
Dimension(1) : dst[i - (new_rank - dst_rank)]; + auto srci = i < (new_rank - src_rank) ? Dimension(1) : src[i - (new_rank - src_rank)]; + success &= Dimension::broadcast_merge(dims[i], dsti, srci); + } + dst = Shape(std::move(dims)); + return success; + } + } + case ngraph::op::AutoBroadcastType::PDPD: { + if (dst.rank().is_dynamic() || src.rank().is_dynamic()) { + return true; + } else { + // Ranks are both static. + auto dst_rank = dst.rank().get_length(); + auto src_rank = src.rank().get_length(); + if (dst_rank == src_rank && dst.compatible(src)) + return true; + + int64_t axis = autob.m_axis; + if (axis < -1) { + return false; + } + if (axis == -1) { + axis = dst_rank - src_rank; + } + + size_t len = src_rank; + while (len > 0 && src[len - 1].is_static() && src[len - 1].get_length() == 1) { + --len; + } + + for (size_t i = axis; i < axis + len; ++i) { + if (!(dst[i].compatible(src[i - axis]))) { + return false; + } + } + + return true; + } + } + default: + NGRAPH_CHECK(false, "Unsupported auto broadcast type: ", autob.m_type); + } + + return false; +} + +bool ov::Shape::all_non_negative() const { + for (auto& d : m_dimensions) { + if (d.is_static() && d.get_length() < 0) { + return false; + } + } + + return true; +} + +const ov::Dimension& ov::Shape::operator[](size_t i) const { + if (i >= m_dimensions.size()) { + throw std::out_of_range("Accessing out-of-range dimension in Dimension[]"); + } + return m_dimensions[i]; +} + +ov::Dimension& ov::Shape::operator[](size_t i) { + if (i >= m_dimensions.size()) { + throw std::out_of_range("Accessing out-of-range dimension in Dimension[]"); + } + m_shape_type = ShapeType::SHAPE_IS_UPDATED; // We can't guarantee that the shape remains static or dynamic. + return m_dimensions[i]; +} -ngraph::Shape& ngraph::Shape::operator=(const Shape& v) { - static_cast*>(this)->operator=(v); - return *this; +const std::vector& ov::AttributeAdapter::get() { + if (!m_buffer_valid) { + m_buffer.clear(); + if (m_ref.rank().is_dynamic()) { + m_buffer.push_back(-2); + } else { + for (int64_t i = 0; i < m_ref.rank().get_length(); ++i) { + const auto& elt = static_cast(m_ref)[i]; + m_buffer.push_back(elt.is_dynamic() ? -1 : elt.get_length()); + } + } + m_buffer_valid = true; + } + return m_buffer; } -ngraph::Shape& ngraph::Shape::operator=(Shape&& v) noexcept { - static_cast*>(this)->operator=(std::move(v)); - return *this; +void ov::AttributeAdapter::set(const std::vector& value) { + m_ref = ov::Shape(); + if (value.size() == 1 && value[0] == -2) { + m_ref = ov::Shape::dynamic(); + } else { + std::vector dims; + dims.reserve(value.size()); + for (auto elt : value) { + dims.push_back(elt == -1 ? 
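
// PDPD broadcasting above: src is aligned at `axis` (axis == -1 means
// right-aligned), and trailing size-1 dims of src are ignored, e.g.
//   dst = {2, 3, 4, 5}, src = {3, 1}, axis = 1  -> compatible, because src
//   reduces to {3} and matches dst starting at index 1.
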
Dimension::dynamic() : elt); + } + m_ref = ov::Shape(dims); + } + m_buffer_valid = false; } -constexpr DiscreteTypeInfo ov::AttributeAdapter::type_info; +OPENVINO_API constexpr ov::DiscreteTypeInfo ov::AttributeAdapter::type_info; diff --git a/ngraph/core/src/static_shape.cpp b/ngraph/core/src/static_shape.cpp new file mode 100644 index 00000000000000..bfa0cf683dc877 --- /dev/null +++ b/ngraph/core/src/static_shape.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ngraph/shape.hpp" +#include "ngraph/util.hpp" + +using namespace std; + +std::ostream& ov::operator<<(std::ostream& s, const StaticShape& shape) { + s << "{"; + s << ngraph::join(shape); + s << "}"; + return s; +} + +ov::StaticShape::StaticShape() : std::vector() {} + +ov::StaticShape::StaticShape(const std::initializer_list& axis_lengths) : std::vector(axis_lengths) {} + +ov::StaticShape::StaticShape(const std::vector& axis_lengths) : std::vector(axis_lengths) {} + +ov::StaticShape::StaticShape(const StaticShape& axis_lengths) = default; + +ov::StaticShape::StaticShape(size_t n, size_t initial_value) : std::vector(n, initial_value) {} + +ov::StaticShape::~StaticShape() = default; + +ov::StaticShape& ov::StaticShape::operator=(const StaticShape& v) { + static_cast*>(this)->operator=(v); + return *this; +} + +ov::StaticShape& ov::StaticShape::operator=(StaticShape&& v) noexcept { + static_cast*>(this)->operator=(std::move(v)); + return *this; +} + +constexpr ov::DiscreteTypeInfo ov::AttributeAdapter::type_info; diff --git a/ngraph/python/src/ngraph/__init__.py b/ngraph/python/src/ngraph/__init__.py index 950a05e4edec41..dc6644b9969ea3 100644 --- a/ngraph/python/src/ngraph/__init__.py +++ b/ngraph/python/src/ngraph/__init__.py @@ -27,6 +27,7 @@ from ngraph.frontend import Place from ngraph.helpers import function_from_cnn from ngraph.helpers import function_to_cnn +from ngraph.helpers import partial_shape_from_data from ngraph.opset8 import absolute from ngraph.opset8 import absolute as abs from ngraph.opset8 import acos diff --git a/ngraph/python/src/ngraph/helpers.py b/ngraph/python/src/ngraph/helpers.py index d69a3b86529e32..7869affaba3e16 100644 --- a/ngraph/python/src/ngraph/helpers.py +++ b/ngraph/python/src/ngraph/helpers.py @@ -3,8 +3,10 @@ """nGraph helper functions.""" -from ngraph.impl import Function -from openvino.inference_engine import IENetwork +from typing import Union + +from ngraph.impl import Function, PartialShape +from openvino.inference_engine import IENetwork, DataPtr, CDataPtr def function_from_cnn(cnn_network: IENetwork) -> Function: @@ -18,3 +20,9 @@ def function_to_cnn(ng_function: Function) -> Function: """Get Inference Engine CNN network from nGraph function.""" capsule = Function.to_capsule(ng_function) return IENetwork(capsule) + + +def partial_shape_from_data(data: Union[DataPtr, CDataPtr]) -> PartialShape: + """Get nGraph PartialShape from Inference Engine Data.""" + capsule = data._get_partial_shape_capsule() + return PartialShape.from_capsule(capsule) diff --git a/ngraph/python/src/pyngraph/partial_shape.cpp b/ngraph/python/src/pyngraph/partial_shape.cpp index e63abbb15b801e..80004760dc0c02 100644 --- a/ngraph/python/src/pyngraph/partial_shape.cpp +++ b/ngraph/python/src/pyngraph/partial_shape.cpp @@ -17,6 +17,8 @@ namespace py = pybind11; +static const char* CAPSULE_NAME = "ngraph_partial_shape"; + void regclass_pyngraph_PartialShape(py::module m) { py::class_> shape(m, "PartialShape"); shape.doc() = 
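
// Capsule contract assumed across the bridge: the C++ side stores a
// shared_ptr<ngraph::PartialShape>* in a PyCapsule named
// "ngraph_partial_shape"; from_capsule() below unwraps it, which is what
// lets ngraph.helpers.partial_shape_from_data(data) return a PartialShape
// for inputs with dynamic dimensions.
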
"ngraph.impl.PartialShape wraps ngraph::PartialShape"; @@ -199,4 +201,18 @@ void regclass_pyngraph_PartialShape(py::module m) { shape.def("__repr__", [](const ngraph::PartialShape& self) -> std::string { return "() + ">"; }); + + shape.def_static("from_capsule", [](py::object* capsule) { + // get the underlying PyObject* which is a PyCapsule pointer + auto* pybind_capsule_ptr = capsule->ptr(); + // extract the pointer stored in the PyCapsule under the name CAPSULE_NAME + auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME); + + auto* ngraph_pShape = static_cast*>(capsule_ptr); + if (ngraph_pShape && *ngraph_pShape) { + return *ngraph_pShape; + } else { + throw std::runtime_error("The provided capsule does not contain an ngraph::PartialShape"); + } + }); } diff --git a/ngraph/python/src/pyngraph/shape.cpp b/ngraph/python/src/pyngraph/shape.cpp index a0938fa88ca8c9..424d0abddca8a4 100644 --- a/ngraph/python/src/pyngraph/shape.cpp +++ b/ngraph/python/src/pyngraph/shape.cpp @@ -42,6 +42,6 @@ void regclass_pyngraph_Shape(py::module m) { }); shape.def("__repr__", [](const ngraph::Shape& self) -> std::string { - return "<" + py::cast(self).attr("__str__")().cast() + ">"; + return "() + ">"; }); } diff --git a/ngraph/python/tests/test_ngraph/test_core.py b/ngraph/python/tests/test_ngraph/test_core.py index 620341ff4f82a0..87974c6e527d2d 100644 --- a/ngraph/python/tests/test_ngraph/test_core.py +++ b/ngraph/python/tests/test_ngraph/test_core.py @@ -98,7 +98,7 @@ def test_partial_shape(): assert list(ps.get_max_shape()) == [1, 2, 3] assert list(ps.get_min_shape()) == [1, 2, 3] assert list(ps.to_shape()) == [1, 2, 3] - assert repr(shape) == "" + assert repr(shape) == "" assert repr(ps) == "" ps = PartialShape([Dimension(1), Dimension(2), Dimension(3), Dimension.dynamic()]) diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 1ac9ee9fcbad84..c682537d532872 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -49,6 +49,7 @@ set(SRC misc.cpp node_input_output.cpp op.cpp + opset.cpp op_eval/binary_convolution.cpp op_eval/bucketize.cpp op_eval/clamp.cpp @@ -76,6 +77,7 @@ set(SRC pass_config.cpp pass_manager.cpp pattern.cpp + preprocess.cpp provenance.cpp replace_node.cpp reshape_opt_kernel.cpp @@ -416,7 +418,6 @@ set(MULTI_TEST_SRC backend/bucketize.in.cpp backend/builder_reduce_ops_opset1.in.cpp backend/ceiling.in.cpp - backend/comparison.in.cpp backend/concat.in.cpp backend/constant.in.cpp backend/convolution_backprop.in.cpp diff --git a/ngraph/test/backend/comparison.in.cpp b/ngraph/test/backend/comparison.in.cpp deleted file mode 100644 index dbe1247b89668c..00000000000000 --- a/ngraph/test/backend/comparison.in.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" -#include "ngraph/log.hpp" -#include "ngraph/ngraph.hpp" -#include "ngraph/runtime/tensor.hpp" -#include "runtime/backend.hpp" -#include "util/all_close.hpp" -#include "util/all_close_f.hpp" -#include "util/ndarray.hpp" -#include "util/random.hpp" -#include "util/test_control.hpp" -#include "util/test_tools.hpp" - -using namespace std; -using namespace ngraph; - -static string s_manifest = "${MANIFEST}"; - -NGRAPH_TEST(${BACKEND_NAME}, notequal) { - Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto f = make_shared(make_shared(A, B), 
ParameterVector{A, B}); - - auto backend = runtime::Backend::create("${BACKEND_NAME}"); - - // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); - copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); - copy_data(b, vector{1, 8, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); - - auto handle = backend->compile(f); - handle->call_with_validate({result}, {a, b}); - EXPECT_EQ((vector{0, 0, 1, 1, 1, 0, 0, 1}), read_vector(result)); -} diff --git a/ngraph/test/backend/zero_sized.in.cpp b/ngraph/test/backend/zero_sized.in.cpp index 29846308d63c1b..ba8bdf53dbf57b 100644 --- a/ngraph/test/backend/zero_sized.in.cpp +++ b/ngraph/test/backend/zero_sized.in.cpp @@ -227,10 +227,6 @@ NGRAPH_TEST(${BACKEND_NAME}, zero_sized_multiply) { make_binary_empty_test("${BACKEND_NAME}"); } -NGRAPH_TEST(${BACKEND_NAME}, zero_sized_not_equal) { - make_binary_empty_test("${BACKEND_NAME}", true); -} - NGRAPH_TEST(${BACKEND_NAME}, zero_sized_power) { make_binary_empty_test("${BACKEND_NAME}"); } diff --git a/ngraph/test/check.cpp b/ngraph/test/check.cpp index 72e04f3834abc3..485686688e94b2 100644 --- a/ngraph/test/check.cpp +++ b/ngraph/test/check.cpp @@ -6,39 +6,78 @@ #include -using namespace ngraph; +#include "openvino/core/except.hpp" + using namespace std; TEST(check, check_true_string_info) { - NGRAPH_CHECK(true, "this should not throw"); + OPENVINO_ASSERT(true, "this should not throw"); } TEST(check, check_true_non_string_info) { - NGRAPH_CHECK(true, "this should not throw", 123); + OPENVINO_ASSERT(true, "this should not throw", 123); } TEST(check, check_true_no_info) { - NGRAPH_CHECK(true); + OPENVINO_ASSERT(true); } TEST(check, check_false_string_info) { - EXPECT_THROW({ NGRAPH_CHECK(false, "this should throw"); }, CheckFailure); + EXPECT_THROW({ OPENVINO_ASSERT(false, "this should throw"); }, ov::AssertFailure); } TEST(check, check_false_non_string_info) { - EXPECT_THROW({ NGRAPH_CHECK(false, "this should throw", 123); }, CheckFailure); + EXPECT_THROW({ OPENVINO_ASSERT(false, "this should throw", 123); }, ov::AssertFailure); } TEST(check, check_false_no_info) { - EXPECT_THROW({ NGRAPH_CHECK(false); }, CheckFailure); + EXPECT_THROW({ OPENVINO_ASSERT(false); }, ov::AssertFailure); } TEST(check, check_with_explanation) { bool check_failure_thrown = false; + try { + OPENVINO_ASSERT(false, "xyzzyxyzzy", 123); + } catch (const ov::AssertFailure& e) { + check_failure_thrown = true; + EXPECT_PRED_FORMAT2(testing::IsSubstring, "Check 'false' failed at", e.what()); + EXPECT_PRED_FORMAT2(testing::IsSubstring, "xyzzyxyzzy123", e.what()); + } + + EXPECT_TRUE(check_failure_thrown); +} + +TEST(check, ngraph_check_true_string_info) { + NGRAPH_CHECK(true, "this should not throw"); +} + +TEST(check, ngraph_check_true_non_string_info) { + NGRAPH_CHECK(true, "this should not throw", 123); +} + +TEST(check, ngraph_check_true_no_info) { + NGRAPH_CHECK(true); +} + +TEST(check, ngraph_check_false_string_info) { + EXPECT_THROW({ NGRAPH_CHECK(false, "this should throw"); }, ngraph::CheckFailure); +} + +TEST(check, ngraph_check_false_non_string_info) { + EXPECT_THROW({ NGRAPH_CHECK(false, "this should throw", 123); }, ngraph::CheckFailure); +} + +TEST(check, ngraph_check_false_no_info) { + EXPECT_THROW({ NGRAPH_CHECK(false); }, ngraph::CheckFailure); +} + +TEST(check, ngraph_check_with_explanation) { + bool check_failure_thrown = false; + try { NGRAPH_CHECK(false, "xyzzyxyzzy", 123); - } catch (const 
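
// Both macro families stay available side by side after this change:
// OPENVINO_ASSERT throws ov::AssertFailure while NGRAPH_CHECK keeps throwing
// ngraph::CheckFailure, and both render the same "Check '...' failed at"
// prefix, e.g.:
//   OPENVINO_ASSERT(i < size, "index ", i, " out of range");
//   NGRAPH_CHECK(i < size, "index ", i, " out of range");
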
CheckFailure& e) { + } catch (const ngraph::CheckFailure& e) { check_failure_thrown = true; EXPECT_PRED_FORMAT2(testing::IsSubstring, "Check 'false' failed at", e.what()); EXPECT_PRED_FORMAT2(testing::IsSubstring, "xyzzyxyzzy123", e.what()); diff --git a/ngraph/test/opset.cpp b/ngraph/test/opset.cpp new file mode 100644 index 00000000000000..15392615b3d3a1 --- /dev/null +++ b/ngraph/test/opset.cpp @@ -0,0 +1,144 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/opsets/opset.hpp" + +#include + +#include "openvino/opsets/opset1.hpp" +#include "openvino/opsets/opset2.hpp" +#include "openvino/opsets/opset3.hpp" +#include "openvino/opsets/opset4.hpp" +#include "openvino/opsets/opset5.hpp" +#include "openvino/opsets/opset6.hpp" +#include "openvino/opsets/opset7.hpp" +#include "openvino/opsets/opset8.hpp" + +TEST(opset, opset1) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset1_dump) { + const auto& opset = ov::get_opset1(); + std::cout << "All opset1 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(110, opset.get_types_info().size()); +} + +TEST(opset, opset2) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset2_dump) { + const auto& opset = ov::get_opset2(); + std::cout << "All opset2 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(112, opset.get_types_info().size()); +} + +TEST(opset, opset3) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset3_dump) { + const auto& opset = ov::get_opset3(); + std::cout << "All opset3 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(127, opset.get_types_info().size()); +} + +TEST(opset, opset4) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset4_dump) { + const auto& opset = ov::get_opset4(); + std::cout << "All opset4 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(137, opset.get_types_info().size()); +} + +TEST(opset, opset5) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset5_dump) { + const auto& opset = ov::get_opset5(); + std::cout << "All opset5 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(145, opset.get_types_info().size()); +} + +TEST(opset, opset6) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset6_dump) { + const auto& opset = ov::get_opset6(); + std::cout << "All opset6 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(152, opset.get_types_info().size()); +} + +TEST(opset, opset7) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset7_dump) { + const auto& opset = ov::get_opset7(); + std::cout << "All opset7 operations: 
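
// The *_dump tests pin the registered-op count per opset (110 for opset1
// through 162 for opset8); adding an op to an opset without bumping the
// matching ASSERT_EQ fails the corresponding test.
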
"; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(156, opset.get_types_info().size()); +} + +TEST(opset, opset8) { + auto op = std::make_shared(); + ASSERT_NE(nullptr, op); + EXPECT_TRUE(ov::op::util::is_parameter(op)); +} + +TEST(opset, opset8_dump) { + const auto& opset = ov::get_opset8(); + std::cout << "All opset8 operations: "; + for (const auto& t : opset.get_types_info()) { + std::cout << t.name << " "; + } + std::cout << std::endl; + ASSERT_EQ(162, opset.get_types_info().size()); +} diff --git a/ngraph/test/preprocess.cpp b/ngraph/test/preprocess.cpp new file mode 100644 index 00000000000000..2f64ab2c18e830 --- /dev/null +++ b/ngraph/test/preprocess.cpp @@ -0,0 +1,205 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "ngraph/ops.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" +#include "util/all_close.hpp" +#include "util/all_close_f.hpp" +#include "util/test_tools.hpp" + +using namespace ov; +using namespace ov::preprocess; +using namespace ngraph::test; + +static std::shared_ptr create_simple_function(element::Type type, const Shape& shape) { + auto data1 = std::make_shared(type, shape); + data1->set_friendly_name("input1"); + auto res = std::make_shared(data1); + res->set_friendly_name("Result"); + return std::make_shared(ResultVector{res}, ParameterVector{data1}); +} + +static std::shared_ptr create_2inputs(element::Type type, const Shape& shape) { + auto data1 = std::make_shared(type, shape); + data1->set_friendly_name("input1"); + auto data2 = std::make_shared(type, shape); + data2->set_friendly_name("input2"); + auto res1 = std::make_shared(data1); + res1->set_friendly_name("Result"); + auto res2 = std::make_shared(data2); + res2->set_friendly_name("Result"); + return std::make_shared(ResultVector{res1, res2}, ParameterVector{data1, data2}); +} + +TEST(pre_post_process, simple_mean_scale) { + auto f = create_simple_function(element::f32, StaticShape{1, 3, 2, 2}); + f = PrePostProcessor().input(InputInfo().preprocess(PreProcessSteps().mean(1.f).scale(2.f))).build(f); + + auto result = std::make_shared(); + f->evaluate({result}, + {make_host_tensor(StaticShape{1, 3, 2, 2}, + {1., 3., 5., 7., 9., 11., 13., 15., 17., 19., 21., 23.})}); + auto result_val = read_vector(result); + EXPECT_TRUE(all_close_f(std::vector{0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.}, result_val)); +} + +TEST(pre_post_process, scale_then_mean) { + auto f = create_simple_function(element::f32, StaticShape{1, 3, 2, 2}); + f = PrePostProcessor().input(InputInfo().preprocess(PreProcessSteps().scale(2.0f).mean(2.0f))).build(f); + + auto result = std::make_shared(); + f->evaluate({result}, + {make_host_tensor(StaticShape{1, 3, 2, 2}, + {2., 4., 6., 8., 10., 12., 14., 16., 18., 20., 100., 200.})}); + auto result_val = read_vector(result); + EXPECT_TRUE(all_close_f(std::vector{-1., 0, 1., 2., 3., 4., 5., 6., 7., 8., 48., 98.}, result_val)); +} + +TEST(pre_post_process, convert_element_type_and_scale) { + auto f = create_simple_function(element::i8, StaticShape{1, 3, 2, 2}); + f = PrePostProcessor() + .input(InputInfo() + .tensor(InputTensorInfo().set_element_type(element::i16)) + .preprocess(PreProcessSteps() + .convert_element_type(element::f32) + .scale(2.f) + .convert_element_type(element::i8))) + .build(f); + + auto result = std::make_shared(); + f->evaluate( + {result}, + {make_host_tensor(StaticShape{1, 3, 
+
+TEST(pre_post_process, convert_element_type_and_scale) {
+    auto f = create_simple_function(element::i8, StaticShape{1, 3, 2, 2});
+    f = PrePostProcessor()
+            .input(InputInfo()
+                       .tensor(InputTensorInfo().set_element_type(element::i16))
+                       .preprocess(PreProcessSteps()
+                                       .convert_element_type(element::f32)
+                                       .scale(2.f)
+                                       .convert_element_type(element::i8)))
+            .build(f);
+
+    auto result = std::make_shared<HostTensor>();
+    f->evaluate(
+        {result},
+        {make_host_tensor<element::Type_t::i16>(StaticShape{1, 3, 2, 2},
+                                                {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 10000, 200})});
+    auto result_val = read_vector<int8_t>(result);
+    EXPECT_TRUE(all_close(std::vector<int8_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, (int8_t)5000, 100}, result_val));
+    EXPECT_EQ(f->get_parameters().front()->get_element_type(), element::i16);
+
+    ASSERT_EQ(f->get_output_element_type(0), element::i8);
+}
+
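The three tests above pin down the preprocessing arithmetic: `mean(m)` subtracts `m`, `scale(s)` divides by `s`, steps apply in insertion order, and the `(int8_t)5000` expectation encodes that converting to a narrower integer type wraps rather than saturates. A standalone sketch of just that arithmetic, plain C++ and independent of the preprocess API (assumptions as stated in the comments; integer narrowing is implementation-defined before C++20 and defined as modulo 2^N since):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    // simple_mean_scale: mean(1) then scale(2)  =>  (x - 1) / 2 maps 1, 3, ..., 23 to 0..11.
    std::vector<double> in{1., 3., 5., 7., 9., 11., 13., 15., 17., 19., 21., 23.};
    for (std::size_t i = 0; i < in.size(); ++i)
        assert((in[i] - 1.) / 2. == static_cast<double>(i));  // exact in binary floating point

    // scale_then_mean: scale(2) then mean(2)  =>  x / 2 - 2 maps 2 to -1 and 200 to 98.
    assert(2. / 2. - 2. == -1.);
    assert(200. / 2. - 2. == 98.);

    // convert_element_type_and_scale: 10000 / 2 = 5000, then narrowing to int8 wraps:
    // 5000 mod 256 = 136, reinterpreted as 136 - 256 = -120, i.e. (int8_t)5000.
    int scaled = 10000 / 2;
    assert(static_cast<int8_t>(scaled) == -120);  // two's-complement wrap, not saturation
    return 0;
}
```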
+TEST(pre_post_process, convert_element_type_from_unknown) {
+    auto f = create_simple_function(element::i32, StaticShape{1, 3, 224, 224});
+    ASSERT_ANY_THROW(
+        f = PrePostProcessor()
+                .input(InputInfo().preprocess(
+                    PreProcessSteps().convert_element_type(element::dynamic).convert_element_type(element::i32)))
+                .build(f));
+}
+
+TEST(pre_post_process, convert_element_type_no_match) {
+    auto f = create_simple_function(element::i32, StaticShape{1, 3, 224, 224});
+    ASSERT_ANY_THROW(f = PrePostProcessor()
+                             .input(InputInfo()
+                                        .tensor(InputTensorInfo().set_element_type(element::i32))
+                                        .preprocess(PreProcessSteps().convert_element_type(element::f32).scale(2.0f)))
+                             .build(f));
+}
+
+TEST(pre_post_process, scale_not_float) {
+    auto f = create_simple_function(element::i32, StaticShape{1, 3, 224, 224});
+    ASSERT_ANY_THROW(
+        f = PrePostProcessor()
+                .input(InputInfo().preprocess(PreProcessSteps().convert_element_type(element::f32).scale(2.0f)))
+                .build(f));
+}
+
+TEST(pre_post_process, mean_not_float) {
+    auto f = create_simple_function(element::i32, StaticShape{1, 3, 224, 224});
+    ASSERT_ANY_THROW(
+        f = PrePostProcessor()
+                .input(InputInfo().preprocess(PreProcessSteps().convert_element_type(element::f32).mean(2.0f)))
+                .build(f));
+}
+
+TEST(pre_post_process, tensor_element_type_and_scale) {
+    auto f = create_simple_function(element::i8, StaticShape{1, 3, 1, 1});
+    f = PrePostProcessor()
+            .input(InputInfo()
+                       .tensor(InputTensorInfo().set_element_type(element::f32))
+                       .preprocess(PreProcessSteps().scale(2.0f).convert_element_type(element::i8)))
+            .build(f);
+
+    auto result = std::make_shared<HostTensor>();
+    f->evaluate({result}, {make_host_tensor<element::Type_t::f32>(StaticShape{1, 3, 1, 1}, {2., 4., 6.})});
+    auto result_val = read_vector<int8_t>(result);
+    EXPECT_TRUE(all_close(std::vector<int8_t>{1, 2, 3}, result_val));
+    EXPECT_EQ(f->get_parameters().front()->get_element_type(), element::f32);
+
+    ASSERT_EQ(f->get_output_element_type(0), element::i8);
+}
+
+TEST(pre_post_process, custom_preprocessing) {
+    auto f = create_simple_function(element::i32, StaticShape{1, 3, 1, 1});
+    f = PrePostProcessor()
+            .input(InputInfo().preprocess(PreProcessSteps().custom([](const std::shared_ptr<Node>& node) {
+                auto abs = std::make_shared<op::v0::Abs>(node);
+                abs->set_friendly_name(node->get_friendly_name() + "/abs");
+                return abs;
+            })))
+            .build(f);
+
+    auto result = std::make_shared<HostTensor>();
+    f->evaluate({result}, {make_host_tensor<element::Type_t::i32>(StaticShape{1, 3, 1, 1}, {0, 4, -6})});
+    auto result_val = read_vector<int32_t>(result);
+    EXPECT_TRUE(all_close(std::vector<int32_t>{0, 4, 6}, result_val));
+}
+
+TEST(pre_post_process, test_lvalue) {
+    auto f = create_simple_function(element::i8, StaticShape{1, 3, 1, 1});
+    auto p = PrePostProcessor();
+    auto p1 = std::move(p);
+    p = std::move(p1);
+    auto inputInfo = InputInfo();
+    auto inputInfo2 = std::move(inputInfo);
+    inputInfo = std::move(inputInfo2);
+    {
+        auto inputTensorInfo = InputTensorInfo();
+        auto inputTensorInfo2 = std::move(inputTensorInfo);
+        inputTensorInfo = std::move(inputTensorInfo2);
+        auto& same = inputTensorInfo.set_element_type(element::f32);
+        inputInfo.tensor(std::move(same));
+    }
+    {
+        auto preprocessSteps = PreProcessSteps();
+        auto preprocessSteps2 = std::move(preprocessSteps);
+        preprocessSteps = std::move(preprocessSteps2);
+        preprocessSteps.mean(1.f);
+        preprocessSteps.scale(2.f);
+        preprocessSteps.custom([](const std::shared_ptr<Node>& node) {
+            auto abs = std::make_shared<op::v0::Abs>(node);
+            abs->set_friendly_name(node->get_friendly_name() + "/abs");
+            return abs;
+        });
+        auto& same = preprocessSteps.convert_element_type(element::i8);
+        inputInfo.preprocess(std::move(same));
+    }
+    p.input(std::move(inputInfo));
+    f = p.build(f);
+
+    auto result = std::make_shared<HostTensor>();
+    f->evaluate({result}, {make_host_tensor<element::Type_t::f32>(StaticShape{1, 3, 1, 1}, {-3., 5., 7.})});
+    auto result_val = read_vector<int8_t>(result);
+    EXPECT_TRUE(all_close(std::vector<int8_t>{2, 2, 3}, result_val));
+    EXPECT_EQ(f->get_parameters().front()->get_element_type(), element::f32);
+
+    ASSERT_EQ(f->get_output_element_type(0), element::i8);
+}
+
+TEST(pre_post_process, test_2_inputs_basic) {
+    auto f = create_2inputs(element::f32, StaticShape{1, 3, 1, 1});
+    { f = PrePostProcessor().input(InputInfo(1).preprocess(PreProcessSteps().mean(1.f).scale(2.0f))).build(f); }
+    auto result1 = std::make_shared<HostTensor>();
+    auto result2 = std::make_shared<HostTensor>();
+    auto input1 = make_host_tensor<element::Type_t::f32>(StaticShape{1, 3, 1, 1}, {3., 5., 7.});
+    auto input2 = make_host_tensor<element::Type_t::f32>(StaticShape{1, 3, 1, 1}, {3., 5., 7.});
+    f->evaluate({result1, result2}, {input1, input2});
+
+    auto result1_val = read_vector<float>(result1);
+    EXPECT_TRUE(all_close_f(std::vector<float>{3, 5, 7}, result1_val));
+
+    auto result2_val = read_vector<float>(result2);
+    EXPECT_TRUE(all_close_f(std::vector<float>{1, 2, 3}, result2_val));
+}
diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest
index 8c7fad49569dda..8ac6a7d0564ff8 100644
--- a/ngraph/test/runtime/ie/unit_test.manifest
+++ b/ngraph/test/runtime/ie/unit_test.manifest
@@ -290,7 +290,6 @@ zero_sized_lesseq
 zero_sized_maximum
 zero_sized_minimum
 zero_sized_multiply
-zero_sized_not_equal
 zero_sized_power
 zero_sized_subtract
 sum_trivial
diff --git a/ngraph/test/type_prop/broadcast.cpp b/ngraph/test/type_prop/broadcast.cpp
index 2d76c8df845732..094377bb5e2305 100644
--- a/ngraph/test/type_prop/broadcast.cpp
+++ b/ngraph/test/type_prop/broadcast.cpp
@@ -85,8 +85,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) {
         auto bc = make_shared<TypeParam>(param, target_shape, axes_mapping);
         FAIL() << "Broadcast: target shape mismatch with input rank not detected";
     } catch (const NodeValidationFailure& error) {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Broadcast axes_mapping shape Shape{3} doesn't match rank of input tensor 2");
+        EXPECT_HAS_SUBSTRING(error.what(), "Broadcast axes_mapping shape {3} doesn't match rank of input tensor 2");
     } catch (...) {
         FAIL() << "Deduced type check failed for unexpected reason";
     }
diff --git a/ngraph/test/type_prop/constant.cpp b/ngraph/test/type_prop/constant.cpp
index facc9d1b57eb18..115f6584078195 100644
--- a/ngraph/test/type_prop/constant.cpp
+++ b/ngraph/test/type_prop/constant.cpp
@@ -41,7 +41,7 @@ TEST(type_prop, tensor_constant_bad_count) {
     } catch (const NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
                              std::string("Did not get the expected number of literals for a "
-                                         "constant of shape Shape{2, 2} (got 3, expected 1 or 4)"));
+                                         "constant of shape {2, 2} (got 3, expected 1 or 4)"));
     } catch (...) {
         FAIL() << "Deduced type check failed for unexpected reason";
     }
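The broadcast.cpp and constant.cpp updates above track a formatting change in validation messages: shapes now print as `{3}` or `{2, 2}` without the `Shape` prefix. The try/FAIL/catch idiom used in both files is worth calling out, since it checks two things at once: that the right exception type is raised, and that its message contains the expected fragment. A generic sketch with plain gtest follows; `validate_rank` is a hypothetical stand-in for the node validation above, and `EXPECT_HAS_SUBSTRING` in the real tests is, to my understanding, a helper around the same `testing::IsSubstring` predicate:

```cpp
#include <gtest/gtest.h>

#include <stdexcept>

// Hypothetical stand-in for NodeValidationFailure-style checks.
static void validate_rank(int rank) {
    if (rank != 2)
        throw std::invalid_argument("axes_mapping shape {3} doesn't match rank of input tensor 2");
}

TEST(error_message_style, substring_check) {
    try {
        validate_rank(3);
        FAIL() << "rank mismatch not detected";  // reached only if no exception was thrown
    } catch (const std::invalid_argument& e) {
        // Assert on a stable fragment of the message, not the whole string.
        EXPECT_PRED_FORMAT2(testing::IsSubstring, "doesn't match rank", e.what());
    } catch (...) {
        FAIL() << "failed for unexpected reason";  // wrong exception type
    }
}
```

Anchoring on a substring keeps such tests robust to incidental message edits, which is exactly why only the `Shape` prefix had to change in this diff.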
diff --git a/ngraph/test/util.cpp b/ngraph/test/util.cpp
index 83be8232df94b2..e94c2dacd85976 100644
--- a/ngraph/test/util.cpp
+++ b/ngraph/test/util.cpp
@@ -13,6 +13,7 @@
 #include "ngraph/graph_util.hpp"
 #include "ngraph/ngraph.hpp"
 #include "ngraph/op/util/op_annotations.hpp"
+#include "ngraph/opsets/opset3.hpp"
 #include "ngraph/opsets/opset6.hpp"
 #include "ngraph/opsets/opset8.hpp"
 #include "ngraph/pass/manager.hpp"
@@ -236,7 +237,7 @@ TEST(graph_util, clone_multiple_results) {
     auto copy = clone_function(*f);
 }
 
-TEST(graph_util, clone_function_variables) {
+TEST(graph_util, clone_function_variables_dynamic) {
     auto c_fp16 = make_shared<opset8::Constant>(element::f16, Shape{3}, std::vector<float>{0});
     auto variable = make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "var_1"});
     auto read_value = make_shared<opset6::ReadValue>(c_fp16, variable);
@@ -253,6 +254,19 @@
     copy = clone_function(*f);
 }
 
+TEST(graph_util, clone_function_variables_validate_partially) {
+    auto c_fp16 = make_shared<opset8::Constant>(element::f16, Shape{3}, std::vector<float>{0});
+
+    auto read_value = make_shared<opset3::ReadValue>(c_fp16, "var_1");
+    auto assign = make_shared<opset3::Assign>(read_value, "var_1");
+    auto res = make_shared<opset8::Result>(read_value);
+    auto f = make_shared<Function>(ResultVector{res}, SinkVector{assign}, ParameterVector{});
+    f->validate_nodes_and_infer_types();
+    NodeMap nm;
+    auto copy = clone_function(*f, nm);
+    nm[assign.get()]->validate_and_infer_types();
+}
+
 TEST(graph_util, clone_rt_info) {
     const std::string testAffinity = "CPU";
     std::shared_ptr<Function> original_f;
diff --git a/thirdparty/cnpy/README.md b/thirdparty/cnpy/README.md
index 37c4a4340c8e4e..4f0f42ad0fc8ca 100644
--- a/thirdparty/cnpy/README.md
+++ b/thirdparty/cnpy/README.md
@@ -51,5 +51,3 @@ struct NpyArray {
     template<typename T> T* data();
 };
 ```
-
-See [example1.cpp](example1.cpp) for examples of how to use the library. example1 will also be build during cmake installation.
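One detail of the new `clone_function_variables_validate_partially` test that is easy to miss: the `NodeMap` out-parameter of `clone_function` maps each original node (by raw pointer) to its cloned counterpart, which is what lets the test re-validate only the cloned `Assign` sink. A compressed sketch of that lookup pattern; this is a sketch under the same ngraph headers the test uses, with opset type choices mirroring the test as reconstructed above rather than a guaranteed API surface (`cloned_assign` is a hypothetical helper name):

```cpp
#include "ngraph/graph_util.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset3.hpp"
#include "ngraph/opsets/opset8.hpp"

using namespace ngraph;

// Clone a single-state Function and return the clone of its Assign sink.
std::shared_ptr<Node> cloned_assign() {
    auto c = std::make_shared<opset8::Constant>(element::f16, Shape{3}, std::vector<float>{0});
    auto read_value = std::make_shared<opset3::ReadValue>(c, "var_1");
    auto assign = std::make_shared<opset3::Assign>(read_value, "var_1");
    auto res = std::make_shared<opset8::Result>(read_value);
    auto f = std::make_shared<Function>(ResultVector{res}, SinkVector{assign}, ParameterVector{});

    NodeMap nm;                          // filled with original Node* -> cloned node entries
    auto copy = clone_function(*f, nm);  // populates nm while cloning
    return nm[assign.get()];             // the cloned Assign, owned by the returned shared_ptr
}
```

Validating just the looked-up clone, as the test does, exercises partial re-validation of a stateful graph without having to re-run inference over the whole cloned Function.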