diff --git a/docs/template_plugin/tests/functional/op_reference/convert_color_nv12.cpp b/docs/template_plugin/tests/functional/op_reference/convert_color_nv12.cpp new file mode 100644 index 00000000000000..2d0ab7b88bcaf8 --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/convert_color_nv12.cpp @@ -0,0 +1,142 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace ov; +using namespace InferenceEngine; +using namespace reference_tests; + +class ReferenceConvertColorNV12LayerTest : public testing::Test, public CommonReferenceTest { +public: + void SetUp() override { + } + +public: + template + static std::shared_ptr CreateFunction(const Tensor& input) { + const auto in = std::make_shared(input.type, input.shape); + std::shared_ptr conv; + conv = std::make_shared(in); + auto res = std::make_shared(conv); + return std::make_shared(ResultVector{res}, ParameterVector {in}); + } + + template + static std::shared_ptr CreateFunction2(const Tensor& input1, const Tensor& input2) { + const auto in1 = std::make_shared(input1.type, input1.shape); + const auto in2 = std::make_shared(input2.type, input2.shape); + std::shared_ptr conv; + conv = std::make_shared(in1, in2); + auto res = std::make_shared(conv); + return std::make_shared(ResultVector{res}, ParameterVector {in1, in2}); + } +}; + +TEST_F(ReferenceConvertColorNV12LayerTest, CompareWithHardcodedRefs_r_u8_single_rgb) { + auto input = std::vector {0x51, 0x51, 0x51, 0x51, 0xf0, 0x5a}; + auto input_shape = Shape{1, 3, 2, 1}; + auto exp_out = std::vector {0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0}; + auto out_shape = Shape{1, 2, 2, 3}; + Tensor inp_tensor(input_shape, element::u8, input); + inputData = {inp_tensor.data}; + function = CreateFunction(inp_tensor); + Tensor exp_tensor_u8(out_shape, element::u8, exp_out); + refOutData = {exp_tensor_u8.data}; + Exec(); +} + +TEST_F(ReferenceConvertColorNV12LayerTest, CompareWithHardcodedRefs_color_u8_single_bgr) { + auto input = std::vector {0xeb, 0x51, 0xeb, 0x51, 0xb8, 0x6d}; + auto input_shape = Shape{1, 3, 2, 1}; + auto exp_out = std::vector {37, 37, 164, 215, 216, 255, 37, 37, 164, 215, 216, 255}; + auto out_shape = Shape{1, 2, 2, 3}; + + Tensor inp_tensor(input_shape, element::u8, input); + inputData = {inp_tensor.data}; + + Tensor exp_tensor_u8(out_shape, element::u8, exp_out); + refOutData = {exp_tensor_u8.data}; + + function = CreateFunction(inp_tensor); + + Exec(); +} + +TEST_F(ReferenceConvertColorNV12LayerTest, CompareWithHardcodedRefs_g_fp32_single_rgb) { + threshold = 2.f; + auto input = std::vector {145.f, 145.f, 145.f, 145.f, 34.f, 54.f}; + auto input_shape = Shape{1, 3, 2, 1}; + auto exp_out = std::vector {0, 255.f, 0, 0, 255.f, 0, 0, 255.f, 0, 0, 255.f, 0}; + auto out_shape = Shape{1, 2, 2, 3}; + + Tensor inp_tensor(input_shape, element::f32, input); + inputData = {inp_tensor.data}; + + Tensor exp_tensor(out_shape, element::f32, exp_out); + refOutData = {exp_tensor.data}; + + function = CreateFunction(inp_tensor); + + Exec(); +} + +TEST_F(ReferenceConvertColorNV12LayerTest, CompareWithHardcodedRefs_batch_fp32_two_bgr) { + threshold = 2.f; + auto input_y = std::vector {81.f, 81.f, 81.f, 81.f, + 145.f, 145.f, 145.f, 145.f, + 41.f, 41.f, 41.f, 41.f}; + auto input_shape_y = Shape{3, 2, 2, 1}; + + auto input_uv = std::vector {240., 90., + 34., 54., + 110., 240.}; + auto input_shape_uv = Shape{3, 1, 1, 2}; + + auto exp_out = 
std::vector {0, 0, 255., 0, 0, 255., 0, 0, 255., 0, 0, 255., + 0, 255., 0, 0, 255., 0, 0, 255., 0, 0, 255., 0, + 255., 0, 0, 255., 0, 0, 255., 0, 0, 255., 0, 0}; + auto out_shape = Shape{3, 2, 2, 3}; + + Tensor inp_tensor_y(input_shape_y, element::f32, input_y); + Tensor inp_tensor_uv(input_shape_uv, element::f32, input_uv); + inputData = {inp_tensor_y.data, inp_tensor_uv.data}; + + Tensor exp_tensor(out_shape, element::f32, exp_out); + refOutData = {exp_tensor.data}; + + function = CreateFunction2(inp_tensor_y, inp_tensor_uv); + + Exec(); +} + +TEST_F(ReferenceConvertColorNV12LayerTest, CompareWithHardcodedRefs_color2x2_f32_two_rgb) { + threshold = 2.f; + auto input_y = std::vector {235, 81, 235, 81}; + auto input_shape_y = Shape{1, 2, 2, 1}; + + auto input_uv = std::vector {184, 109}; + auto input_shape_uv = Shape{1, 1, 1, 2}; + + auto exp_out = std::vector {164, 37, 37, 216, 215, 255, 164, 37, 37, 216, 215, 255}; + auto out_shape = Shape{1, 2, 2, 3}; + + Tensor inp_tensor_y(input_shape_y, element::f32, input_y); + Tensor inp_tensor_uv(input_shape_uv, element::f32, input_uv); + inputData = {inp_tensor_y.data, inp_tensor_uv.data}; + + Tensor exp_tensor(out_shape, element::f32, exp_out); + refOutData = {exp_tensor.data}; + + function = CreateFunction2(inp_tensor_y, inp_tensor_uv); + + Exec(); +} diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert_color_nv12.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert_color_nv12.cpp new file mode 100644 index 00000000000000..ccfe80367663eb --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert_color_nv12.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/convert_color_nv12.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +TEST_P(ConvertColorNV12LayerTest, Serialize) { + Serialize(); + } + +const std::vector inShapes_nhwc = { + {1, 10, 10, 1} +}; + +const std::vector inTypes = { + ov::element::u8, ov::element::f32 +}; + +const auto testCase_values = ::testing::Combine( + ::testing::ValuesIn(inShapes_nhwc), + ::testing::ValuesIn(inTypes), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ConvertColorNV12LayerTest, testCase_values, ConvertColorNV12LayerTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert_color_nv12.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert_color_nv12.cpp new file mode 100644 index 00000000000000..8f2832e3f6ae6b --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert_color_nv12.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/convert_color_nv12.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +const std::vector inShapes_nhwc = { + {1, 10, 10, 1} +}; + +const std::vector inTypes = { + ov::element::u8, ov::element::f32 +}; + +const auto testCase_values = ::testing::Combine( + ::testing::ValuesIn(inShapes_nhwc), + ::testing::ValuesIn(inTypes), + ::testing::Bool(), + ::testing::Bool(), + 
::testing::Values(CommonTestUtils::DEVICE_CPU) +); + + +INSTANTIATE_TEST_SUITE_P(smoke_TestsConvertColorNV12, ConvertColorNV12LayerTest, testCase_values, ConvertColorNV12LayerTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convert_color_nv12.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convert_color_nv12.hpp new file mode 100644 index 00000000000000..0dcf2719819266 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convert_color_nv12.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_layer/convert_color_nv12.hpp" + +namespace LayerTestsDefinitions { + +TEST_P(ConvertColorNV12LayerTest, CompareWithRefs) { + Run(); +}; + +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp new file mode 100644 index 00000000000000..acea1f021342b9 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_nv12.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +namespace LayerTestsDefinitions { + +using ConvertColorNV12ParamsTuple = std::tuple< + ov::Shape, // Input Shape + ov::element::Type, // Element type + bool, // Conversion type + bool, // 1 or 2 planes + std::string>; // Device name + +class ConvertColorNV12LayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convert_color_nv12.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convert_color_nv12.cpp new file mode 100644 index 00000000000000..6a166a4bddd8e4 --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convert_color_nv12.cpp @@ -0,0 +1,58 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/convert_color_nv12.hpp" +#include "openvino/op/nv12_to_rgb.hpp" +#include "openvino/op/nv12_to_bgr.hpp" + +namespace LayerTestsDefinitions { + +std::string ConvertColorNV12LayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + ov::Shape inputShape; + ov::element::Type type; + bool conversion, singlePlane; + std::string targetName; + std::tie(inputShape, type, conversion, singlePlane, targetName) = obj.param; + std::ostringstream result; + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "netPRC=" << type.c_type_string() << "_"; + result << "convRGB=" << conversion << "_"; + result << "singlePlane=" << singlePlane << "_"; + result << "targetDevice=" << targetName; + return result.str(); +} + 
+void ConvertColorNV12LayerTest::SetUp() {
+    ov::Shape inputShape;
+    ov::element::Type ngPrc;
+    bool conversionToRGB, singlePlane;
+    threshold = 2.0f; // NV12 color conversion can use a variety of algorithms, thus some deviation is allowed
+    std::tie(inputShape, ngPrc, conversionToRGB, singlePlane, targetDevice) = GetParam();
+    if (singlePlane) {
+        inputShape[1] = inputShape[1] * 3 / 2;
+        auto param = std::make_shared<ov::op::v0::Parameter>(ngPrc, inputShape);
+        std::shared_ptr<ov::Node> convert_color;
+        if (conversionToRGB) {
+            convert_color = std::make_shared<ov::op::v8::NV12toRGB>(param);
+        } else {
+            convert_color = std::make_shared<ov::op::v8::NV12toBGR>(param);
+        }
+        function = std::make_shared<ov::Function>(std::make_shared<ov::op::v0::Result>(convert_color),
+                                                  ov::ParameterVector{param}, "ConvertColorNV12");
+    } else {
+        auto uvShape = ov::Shape{inputShape[0], inputShape[1] / 2, inputShape[2] / 2, 2};
+        auto param_y = std::make_shared<ov::op::v0::Parameter>(ngPrc, inputShape);
+        auto param_uv = std::make_shared<ov::op::v0::Parameter>(ngPrc, uvShape);
+        std::shared_ptr<ov::Node> convert_color;
+        if (conversionToRGB) {
+            convert_color = std::make_shared<ov::op::v8::NV12toRGB>(param_y, param_uv);
+        } else {
+            convert_color = std::make_shared<ov::op::v8::NV12toBGR>(param_y, param_uv);
+        }
+        function = std::make_shared<ov::Function>(std::make_shared<ov::op::v0::Result>(convert_color),
+                                                  ov::ParameterVector{param_y, param_uv}, "ConvertColorNV12");
+    }
+}
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/ngraph/core/include/ngraph/op/nv12_to_bgr.hpp b/ngraph/core/include/ngraph/op/nv12_to_bgr.hpp
new file mode 100644
index 00000000000000..73483c407b0f86
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/nv12_to_bgr.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/nv12_to_bgr.hpp"
+
+namespace ngraph {
+namespace op {
+namespace v8 {
+using ov::op::v8::NV12toBGR;
+}  // namespace v8
+}  // namespace op
+}  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/op/nv12_to_rgb.hpp b/ngraph/core/include/ngraph/op/nv12_to_rgb.hpp
new file mode 100644
index 00000000000000..4d66f9cbdeef9d
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/nv12_to_rgb.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/nv12_to_rgb.hpp"
+
+namespace ngraph {
+namespace op {
+namespace v8 {
+using ov::op::v8::NV12toRGB;
+}  // namespace v8
+}  // namespace op
+}  // namespace ngraph
diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp
index bd21031b1a3091..ff07083328adbd 100644
--- a/ngraph/core/include/ngraph/ops.hpp
+++ b/ngraph/core/include/ngraph/ops.hpp
@@ -103,6 +103,8 @@
 #include "ngraph/op/normalize_l2.hpp"
 #include "ngraph/op/not.hpp"
 #include "ngraph/op/not_equal.hpp"
+#include "ngraph/op/nv12_to_bgr.hpp"
+#include "ngraph/op/nv12_to_rgb.hpp"
 #include "ngraph/op/one_hot.hpp"
 #include "ngraph/op/or.hpp"
 #include "ngraph/op/pad.hpp"
diff --git a/ngraph/core/include/openvino/op/nv12_to_bgr.hpp b/ngraph/core/include/openvino/op/nv12_to_bgr.hpp
new file mode 100644
index 00000000000000..38db41549efcd5
--- /dev/null
+++ b/ngraph/core/include/openvino/op/nv12_to_bgr.hpp
@@ -0,0 +1,62 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/util/convert_color_nv12_base.hpp"
+
+namespace ov {
+namespace op {
+namespace v8 {
+/// \brief Color conversion operation from NV12 to BGR format.
+/// Input: +/// - Input NV12 image can be represented in two ways: +/// a) Single plane (as it is in the file): NV12 height dimension is 1.5x bigger than image height. 'C' +/// dimension shall be 1. +/// b) Two separate planes (used this way in many physical video sources): Y and UV. In +/// this case +/// b1) Y plane has height same as image height. 'C' dimension equals to 1 +/// b2) UV plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 2. +/// - Supported element types: u8 or any supported floating-point type. +/// Output: +/// - Output node will have NHWC layout and shape HxW same as image spatial dimensions. +/// - Number of output channels 'C' will be 3, as per interleaved RGB format, first channel is B, last is R +/// +/// \details Conversion of each pixel from NV12 (YUV) to RGB space is represented by following formulas: +/// R = 1.164 * (Y - 16) + 1.596 * (V - 128) +/// G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128) +/// B = 1.164 * (Y - 16) + 2.018 * (U - 128) +/// Then R, G, B values are clipped to range (0, 255) +/// +class OPENVINO_API NV12toBGR : public util::ConvertColorNV12Base { +public: + OPENVINO_OP("NV12toBGR", "opset8", util::ConvertColorNV12Base); + + NV12toBGR() = default; + + /// \brief Constructs a conversion operation from input image in NV12 format + /// As per NV12 format definition, node height dimension shall be 1.5 times bigger than image height + /// so that image (w=640, h=480) is represented by NHWC shape {N,720,640,1} (height*1.5 x width) + /// + /// \param arg Node that produces the input tensor. Input tensor represents image in NV12 format (YUV). + explicit NV12toBGR(const Output& arg); + + /// \brief Constructs a conversion operation from 2-plane input image in NV12 format + /// In general case Y channel of image can be separated from UV channel which means that operation needs two nodes + /// for Y and UV planes respectively. Y plane has one channel, and UV has 2 channels, both expect 'NHWC' layout + /// + /// \param arg_y Node that produces the input tensor for Y plane (NHWC layout). Shall have WxH dimensions + /// equal to image dimensions. 'C' dimension equals to 1. + /// + /// \param arg_uv Node that produces the input tensor for UV plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 2. Channel 0 represents 'U', channel 1 represents 'V' + /// channel + /// + explicit NV12toBGR(const Output& arg_y, const Output& arg_uv); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/nv12_to_rgb.hpp b/ngraph/core/include/openvino/op/nv12_to_rgb.hpp new file mode 100644 index 00000000000000..188fe77eb714c6 --- /dev/null +++ b/ngraph/core/include/openvino/op/nv12_to_rgb.hpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/convert_color_nv12_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Color conversion operation from NV12 to RGB format. +/// Input: +/// - Input NV12 image can be represented in two ways: +/// a) Single plane (as it is in the file): NV12 height dimension is 1.5x bigger than image height. 'C' +/// dimension shall be 1. +/// b) Two separate planes (used this way in many physical video sources): Y and UV. In +/// this case +/// b1) Y plane has height same as image height. 
'C' dimension equals to 1 +/// b2) UV plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 2. +/// - Supported element types: u8 or any supported floating-point type. +/// Output: +/// - Output node will have NHWC layout and shape HxW same as image spatial dimensions. +/// - Number of output channels 'C' will be 3, as per interleaved RGB format, first channel is R, last is B +/// +/// \details Conversion of each pixel from NV12 (YUV) to RGB space is represented by following formulas: +/// R = 1.164 * (Y - 16) + 1.596 * (V - 128) +/// G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128) +/// B = 1.164 * (Y - 16) + 2.018 * (U - 128) +/// Then R, G, B values are clipped to range (0, 255) +/// +class OPENVINO_API NV12toRGB : public util::ConvertColorNV12Base { +public: + OPENVINO_OP("NV12toRGB", "opset8", util::ConvertColorNV12Base); + + NV12toRGB() = default; + + /// \brief Constructs a conversion operation from input image in NV12 format + /// As per NV12 format definition, node height dimension shall be 1.5 times bigger than image height + /// so that image (w=640, h=480) is represented by NHWC shape {N,720,640,1} (height*1.5 x width) + /// + /// \param arg Node that produces the input tensor. Input tensor represents image in NV12 format (YUV). + explicit NV12toRGB(const Output& arg); + + /// \brief Constructs a conversion operation from 2-plane input image in NV12 format + /// In general case Y channel of image can be separated from UV channel which means that operation needs two nodes + /// for Y and UV planes respectively. Y plane has one channel, and UV has 2 channels, both expect 'NHWC' layout + /// + /// \param arg_y Node that produces the input tensor for Y plane (NHWC layout). Shall have WxH dimensions + /// equal to image dimensions. 'C' dimension equals to 1. + /// + /// \param arg_uv Node that produces the input tensor for UV plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 2. Channel 0 represents 'U', channel 1 represents 'V' + /// channel + /// + NV12toRGB(const Output& arg_y, const Output& arg_uv); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/ops.hpp b/ngraph/core/include/openvino/op/ops.hpp index b37e9a4db3045f..350ed894022213 100644 --- a/ngraph/core/include/openvino/op/ops.hpp +++ b/ngraph/core/include/openvino/op/ops.hpp @@ -103,6 +103,8 @@ #include "openvino/op/non_zero.hpp" #include "openvino/op/normalize_l2.hpp" #include "openvino/op/not_equal.hpp" +#include "openvino/op/nv12_to_bgr.hpp" +#include "openvino/op/nv12_to_rgb.hpp" #include "openvino/op/one_hot.hpp" #include "openvino/op/pad.hpp" #include "openvino/op/parameter.hpp" diff --git a/ngraph/core/include/openvino/op/util/convert_color_nv12_base.hpp b/ngraph/core/include/openvino/op/util/convert_color_nv12_base.hpp new file mode 100644 index 00000000000000..ed0bdf697701d4 --- /dev/null +++ b/ngraph/core/include/openvino/op/util/convert_color_nv12_base.hpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace util { +/// \brief Base class for color conversion operation from NV12 to RGB/BGR format. +/// Input: +/// - Operation expects input shape in NHWC layout. 
+/// - Input NV12 image can be represented in two ways:
+///    a) Single plane: NV12 height dimension is 1.5x bigger than image height. 'C' dimension shall be 1
+///    b) Two separate planes: Y and UV. In this case
+///       b1) Y plane has height same as image height. 'C' dimension equals to 1
+///       b2) UV plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 2.
+/// - Supported element types: u8 or any supported floating-point type.
+/// Output:
+/// - Output node will have NHWC layout and shape HxW same as image spatial dimensions.
+/// - Number of output channels 'C' will be 3
+///
+/// \details Conversion of each pixel from NV12 (YUV) to RGB space is represented by following formulas:
+///     R = 1.164 * (Y - 16) + 1.596 * (V - 128)
+///     G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
+///     B = 1.164 * (Y - 16) + 2.018 * (U - 128)
+/// Then R, G, B values are clipped to range (0, 255)
+///
+class OPENVINO_API ConvertColorNV12Base : public Op {
+public:
+    /// \brief Exact conversion format details
+    /// Currently supports conversion from NV12 to RGB or BGR, in the future can be extended with NV21_to_RGBA/BGRA, etc
+    enum class ColorConversion : int { NV12_TO_RGB = 0, NV12_TO_BGR = 1 };
+
+protected:
+    ConvertColorNV12Base() = default;
+
+    /// \brief Constructs a conversion operation from input image in NV12 format
+    /// As per NV12 format definition, node height dimension shall be 1.5 times bigger than image height
+    /// so that image (w=640, h=480) is represented by NHWC shape {N,720,640,1} (height*1.5 x width)
+    ///
+    /// \param arg Node that produces the input tensor. Input tensor represents image in NV12 format (YUV).
+    /// \param format Conversion format.
+    explicit ConvertColorNV12Base(const Output<Node>& arg, ColorConversion format);
+
+    /// \brief Constructs a conversion operation from 2-plane input image in NV12 format
+    /// In general case Y channel of image can be separated from UV channel which means that operation needs two nodes
+    /// for Y and UV planes respectively. Y plane has one channel, and UV has 2 channels, both expect 'NHWC' layout
+    ///
+    /// \param arg_y Node that produces the input tensor for Y plane (NHWC layout). Shall have WxH dimensions
+    /// equal to image dimensions. 'C' dimension equals to 1.
+    ///
+    /// \param arg_uv Node that produces the input tensor for UV plane (NHWC layout). 'H' is half of image height,
+    /// 'W' is half of image width, 'C' dimension equals to 2. Channel 0 represents 'U', channel 1 represents 'V'
+    /// channel
+    ///
+    /// \param format Conversion format.
+ ConvertColorNV12Base(const Output& arg_y, const Output& arg_uv, ColorConversion format); + +public: + OPENVINO_OP("ConvertColorNV12Base", "util"); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + + bool has_evaluate() const override; + +protected: + bool is_type_supported(const ov::element::Type& type) const; + + ColorConversion m_format = ColorConversion::NV12_TO_RGB; +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp index f7e56ae6fb5431..0c686bdab09213 100644 --- a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp @@ -181,6 +181,8 @@ _OPENVINO_OP_REG(DeformableConvolution, ov::op::v8) _OPENVINO_OP_REG(MatrixNms, ov::op::v8) _OPENVINO_OP_REG(MaxPool, ov::op::v8) _OPENVINO_OP_REG(MulticlassNms, ov::op::v8) +_OPENVINO_OP_REG(NV12toBGR, ov::op::v8) +_OPENVINO_OP_REG(NV12toRGB, ov::op::v8) _OPENVINO_OP_REG(RandomUniform, ov::op::v8) _OPENVINO_OP_REG(Slice, ov::op::v8) _OPENVINO_OP_REG(If, ov::op::v8) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp new file mode 100644 index 00000000000000..bf204535c0efa3 --- /dev/null +++ b/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp @@ -0,0 +1,75 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include "openvino/op/util/convert_color_nv12_base.hpp" + +namespace ngraph { +namespace runtime { +namespace reference { +template +void color_convert_nv12(const T* arg_y, + const T* arg_uv, + T* out_ptr, + size_t batch_size, + size_t image_h, + size_t image_w, + size_t stride_y, + size_t stride_uv, + ov::op::util::ConvertColorNV12Base::ColorConversion color_format) { + // With C++20 - std::endian can be used at compile time + auto little_endian = []() -> int { + union { + int32_t i; + char c[4]; + } u = {0x00000001}; + return static_cast(u.c[0]); + }; + auto is_little_endian = little_endian(); + for (int batch = 0; batch < batch_size; batch++) { + T* out = out_ptr + batch * image_w * image_h; + auto y_ptr = arg_y + batch * stride_y; + auto uv_ptr = arg_uv + batch * stride_uv; + for (int h = 0; h < image_h; h++) { + for (int w = 0; w < image_w; w++) { + auto y_index = h * image_w + w; + // For little-endian systems: + // Y bytes are shuffled as Y1, Y0, Y3, Y2, Y5, Y4, etc. + // UV bytes are ordered as V0, U0, V1, U1, V2, U2, etc. + // For float point case follow the same order + auto add_y_index = is_little_endian ? (w % 2 ? -1 : 1) : 0; + auto y_val = static_cast(y_ptr[y_index + add_y_index]); + auto uv_index = (h / 2) * image_w + (w / 2) * 2; + auto u_val = static_cast(uv_ptr[uv_index + is_little_endian]); + auto v_val = static_cast(uv_ptr[uv_index + 1 - is_little_endian]); + auto c = y_val - 16.f; + auto d = u_val - 128.f; + auto e = v_val - 128.f; + auto clip = [](float a) -> T { + return a < 0.5f ? static_cast(0) : (a > 254.5f ? 
static_cast(255) : static_cast(a)); + }; + auto b = clip(1.164f * c + 2.018f * d); + auto g = clip(1.164f * c - 0.391f * d - 0.813f * e); + auto r = clip(1.164f * c + 1.596f * e); + if (color_format == ov::op::util::ConvertColorNV12Base::ColorConversion::NV12_TO_RGB) { + out[y_index * 3] = r; + out[y_index * 3 + 1] = g; + out[y_index * 3 + 2] = b; + } else if (color_format == ov::op::util::ConvertColorNV12Base::ColorConversion::NV12_TO_BGR) { + out[y_index * 3] = b; + out[y_index * 3 + 1] = g; + out[y_index * 3 + 2] = r; + } + } + } + } +} + +} // namespace reference +} // namespace runtime +} // namespace ngraph diff --git a/ngraph/core/src/op/nv12_to_bgr.cpp b/ngraph/core/src/op/nv12_to_bgr.cpp new file mode 100644 index 00000000000000..f57647839c7cfe --- /dev/null +++ b/ngraph/core/src/op/nv12_to_bgr.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/nv12_to_bgr.hpp" + +#include "itt.hpp" + +ov::op::v8::NV12toBGR::NV12toBGR(const Output& arg) + : util::ConvertColorNV12Base(arg, util::ConvertColorNV12Base::ColorConversion::NV12_TO_BGR) { + constructor_validate_and_infer_types(); +} + +ov::op::v8::NV12toBGR::NV12toBGR(const Output& arg_y, const Output& arg_uv) + : util::ConvertColorNV12Base(arg_y, arg_uv, util::ConvertColorNV12Base::ColorConversion::NV12_TO_BGR) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr ov::op::v8::NV12toBGR::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_NV12toBGR_clone_with_new_inputs); + OPENVINO_ASSERT(new_args.size() == 1 || new_args.size() == 2, "NV12toBGR shall have one or two input nodes"); + if (new_args.size() == 1) { + return std::make_shared(new_args.at(0)); + } else { + return std::make_shared(new_args.at(0), new_args.at(1)); + } +} diff --git a/ngraph/core/src/op/nv12_to_rgb.cpp b/ngraph/core/src/op/nv12_to_rgb.cpp new file mode 100644 index 00000000000000..5b716c98916472 --- /dev/null +++ b/ngraph/core/src/op/nv12_to_rgb.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/nv12_to_rgb.hpp" + +#include "itt.hpp" + +ov::op::v8::NV12toRGB::NV12toRGB(const Output& arg) + : util::ConvertColorNV12Base(arg, util::ConvertColorNV12Base::ColorConversion::NV12_TO_RGB) { + constructor_validate_and_infer_types(); +} + +ov::op::v8::NV12toRGB::NV12toRGB(const Output& arg_y, const Output& arg_uv) + : util::ConvertColorNV12Base(arg_y, arg_uv, util::ConvertColorNV12Base::ColorConversion::NV12_TO_RGB) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr ov::op::v8::NV12toRGB::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_NV12toRGB_clone_with_new_inputs); + OPENVINO_ASSERT(new_args.size() == 1 || new_args.size() == 2, "NV12toRGB shall have one or two input nodes"); + if (new_args.size() == 1) { + return std::make_shared(new_args.at(0)); + } else { + return std::make_shared(new_args.at(0), new_args.at(1)); + } +} diff --git a/ngraph/core/src/op/util/convert_color_nv12_base.cpp b/ngraph/core/src/op/util/convert_color_nv12_base.cpp new file mode 100644 index 00000000000000..0a65116e164b56 --- /dev/null +++ b/ngraph/core/src/op/util/convert_color_nv12_base.cpp @@ -0,0 +1,231 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/convert_color_nv12_base.hpp" + +#include +#include + +#include "itt.hpp" +#include 
"ngraph/runtime/reference/convert_color_nv12.hpp" +#include "openvino/core/layout.hpp" + +static const size_t N_DIM = 0; +static const size_t H_DIM = 1; +static const size_t W_DIM = 2; +static const size_t C_DIM = 3; + +ov::op::util::ConvertColorNV12Base::ConvertColorNV12Base(const Output& arg, ColorConversion format) + : Op({arg}), + m_format(format) {} + +ov::op::util::ConvertColorNV12Base::ConvertColorNV12Base(const Output& arg_y, + const Output& arg_uv, + ColorConversion format) + : Op({arg_y, arg_uv}), + m_format(format) { + constructor_validate_and_infer_types(); +} + +void ov::op::util::ConvertColorNV12Base::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v8_Convert_NV12_Base_validate_and_infer_types); + + NODE_VALIDATION_CHECK(this, + get_input_size() == 1 || get_input_size() == 2, + "NV12 conversion shall have one or 2 inputs, but it is ", + get_input_size()); + auto single_plane = get_input_size() == 1; + auto y_type = get_input_element_type(0); + NODE_VALIDATION_CHECK(this, + is_type_supported(y_type), + "Y input shall have u8 or floating-point precision, got ", + y_type); + const auto& shape_y = get_input_partial_shape(0); + if (shape_y.rank().is_static()) { + NODE_VALIDATION_CHECK(this, + shape_y.rank().get_length() == 4, + "Y input with static shape shall have 4 dimensions (N, H, W, C)"); + + NODE_VALIDATION_CHECK(this, + shape_y[C_DIM].is_dynamic() || shape_y[C_DIM].get_length() == 1, + "Y channels dimension shall be either dynamic or equal to 1. Current value is ", + shape_y[C_DIM].get_length()); + } + auto out_shape = shape_y; + auto out_type = y_type; + if (out_shape.rank().is_dynamic()) { + out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 3}; + } + out_shape[C_DIM] = 3; // 3 is number of channels (R, G, B) + if (single_plane) { + if (shape_y.rank().is_static() && shape_y[H_DIM].is_static()) { + NODE_VALIDATION_CHECK(this, + shape_y[H_DIM].get_length() % 3 == 0, + "NV12 image height shall be divisible by 3, but it is ", + shape_y[H_DIM].get_length()); + // E.g. 
if input shape height is 720 for NV12, then real image height is 720 * 2 / 3 = 480
+            out_shape[H_DIM] = shape_y[H_DIM].get_length() * 2 / 3;
+        }
+    } else {
+        auto uv_type = get_input_element_type(1);
+        if (y_type.is_dynamic()) {
+            NODE_VALIDATION_CHECK(this,
+                                  is_type_supported(uv_type),
+                                  "UV input shall have u8 or floating-point precision, got ",
+                                  uv_type);
+            out_type = uv_type;
+        } else {
+            NODE_VALIDATION_CHECK(this,
+                                  uv_type.is_dynamic() || uv_type == y_type,
+                                  "UV input ",
+                                  uv_type,
+                                  " shall have same precision as Y input ",
+                                  y_type);
+        }
+        const auto& shape_uv = get_input_partial_shape(1);
+        NODE_VALIDATION_CHECK(this,
+                              shape_uv.rank().is_dynamic() || shape_uv.rank().get_length() == 4,
+                              "UV input with static shape shall have 4 dimensions (N, H, W, C)");
+        if (shape_y.rank().is_static() && shape_uv.rank().is_static()) {
+            // Verify that height for Y input is 2 times bigger than input height for UV
+            NODE_VALIDATION_CHECK(this,
+                                  shape_y[H_DIM].is_dynamic() || shape_uv[H_DIM].is_dynamic() ||
+                                      shape_y[H_DIM].get_length() == shape_uv[H_DIM].get_length() * 2,
+                                  "Y input height shall be 2 times bigger than UV input height: Y height = ",
+                                  shape_y[H_DIM].get_length(),
+                                  " UV height = ",
+                                  shape_uv[H_DIM].get_length());
+            // Verify that width for Y input is 2 times bigger than input width for UV
+            NODE_VALIDATION_CHECK(this,
+                                  shape_y[W_DIM].is_dynamic() || shape_uv[W_DIM].is_dynamic() ||
+                                      shape_y[W_DIM].get_length() == shape_uv[W_DIM].get_length() * 2,
+                                  "Y input width shall be 2 times bigger than UV input width: Y width = ",
+                                  shape_y[W_DIM].get_length(),
+                                  " UV width = ",
+                                  shape_uv[W_DIM].get_length());
+            NODE_VALIDATION_CHECK(this,
+                                  shape_uv[C_DIM].is_dynamic() || shape_uv[C_DIM].get_length() == 2,
+                                  "UV channels dimension shall be either dynamic or equal to 2. 
Current value is ", + shape_uv[C_DIM].get_length()); + + NODE_VALIDATION_CHECK(this, + shape_y[N_DIM].is_dynamic() || shape_uv[N_DIM].is_dynamic() || + shape_y[N_DIM].get_length() == shape_uv[N_DIM].get_length(), + "Y input batch shall be same as UV input batch: Y batch = ", + shape_y[N_DIM].get_length(), + " UV batch = ", + shape_uv[N_DIM].get_length()); + } + // Set shape based on UV shape, if Y are dynamic + if (shape_uv.rank().is_static()) { + if (out_shape[N_DIM].is_dynamic()) { + out_shape[N_DIM] = shape_uv[N_DIM]; + } + if (out_shape[H_DIM].is_dynamic()) { + out_shape[H_DIM] = shape_uv[H_DIM] * 2; + } + if (out_shape[W_DIM].is_dynamic()) { + out_shape[W_DIM] = shape_uv[W_DIM] * 2; + } + } + } + NODE_VALIDATION_CHECK(this, + out_shape[H_DIM].is_dynamic() || out_shape[H_DIM].get_length() % 2 == 0, + "Image height must be even, but it is ", + out_shape[H_DIM].get_length()); + NODE_VALIDATION_CHECK(this, + out_shape[W_DIM].is_dynamic() || out_shape[W_DIM].get_length() % 2 == 0, + "Image width must be even, but it is ", + out_shape[W_DIM].get_length()); + set_output_type(0, out_type, out_shape); +} + +namespace color_convert_nv12_op { + +template +inline bool evaluate(const ov::HostTensorVector& input_values, + const ov::HostTensorPtr& output_value, + bool single_tensor, + ov::op::util::ConvertColorNV12Base::ColorConversion color_format) { + using namespace ov::op::util; + const auto& y_tensor = input_values[0]; + auto batch_size = y_tensor->get_shape()[N_DIM]; + auto image_w = y_tensor->get_shape()[W_DIM]; + auto image_h = y_tensor->get_shape()[H_DIM]; + if (single_tensor) { + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(input_values, 1)); + image_h = image_h * 2 / 3; + } else { + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(input_values, 2)); + } + output_value->set_shape({batch_size, image_h, image_w, 3}); // 3 is RGB + if (single_tensor) { + ngraph::runtime::reference::color_convert_nv12(y_tensor->get_data_ptr(), + y_tensor->get_data_ptr() + image_w * image_h, + output_value->get_data_ptr(), + batch_size, + image_h, + image_w, + image_w * image_h * 3 / 2, + image_w * image_h * 3 / 2, + color_format); + } else { + const auto& uv_tensor = input_values[1]; + ngraph::runtime::reference::color_convert_nv12(y_tensor->get_data_ptr(), + uv_tensor->get_data_ptr(), + output_value->get_data_ptr(), + batch_size, + image_h, + image_w, + image_w * image_h, + image_w * image_h / 2, + color_format); + } + return true; +} + +bool evaluate_nv12_convert(const ov::HostTensorVector& input_values, + const ov::HostTensorPtr& output_value, + bool single_tensor, + ov::op::util::ConvertColorNV12Base::ColorConversion conv_format) { + bool rc = false; + switch (input_values[0]->get_element_type()) { + NGRAPH_TYPE_CASE(evaluate_nv12_convert, u8, input_values, output_value, single_tensor, conv_format); + NGRAPH_TYPE_CASE(evaluate_nv12_convert, f16, input_values, output_value, single_tensor, conv_format); + NGRAPH_TYPE_CASE(evaluate_nv12_convert, bf16, input_values, output_value, single_tensor, conv_format); + NGRAPH_TYPE_CASE(evaluate_nv12_convert, f32, input_values, output_value, single_tensor, conv_format); + NGRAPH_TYPE_CASE(evaluate_nv12_convert, f64, input_values, output_value, single_tensor, conv_format); + default: + break; + } + return rc; +} + +} // namespace color_convert_nv12_op + +bool ov::op::util::ConvertColorNV12Base::visit_attributes(AttributeVisitor& visitor) { + return true; +} + +bool ov::op::util::ConvertColorNV12Base::evaluate(const HostTensorVector& output_values, + const 
HostTensorVector& input_values) const { + NGRAPH_OP_SCOPE(v0_ConvertColorNV12_evaluate); + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(output_values, 1)); + NODE_VALIDATION_CHECK(this, + get_input_size() == 1 || get_input_size() == 2, + "NV12 conversion shall have one or 2 inputs, but it is ", + get_input_size()); + auto single_plane = get_input_size() == 1; + return color_convert_nv12_op::evaluate_nv12_convert(input_values, output_values[0], single_plane, m_format); +} + +bool ov::op::util::ConvertColorNV12Base::has_evaluate() const { + NGRAPH_OP_SCOPE(v0_ConvertColorNV12Base_has_evaluate); + + return is_type_supported(get_input_element_type(0)); +} + +bool ov::op::util::ConvertColorNV12Base::is_type_supported(const ov::element::Type& type) const { + return type.is_dynamic() || type.is_real() || type == ov::element::u8; +} diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 0330baf4b449ea..3853ac1f4d9c17 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -114,6 +114,7 @@ set(SRC type_prop/concat.cpp type_prop/constant.cpp type_prop/convert.cpp + type_prop/convert_color_nv12.cpp type_prop/convolution.cpp type_prop/convolution_backprop_data.cpp type_prop/cos.cpp @@ -264,6 +265,7 @@ set(SRC visitors/op/ceiling.cpp visitors/op/constant.cpp visitors/op/convert.cpp + visitors/op/convert_color_nv12.cpp visitors/op/convolution_backprop.cpp visitors/op/cos.cpp visitors/op/cosh.cpp diff --git a/ngraph/test/opset.cpp b/ngraph/test/opset.cpp index a2fd32bba55548..8079170c81c2c1 100644 --- a/ngraph/test/opset.cpp +++ b/ngraph/test/opset.cpp @@ -141,7 +141,7 @@ TEST(opset, opset8_dump) { std::cout << t.name << " "; } std::cout << std::endl; - ASSERT_EQ(163, opset.get_types_info().size()); + ASSERT_EQ(165, opset.get_types_info().size()); } class MyOpOld : public ov::op::Op { diff --git a/ngraph/test/type_prop/convert_color_nv12.cpp b/ngraph/test/type_prop/convert_color_nv12.cpp new file mode 100644 index 00000000000000..eb2c7670224dd2 --- /dev/null +++ b/ngraph/test/type_prop/convert_color_nv12.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "convert_color_nv12_base.hpp" + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_nv12_to_rgb, ConvertNV12BaseTest, ::testing::Types); + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_nv12_to_bgr, ConvertNV12BaseTest, ::testing::Types); diff --git a/ngraph/test/type_prop/convert_color_nv12_base.hpp b/ngraph/test/type_prop/convert_color_nv12_base.hpp new file mode 100644 index 00000000000000..41587016ff469f --- /dev/null +++ b/ngraph/test/type_prop/convert_color_nv12_base.hpp @@ -0,0 +1,316 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "gtest/gtest.h" +#include "openvino/op/op.hpp" +#include "openvino/opsets/opset8.hpp" + +using namespace ov; + +template +class ConvertNV12BaseTest : public testing::Test +{ +}; + +TYPED_TEST_SUITE_P(ConvertNV12BaseTest); + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor) +{ + auto param_shape = PartialShape{5, 3, 2, 1}; + auto out_shape = PartialShape{5, 2, 2, 3}; + auto param = std::make_shared(element::f32, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_element_type(), element::f32); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_dynamic) +{ + auto param_shape = PartialShape::dynamic(); + auto out_shape = 
PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 3}; + auto param = std::make_shared(element::f32, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::f32); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_dynamic_dims) +{ + auto param_shape = PartialShape{Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic()}; + auto out_shape = PartialShape{Dimension::dynamic(), 2, Dimension::dynamic(), 3}; + auto param = std::make_shared(element::u8, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::u8); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_dynamic_height) +{ + auto param_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 8, Dimension::dynamic()}; + auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 8, 3}; + auto param = std::make_shared(element::u8, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::u8); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_dynamic_type) +{ + auto param_shape = PartialShape{1, 6, 8, 1}; + auto out_shape = PartialShape{1, 4, 8, 3}; + auto param = std::make_shared(element::dynamic, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::dynamic); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_error_channels) +{ + auto param_shape = PartialShape{1, 3, 4, 2}; // shall be 1 channel, not 2 + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_error_dims_5) +{ + auto param_shape = PartialShape{1, 3, 3, 1, 1}; // must be 4 dimensions + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_error_dims_3) +{ + auto param_shape = PartialShape{640, 480, 1}; // must be 4 dimensions + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_error_height) +{ + auto param_shape = PartialShape{1, 4, 6, 1}; // height = 4, can't split to Y and UV + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_error_width_odd) +{ + auto param_shape = PartialShape{1, 6, 5, 1}; // width is odd, can't split to U and V + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_single_tensor_error_i8) +{ + auto param_shape = PartialShape{1, 640, 480, 1}; + auto param = std::make_shared(element::i8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_simple) +{ + auto param_shape_y = PartialShape{10, 480, 640, 1}; + auto param_shape_uv = PartialShape{10, 240, 320, 2}; + 
auto out_shape = PartialShape{10, 480, 640, 3}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + auto op = std::make_shared(param_y, param_uv); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::u8); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_dynamic) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_uv = PartialShape::dynamic(); + auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 3}; + auto param_y = std::make_shared(element::f32, param_shape_y); + auto param_uv = std::make_shared(element::f32, param_shape_uv); + auto op = std::make_shared(param_y, param_uv); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::f32); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_y_dynamic) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_uv = PartialShape{1, 3, 2, 2}; + auto out_shape = PartialShape{1, 6, 4, 3}; + auto param_y = std::make_shared(element::bf16, param_shape_y); + auto param_uv = std::make_shared(element::bf16, param_shape_uv); + auto op = std::make_shared(param_y, param_uv); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::bf16); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_uv_dynamic) +{ + auto param_shape_y = PartialShape{1, 4, 4, 1}; + auto param_shape_uv = PartialShape::dynamic(); + auto out_shape = PartialShape{1, 4, 4, 3}; + auto param_y = std::make_shared(element::f16, param_shape_y); + auto param_uv = std::make_shared(element::f16, param_shape_uv); + auto op = std::make_shared(param_y, param_uv); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::f16); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_dynamic_types) +{ + auto param_shape_y = PartialShape{1, 4, 4, 1}; + auto param_shape_uv = PartialShape{1, 2, 2, 2}; + auto out_shape = PartialShape{1, 4, 4, 3}; + auto y_type = element::dynamic; + auto uv_type = element::dynamic; + auto out_type = element::dynamic; + auto param_y = std::make_shared(y_type, param_shape_y); + auto param_uv = std::make_shared(uv_type, param_shape_uv); + auto op = std::make_shared(param_y, param_uv); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), out_type); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_uv_type) +{ + auto param_shape_y = PartialShape{1, 4, 4, 1}; + auto param_shape_uv = PartialShape{1, 2, 2, 2}; + auto out_shape = PartialShape{1, 4, 4, 3}; + auto y_type = element::dynamic; + auto uv_type = element::f64; + auto out_type = element::f64; + auto param_y = std::make_shared(y_type, param_shape_y); + auto param_uv = std::make_shared(uv_type, param_shape_uv); + auto op = std::make_shared(param_y, param_uv); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), out_type); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_type_mismatch) +{ + auto param_y = std::make_shared(element::u8, PartialShape::dynamic()); + auto param_uv = std::make_shared(element::f32, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, 
shape_inference_2_plane_error_uv_type) +{ + auto param_y = std::make_shared(element::dynamic, PartialShape::dynamic()); + auto param_uv = std::make_shared(element::i8, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_5dims) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_uv = PartialShape{2, 2, 2, 2, 2}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_3dims) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_uv = PartialShape{2, 2, 2}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_batch) +{ + auto param_shape_y = PartialShape{2, 480, 640, 1}; + auto param_shape_uv = PartialShape{1, 240, 320, 2}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_height) +{ + auto param_shape_y = PartialShape{2, 480, 640, 1}; + auto param_shape_uv = PartialShape{2, 480, 320, 2}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_height_odd) +{ + auto param_shape_y = PartialShape{2, 3, 2, 1}; // 3 is invalid, as UV shall be 2 times smaller + auto param_shape_uv = PartialShape::dynamic(); + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_width) +{ + auto param_shape_y = PartialShape{2, 480, 640, 1}; + auto param_shape_uv = PartialShape{2, 240, 640, 2}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_width_odd) +{ + auto param_shape_y = PartialShape{2, 4, 3, 1}; // 3 is invalid, as UV width shall be 2 times smaller + auto param_shape_uv = PartialShape::dynamic(); + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_2_plane_error_channels) +{ + auto param_shape_y = PartialShape{2, 480, 640, 1}; + auto param_shape_uv = PartialShape{2, 240, 320, 1}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_uv = std::make_shared(element::u8, param_shape_uv); + EXPECT_THROW(std::make_shared(param_y, param_uv), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertNV12BaseTest, shape_inference_error_many_types) +{ + auto param_y = 
std::make_shared(element::dynamic, PartialShape::dynamic()); + auto param_u = std::make_shared(element::dynamic, PartialShape::dynamic()); + auto param_v = std::make_shared(element::dynamic, PartialShape::dynamic()); + auto empty = std::make_shared(); + empty->set_arguments(NodeVector{param_y, param_u, param_v}); + + EXPECT_THROW(empty->constructor_validate_and_infer_types(), ov::AssertFailure); +} + + +REGISTER_TYPED_TEST_SUITE_P(ConvertNV12BaseTest, + shape_inference_single_tensor, + shape_inference_single_tensor_dynamic, + shape_inference_single_tensor_dynamic_dims, + shape_inference_single_tensor_dynamic_height, + shape_inference_single_tensor_dynamic_type, + shape_inference_single_tensor_error_channels, + shape_inference_single_tensor_error_dims_5, + shape_inference_single_tensor_error_dims_3, + shape_inference_single_tensor_error_height, + shape_inference_single_tensor_error_width_odd, + shape_inference_single_tensor_error_i8, + shape_inference_2_plane_simple, + shape_inference_2_plane_dynamic, + shape_inference_2_plane_y_dynamic, + shape_inference_2_plane_uv_dynamic, + shape_inference_2_plane_dynamic_types, + shape_inference_2_plane_uv_type, + shape_inference_2_plane_error_type_mismatch, + shape_inference_2_plane_error_uv_type, + shape_inference_2_plane_error_5dims, + shape_inference_2_plane_error_3dims, + shape_inference_2_plane_error_batch, + shape_inference_2_plane_error_height, + shape_inference_2_plane_error_height_odd, + shape_inference_2_plane_error_width, + shape_inference_2_plane_error_width_odd, + shape_inference_2_plane_error_channels, + shape_inference_error_many_types +); diff --git a/ngraph/test/visitors/op/convert_color_nv12.cpp b/ngraph/test/visitors/op/convert_color_nv12.cpp new file mode 100644 index 00000000000000..f5952a224ff375 --- /dev/null +++ b/ngraph/test/visitors/op/convert_color_nv12.cpp @@ -0,0 +1,52 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" +#include "ngraph/op/util/attr_types.hpp" +#include "openvino/op/nv12_to_bgr.hpp" +#include "openvino/op/nv12_to_rgb.hpp" +#include "util/visitor.hpp" + +using namespace std; +using namespace ov; +using ngraph::test::NodeBuilder; +using ngraph::test::ValueMap; + +TEST(attributes, convert_color_nv12_rgb) { + NodeBuilder::get_ops().register_factory(); + auto data = make_shared(element::u8, Shape{3, 720, 640, 1}); + auto convert_color = make_shared(data); + NodeBuilder builder(convert_color); + const auto expected_attr_count = 0; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +} + +TEST(attributes, convert_color_nv12_bgr) { + NodeBuilder::get_ops().register_factory(); + auto data = make_shared(element::u8, Shape{3, 720, 640, 1}); + auto convert_color = make_shared(data); + NodeBuilder builder(convert_color); + const auto expected_attr_count = 0; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +} + +TEST(attributes, convert_color_nv12_rgb_2planes) { + NodeBuilder::get_ops().register_factory(); + auto data1 = make_shared(element::u8, Shape{3, 480, 640, 1}); + auto data2 = make_shared(element::u8, Shape{3, 240, 320, 2}); + auto convert_color = make_shared(data1, data2); + NodeBuilder builder(convert_color); + const auto expected_attr_count = 0; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +} + +TEST(attributes, convert_color_nv12_bgr_2planes) { + NodeBuilder::get_ops().register_factory(); + auto data1 = make_shared(element::u8, Shape{3, 480, 640, 1}); + auto data2 = make_shared(element::u8, 
Shape{3, 240, 320, 2}); + auto convert_color = make_shared(data1, data2); + NodeBuilder builder(convert_color); + const auto expected_attr_count = 0; + EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); +}
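
The YUV-to-RGB formulas documented in convert_color_nv12_base.hpp can be sanity-checked by hand against the hard-coded expectations in the reference tests above. The following standalone sketch is illustrative only and is not part of the patch; it plugs in the pixel decoded by the single-plane u8 RGB test (Y = 0x51, with U = 0x5a and V = 0xf0 per the reference implementation's UV byte ordering) and prints an almost fully saturated red pixel, consistent with that test's hard-coded {0xff, 0, 0} output row:

// Standalone illustration (not part of the patch): one NV12 (YUV) pixel -> RGB,
// using the formulas documented for ConvertColorNV12Base:
//   R = 1.164 * (Y - 16) + 1.596 * (V - 128)
//   G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
//   B = 1.164 * (Y - 16) + 2.018 * (U - 128)
// with R, G, B then clipped to the range (0, 255).
#include <algorithm>
#include <cstdio>

int main() {
    const float y = 0x51, u = 0x5a, v = 0xf0;  // sample pixel from the u8 single-plane RGB test
    const float c = y - 16.f;
    const float d = u - 128.f;
    const float e = v - 128.f;
    const auto clip = [](float a) { return std::min(std::max(a, 0.f), 255.f); };
    const float r = clip(1.164f * c + 1.596f * e);
    const float g = clip(1.164f * c - 0.813f * e - 0.391f * d);
    const float b = clip(1.164f * c + 2.018f * d);
    std::printf("R=%.1f G=%.1f B=%.1f\n", r, g, b);  // R=254.4 G=0.0 B=0.0 -> red
    return 0;
}

Small rounding differences such as the 254.4 above are exactly why the shared layer tests set threshold = 2.0f: different NV12 conversion algorithms may deviate slightly in the low bits.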