diff --git a/docs/template_plugin/tests/functional/op_reference/convert_color_i420.cpp b/docs/template_plugin/tests/functional/op_reference/convert_color_i420.cpp new file mode 100644 index 00000000000000..1433656e2553da --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/convert_color_i420.cpp @@ -0,0 +1,171 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include + +#include "base_reference_test.hpp" +#include "functional_test_utils/skip_tests_config.hpp" + +using namespace ov; +using namespace InferenceEngine; +using namespace reference_tests; + +class ReferenceConvertColorI420LayerTest : public testing::Test, public CommonReferenceTest { +public: + void SetUp() override { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + abs_threshold = 1.f; // allow R, G, B absolute deviation to 1 (of max 255) + threshold = 1.f; // Ignore relative comparison (100%) + } + +public: + template + static std::shared_ptr CreateFunction(const Tensor& input) { + const auto in = std::make_shared(input.type, input.shape); + std::shared_ptr conv; + conv = std::make_shared(in); + auto res = std::make_shared(conv); + return std::make_shared(ResultVector{res}, ParameterVector {in}); + } + + template + static std::shared_ptr CreateFunction3(const Tensor& input1, const Tensor& input2, const Tensor& input3) { + const auto in1 = std::make_shared(input1.type, input1.shape); + const auto in2 = std::make_shared(input2.type, input2.shape); + const auto in3 = std::make_shared(input3.type, input3.shape); + std::shared_ptr conv; + conv = std::make_shared(in1, in2, in3); + auto res = std::make_shared(conv); + return std::make_shared(ResultVector{res}, ParameterVector {in1, in2, in3}); + } +}; + +TEST_F(ReferenceConvertColorI420LayerTest, CompareWithHardcodedRefs_r_u8_single_rgb) { + auto input = std::vector {0x51, 0x51, 0x51, 0x51, + 0x51, 0x51, 0x51, 0x51, + 0x5a, 0x5a, 0xf0, 0xf0}; + auto input_shape = Shape{1, 3, 4, 1}; + auto exp_out = std::vector {0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0, + 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0}; + auto out_shape = Shape{1, 2, 4, 3}; + Tensor inp_tensor(input_shape, element::u8, input); + inputData = {inp_tensor.data}; + function = CreateFunction(inp_tensor); + Tensor exp_tensor_u8(out_shape, element::u8, exp_out); + refOutData = {exp_tensor_u8.data}; + Exec(); +} + +TEST_F(ReferenceConvertColorI420LayerTest, CompareWithHardcodedRefs_color_u8_single_bgr) { + auto input = std::vector {0x51, 0xeb, 0x51, 0xeb, + 0x51, 0xeb, 0x51, 0xeb, + 0x6d, 0x6d, 0xb8, 0xb8}; + auto input_shape = Shape{1, 6, 2, 1}; + auto exp_out = std::vector {37, 37, 164, 217, 216, 255, 37, 37, 164, 217, 216, 255, + 37, 37, 164, 217, 216, 255, 37, 37, 164, 217, 216, 255}; + auto out_shape = Shape{1, 4, 2, 3}; + + Tensor inp_tensor(input_shape, element::u8, input); + inputData = {inp_tensor.data}; + + Tensor exp_tensor_u8(out_shape, element::u8, exp_out); + refOutData = {exp_tensor_u8.data}; + + function = CreateFunction(inp_tensor); + + Exec(); +} + +TEST_F(ReferenceConvertColorI420LayerTest, CompareWithHardcodedRefs_g_fp32_single_rgb) { + auto input = std::vector {145.f, 145.f, 145.f, 145.f, + 145.f, 145.f, 145.f, 145.f, + 54.f, 54.f, 34.f, 34.f}; + auto input_shape = Shape{1, 3, 4, 1}; + auto exp_out = std::vector {0, 255.f, 0, 0, 255.f, 0, 0, 255.f, 0, 0, 255.f, 0, + 0, 255.f, 0, 0, 255.f, 0, 0, 255.f, 0, 0, 255.f, 0}; + auto out_shape = Shape{1, 2, 4, 3}; + + Tensor inp_tensor(input_shape, element::f32, 
input); + inputData = {inp_tensor.data}; + + Tensor exp_tensor(out_shape, element::f32, exp_out); + refOutData = {exp_tensor.data}; + + function = CreateFunction(inp_tensor); + + Exec(); +} + +TEST_F(ReferenceConvertColorI420LayerTest, CompareWithHardcodedRefs_batch_fp32_three_bgr) { + auto input_y = std::vector {81.f, 81.f, 81.f, 81.f, + 145.f, 145.f, 145.f, 145.f, + 41.f, 41.f, 41.f, 41.f}; + auto input_shape_y = Shape{3, 2, 2, 1}; + + auto input_u = std::vector {90., + 54., + 240.}; + auto input_shape_u = Shape{3, 1, 1, 1}; + + auto input_v = std::vector {240., + 34., + 110.}; + auto input_shape_v = Shape{3, 1, 1, 1}; + auto exp_out = std::vector {0, 0, 255., 0, 0, 255., 0, 0, 255., 0, 0, 255., + 0, 255., 0, 0, 255., 0, 0, 255., 0, 0, 255., 0, + 255., 0, 0, 255., 0, 0, 255., 0, 0, 255., 0, 0}; + auto out_shape = Shape{3, 2, 2, 3}; + + Tensor inp_tensor_y(input_shape_y, element::f32, input_y); + Tensor inp_tensor_u(input_shape_u, element::f32, input_u); + Tensor inp_tensor_v(input_shape_v, element::f32, input_v); + inputData = {inp_tensor_y.data, inp_tensor_u.data, inp_tensor_v.data}; + + Tensor exp_tensor(out_shape, element::f32, exp_out); + refOutData = {exp_tensor.data}; + + function = CreateFunction3(inp_tensor_y, inp_tensor_u, inp_tensor_v); + + Exec(); +} + +TEST_F(ReferenceConvertColorI420LayerTest, CompareWithHardcodedRefs_color4x4_f32_three_rgb) { + auto input_y = std::vector {81, 235, + 81, 235, + 81, 81, + 81, 81, + 145, 145, + 145, 145, + 41, 41, + 41, 41}; + auto input_shape_y = Shape{1, 8, 2, 1}; + + auto input_u = std::vector {109, 90, 54, 240}; + auto input_shape_u = Shape{1, 4, 1, 1}; + auto input_v = std::vector {184, 240, 34, 110}; + auto input_shape_v = Shape{1, 4, 1, 1}; + + auto exp_out = std::vector {165, 37, 37, 255, 216, 217, 165, 37, 37, 255, 216, 217, + 255, 0, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0, + 0, 255, 0, 0, 255, 0, 0, 255, 0, 0, 255, 0, + 0, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0, 255}; + auto out_shape = Shape{1, 2, 2, 3}; + + Tensor inp_tensor_y(input_shape_y, element::f32, input_y); + Tensor inp_tensor_u(input_shape_u, element::f32, input_u); + Tensor inp_tensor_v(input_shape_v, element::f32, input_v); + inputData = {inp_tensor_y.data, inp_tensor_u.data, inp_tensor_v.data}; + + Tensor exp_tensor(out_shape, element::f32, exp_out); + refOutData = {exp_tensor.data}; + + function = CreateFunction3(inp_tensor_y, inp_tensor_u, inp_tensor_v); + + Exec(); +} diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert_color_i420.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert_color_i420.cpp new file mode 100644 index 00000000000000..c738515715c9bc --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/convert_color_i420.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/convert_color_i420.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +TEST_P(ConvertColorI420LayerTest, Serialize) { + Serialize(); +} + +const std::vector inShapes_nhwc = { + {1, 10, 10, 1} +}; + +const std::vector inTypes = { + ov::element::u8, ov::element::f32 +}; + +const auto testCase_values = ::testing::Combine( + ::testing::ValuesIn(inShapes_nhwc), + ::testing::ValuesIn(inTypes), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ConvertColorI420LayerTest, 
testCase_values, ConvertColorI420LayerTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert_color_i420.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert_color_i420.cpp new file mode 100644 index 00000000000000..2ef57e9a8d35dd --- /dev/null +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/convert_color_i420.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "single_layer_tests/convert_color_i420.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +const std::vector inShapes_nhwc = { + {1, 10, 10, 1} +}; + +const std::vector inTypes = { + ov::element::u8, ov::element::f32 +}; + +const auto testCase_values = ::testing::Combine( + ::testing::ValuesIn(inShapes_nhwc), + ::testing::ValuesIn(inTypes), + ::testing::Bool(), + ::testing::Bool(), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + + +INSTANTIATE_TEST_SUITE_P(smoke_TestsConvertColorI420, ConvertColorI420LayerTest, testCase_values, ConvertColorI420LayerTest::getTestCaseName); + +const auto testCase_accuracy_values = ::testing::Combine( + ::testing::Values(ov::Shape{1, 16*6, 16, 1}), + ::testing::Values(ov::element::u8), + ::testing::Values(false), + ::testing::Values(true), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_SUITE_P(smoke_TestsConvertColorI420_acc, + ConvertColorI420AccuracyTest, + testCase_accuracy_values, + ConvertColorI420LayerTest::getTestCaseName); + +const auto testCase_accuracy_values_nightly = ::testing::Combine( + ::testing::Values(ov::Shape{1, 256*256, 256, 1}), + ::testing::Values(ov::element::u8), + ::testing::Values(false), + ::testing::Values(true), + ::testing::Values(CommonTestUtils::DEVICE_CPU) +); + +INSTANTIATE_TEST_SUITE_P(nightly_TestsConvertColorI420_acc, + ConvertColorI420AccuracyTest, + testCase_accuracy_values_nightly, + ConvertColorI420LayerTest::getTestCaseName); + +} // namespace diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convert_color_i420.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convert_color_i420.hpp new file mode 100644 index 00000000000000..f365576121eaff --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convert_color_i420.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "shared_test_classes/single_layer/convert_color_i420.hpp" + +namespace LayerTestsDefinitions { + +TEST_P(ConvertColorI420LayerTest, CompareWithRefs) { + Run(); +} + +TEST_P(ConvertColorI420AccuracyTest, CompareWithRefs) { + Run(); +} + +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp new file mode 100644 index 00000000000000..dd2b484752010d --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp @@ -0,0 +1,92 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + 
+#pragma once + +#include +#include +#include + +#include "shared_test_classes/base/layer_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include "ngraph_functions/utils/ngraph_helpers.hpp" + +namespace LayerTestsDefinitions { + +using ConvertColorI420ParamsTuple = std::tuple< + ov::Shape, // Input Shape + ov::element::Type, // Element type + bool, // Conversion type + bool, // 1 or 3 planes + std::string>; // Device name + +class ConvertColorI420LayerTest : public testing::WithParamInterface, + virtual public LayerTestsUtils::LayerTestsCommon { +public: + static std::string getTestCaseName(const testing::TestParamInfo &obj); + +protected: + void SetUp() override; +}; + +//---------------------------------------- + +class ConvertColorI420AccuracyTest : public ConvertColorI420LayerTest { +protected: + void GenerateInputs() override; // Generate predefined image with R/G/B combinations + void Validate() override; // Regular validate + percentage of acceptable deviations + std::vector>> CalculateRefs() override; + + std::vector GetOutputs() override; +private: + std::vector expected_output; + InferenceEngine::Blob::Ptr actual_output; +}; + +namespace I420TestUtils { + +template +inline void ValidateColors(const T* expected, const T* actual, size_t size, float dev_threshold, float abs_threshold = 0.01f) { + size_t mismatches = 0; + for (size_t i = 0; i < size; i++) { + if (std::abs(static_cast(expected[i]) - static_cast(actual[i])) > abs_threshold) { + mismatches++; + } + } + ASSERT_LT(static_cast(mismatches) / size, dev_threshold) << mismatches << + " out of " << size << " color mismatches found which exceeds allowed threshold " << dev_threshold; +} + +inline std::vector color_test_image(size_t height, size_t width, int b_step) { + // Test all possible r/g/b values within dimensions + int b_dim = 255 / b_step + 1; + auto input_yuv = std::vector(height * b_dim * width * 3 / 2); + for (int b = 0; b <= 255; b += b_step) { + for (size_t y = 0; y < height / 2; y++) { + for (size_t x = 0; x < width / 2; x++) { + int r = static_cast(y) * 512 / static_cast(height); + int g = static_cast(x) * 512 / static_cast(width); + // Can't use random y/u/v for testing as this can lead to invalid R/G/B values + int y_val = ((66 * r + 129 * g + 25 * b + 128) / 256) + 16; + int u_val = ((-38 * r - 74 * g + 112 * b + 128) / 256) + 128; + int v_val = ((112 * r - 94 * g + 18 * b + 128) / 256) + 128; + + size_t b_offset = height * width * b / b_step; + size_t u_index = b_offset + height * width + y * width / 2 + x * 2; + size_t v_index = u_index + height * width / 4; + input_yuv[u_index] = u_val; + input_yuv[v_index] = v_val; + size_t y_index = b_offset + y * 2 * width + x * 2; + input_yuv[y_index] = y_val; + input_yuv[y_index + 1] = y_val; + input_yuv[y_index + width] = y_val; + input_yuv[y_index + width + 1] = y_val; + } + } + } + return input_yuv; +} + +} // namespace I420TestUtils +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/convert_color_i420.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convert_color_i420.cpp new file mode 100644 index 00000000000000..ee7da8b057808c --- /dev/null +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/convert_color_i420.cpp @@ -0,0 +1,124 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "shared_test_classes/single_layer/convert_color_i420.hpp" +#include 
"openvino/op/i420_to_rgb.hpp" +#include "openvino/op/i420_to_bgr.hpp" + +namespace LayerTestsDefinitions { + +std::string ConvertColorI420LayerTest::getTestCaseName(const testing::TestParamInfo &obj) { + ov::Shape inputShape; + ov::element::Type type; + bool conversion, singlePlane; + std::string targetName; + std::tie(inputShape, type, conversion, singlePlane, targetName) = obj.param; + std::ostringstream result; + result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_"; + result << "netPRC=" << type.c_type_string() << "_"; + result << "convRGB=" << conversion << "_"; + result << "singlePlane=" << singlePlane << "_"; + result << "targetDevice=" << targetName; + return result.str(); +} + +void ConvertColorI420LayerTest::SetUp() { + ov::Shape inputShape; + ov::element::Type ngPrc; + bool conversionToRGB, singlePlane; + abs_threshold = 1.0f; // I420 conversion can use various algorithms, thus some absolute deviation is allowed + threshold = 1.f; // Ignore relative comparison for I420 convert (allow 100% relative deviation) + std::tie(inputShape, ngPrc, conversionToRGB, singlePlane, targetDevice) = GetParam(); + if (singlePlane) { + inputShape[1] = inputShape[1] * 3 / 2; + auto param = std::make_shared(ngPrc, inputShape); + std::shared_ptr convert_color; + if (conversionToRGB) { + convert_color = std::make_shared(param); + } else { + convert_color = std::make_shared(param); + } + function = std::make_shared(std::make_shared(convert_color), + ov::ParameterVector{param}, "ConvertColorI420"); + } else { + auto uvShape = ov::Shape{inputShape[0], inputShape[1] / 2, inputShape[2] / 2, 1}; + auto param_y = std::make_shared(ngPrc, inputShape); + auto param_u = std::make_shared(ngPrc, uvShape); + auto param_v = std::make_shared(ngPrc, uvShape); + std::shared_ptr convert_color; + if (conversionToRGB) { + convert_color = std::make_shared(param_y, param_u, param_v); + } else { + convert_color = std::make_shared(param_y, param_u, param_v); + } + function = std::make_shared(std::make_shared(convert_color), + ov::ParameterVector{param_y, param_u, param_v}, + "ConvertColorI420"); + } +} + +// -------- Accuracy test (R/G/B combinations) -------- + +void ConvertColorI420AccuracyTest::GenerateInputs() { + inputs.clear(); + const auto& inputsInfo = executableNetwork.GetInputsInfo(); + const auto& functionParams = function->get_parameters(); + for (const auto& param : functionParams) { + const auto infoIt = inputsInfo.find(param->get_friendly_name()); + GTEST_ASSERT_NE(infoIt, inputsInfo.cend()); + InferenceEngine::InputInfo::CPtr info = infoIt->second; + InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info->getTensorDesc()); + blob->allocate(); + size_t full_height = param->get_shape()[1]; + size_t full_width = param->get_shape()[2]; + int b_dim = static_cast(full_height * 2 / (3 * full_width)); + ASSERT_GT(b_dim, 1) << "Image height is invalid for I420 Accuracy test"; + ASSERT_EQ(255 % (b_dim - 1), 0) << "Image height is invalid for I420 Accuracy test"; + int b_step = 255 / (b_dim - 1); + auto input_image = I420TestUtils::color_test_image(full_width, full_width, b_step); + auto* rawBlobDataPtr = blob->buffer().as(); + for (size_t j = 0; j < input_image.size(); ++j) { + rawBlobDataPtr[j] = input_image[j]; + } + + inputs.push_back(blob); + } +} + +void ConvertColorI420AccuracyTest::Validate() { + ConvertColorI420LayerTest::Validate(); + + ASSERT_FALSE(expected_output.empty()); + ASSERT_TRUE(actual_output); + auto memory = InferenceEngine::as(actual_output); + const auto lockedMemory = 
memory->wmap(); + const auto* actualBuffer = lockedMemory.as(); + + // Allow less than 2% of deviations with 1 color step. 2% is experimental value + // For different calculation methods - 1.4% deviation is observed + I420TestUtils::ValidateColors(expected_output.data(), actualBuffer, expected_output.size(), 0.02); +} + +std::vector>> ConvertColorI420AccuracyTest::CalculateRefs() { + auto refs = ConvertColorI420LayerTest::CalculateRefs(); + if (!refs.empty()) { + auto out = refs[0].second; + expected_output.reserve(out.size()); + for (auto val : out) { + expected_output.push_back(val); + } + } + return refs; +} + +std::vector ConvertColorI420AccuracyTest::GetOutputs() { + auto outputs = ConvertColorI420LayerTest::GetOutputs(); + if (!outputs.empty()) { + actual_output = InferenceEngine::Blob::Ptr(outputs[0]); + } + return outputs; +} + + +} // namespace LayerTestsDefinitions \ No newline at end of file diff --git a/ngraph/core/include/ngraph/op/i420_to_bgr.hpp b/ngraph/core/include/ngraph/op/i420_to_bgr.hpp new file mode 100644 index 00000000000000..617a1f04cca937 --- /dev/null +++ b/ngraph/core/include/ngraph/op/i420_to_bgr.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/i420_to_bgr.hpp" + +namespace ngraph { +namespace op { +namespace v8 { +using ov::op::v8::I420toBGR; +} // namespace v8 +} // namespace op +} // namespace ngraph diff --git a/ngraph/core/include/ngraph/op/i420_to_rgb.hpp b/ngraph/core/include/ngraph/op/i420_to_rgb.hpp new file mode 100644 index 00000000000000..b8cde10c1b5aa5 --- /dev/null +++ b/ngraph/core/include/ngraph/op/i420_to_rgb.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/i420_to_rgb.hpp" + +namespace ngraph { +namespace op { +namespace v8 { +using ov::op::v8::I420toRGB; +} // namespace v8 +} // namespace op +} // namespace ngraph diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp index ff07083328adbd..30cd22de2776c6 100644 --- a/ngraph/core/include/ngraph/ops.hpp +++ b/ngraph/core/include/ngraph/ops.hpp @@ -74,6 +74,8 @@ #include "ngraph/op/hard_sigmoid.hpp" #include "ngraph/op/hsigmoid.hpp" #include "ngraph/op/hswish.hpp" +#include "ngraph/op/i420_to_bgr.hpp" +#include "ngraph/op/i420_to_rgb.hpp" #include "ngraph/op/idft.hpp" #include "ngraph/op/if.hpp" #include "ngraph/op/interpolate.hpp" diff --git a/ngraph/core/include/openvino/op/i420_to_bgr.hpp b/ngraph/core/include/openvino/op/i420_to_bgr.hpp new file mode 100644 index 00000000000000..59f1e661fd2373 --- /dev/null +++ b/ngraph/core/include/openvino/op/i420_to_bgr.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/convert_color_i420_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Color conversion operation from I420 to BGR format. +/// Input: +/// - Input NV12 image can be represented in two ways: +/// a) Single plane (as it is in the file): I420 height dimension is 1.5x bigger than image height. 'C' +/// dimension shall be 1. +/// b) Three separate planes (used this way in many physical video sources): Y, U and V. In +/// this case +/// b1) Y plane has height same as image height. 'C' dimension equals to 1 +/// b2) U plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 1. 
+/// b3) V plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 1. +/// - Supported element types: u8 or any supported floating-point type. +/// Output: +/// - Output node will have NHWC layout and shape HxW same as image spatial dimensions. +/// - Number of output channels 'C' will be 3, as per interleaved BGR format, first channel is B, last is R +/// +/// \details Conversion of each pixel from I420 (YUV) to RGB space is represented by following formulas: +/// R = 1.164 * (Y - 16) + 1.596 * (V - 128) +/// G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128) +/// B = 1.164 * (Y - 16) + 2.018 * (U - 128) +/// Then R, G, B values are clipped to range (0, 255) +/// +class OPENVINO_API I420toBGR : public util::ConvertColorI420Base { +public: + OPENVINO_OP("I420toBGR", "opset8", util::ConvertColorI420Base); + + I420toBGR() = default; + + /// \brief Constructs a conversion operation from input image in I420 format + /// As per I420 format definition, node height dimension shall be 1.5 times bigger than image height + /// so that image (w=640, h=480) is represented by NHWC shape {N,720,640,1} (height*1.5 x width) + /// + /// \param arg Node that produces the input tensor. Input tensor represents image in NV12 format (YUV). + explicit I420toBGR(const Output& arg); + + /// \brief Constructs a conversion operation from 2-plane input image in NV12 format + /// In general case Y channel of image can be separated from UV channel which means that operation needs two nodes + /// for Y and UV planes respectively. Y plane has one channel, and UV has 2 channels, both expect 'NHWC' layout + /// + /// \param arg_y Node that produces the input tensor for Y plane (NHWC layout). Shall have WxH dimensions + /// equal to image dimensions. 'C' dimension equals to 1. + /// + /// \param arg_u Node that produces the input tensor for U plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 1. + /// + /// \param arg_v Node that produces the input tensor for V plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 1. + /// + explicit I420toBGR(const Output& arg_y, const Output& arg_u, const Output& arg_v); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/i420_to_rgb.hpp b/ngraph/core/include/openvino/op/i420_to_rgb.hpp new file mode 100644 index 00000000000000..c95f5de6b5c180 --- /dev/null +++ b/ngraph/core/include/openvino/op/i420_to_rgb.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/convert_color_i420_base.hpp" + +namespace ov { +namespace op { +namespace v8 { +/// \brief Color conversion operation from I420 to RGB format. +/// Input: +/// - Input NV12 image can be represented in two ways: +/// a) Single plane (as it is in the file): I420 height dimension is 1.5x bigger than image height. 'C' +/// dimension shall be 1. +/// b) Three separate planes (used this way in many physical video sources): Y, U and V. In +/// this case +/// b1) Y plane has height same as image height. 'C' dimension equals to 1 +/// b2) U plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 1. +/// b3) V plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 1. +/// - Supported element types: u8 or any supported floating-point type. 
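+///   For illustration only (not part of the spec text): a 640x480 image in single-plane form is an
+///   NHWC tensor {N, 720, 640, 1}, where rows [0, 480) hold Y, rows [480, 600) hold the 320x240 U
+///   plane packed row-major, and rows [600, 720) hold the V plane; this matches the plane offsets
+///   assumed by the reference implementation in this change.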
+/// Output: +/// - Output node will have NHWC layout and shape HxW same as image spatial dimensions. +/// - Number of output channels 'C' will be 3, as per interleaved RGB format, first channel is R, last is B +/// +/// \details Conversion of each pixel from I420 (YUV) to RGB space is represented by following formulas: +/// R = 1.164 * (Y - 16) + 1.596 * (V - 128) +/// G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128) +/// B = 1.164 * (Y - 16) + 2.018 * (U - 128) +/// Then R, G, B values are clipped to range (0, 255) +/// +class OPENVINO_API I420toRGB : public util::ConvertColorI420Base { +public: + OPENVINO_OP("I420toRGB", "opset8", util::ConvertColorI420Base); + + I420toRGB() = default; + + /// \brief Constructs a conversion operation from input image in I420 format + /// As per I420 format definition, node height dimension shall be 1.5 times bigger than image height + /// so that image (w=640, h=480) is represented by NHWC shape {N,720,640,1} (height*1.5 x width) + /// + /// \param arg Node that produces the input tensor. Input tensor represents image in NV12 format (YUV). + explicit I420toRGB(const Output& arg); + + /// \brief Constructs a conversion operation from 2-plane input image in NV12 format + /// In general case Y channel of image can be separated from UV channel which means that operation needs two nodes + /// for Y and UV planes respectively. Y plane has one channel, and UV has 2 channels, both expect 'NHWC' layout + /// + /// \param arg_y Node that produces the input tensor for Y plane (NHWC layout). Shall have WxH dimensions + /// equal to image dimensions. 'C' dimension equals to 1. + /// + /// \param arg_u Node that produces the input tensor for U plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 1. + /// + /// \param arg_v Node that produces the input tensor for V plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 1. + /// + explicit I420toRGB(const Output& arg_y, const Output& arg_u, const Output& arg_v); + + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; +}; +} // namespace v8 +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/op/ops.hpp b/ngraph/core/include/openvino/op/ops.hpp index 350ed894022213..eb8ab178ee23c2 100644 --- a/ngraph/core/include/openvino/op/ops.hpp +++ b/ngraph/core/include/openvino/op/ops.hpp @@ -73,6 +73,8 @@ #include "openvino/op/hard_sigmoid.hpp" #include "openvino/op/hsigmoid.hpp" #include "openvino/op/hswish.hpp" +#include "openvino/op/i420_to_bgr.hpp" +#include "openvino/op/i420_to_rgb.hpp" #include "openvino/op/idft.hpp" #include "openvino/op/if.hpp" #include "openvino/op/interpolate.hpp" diff --git a/ngraph/core/include/openvino/op/util/convert_color_i420_base.hpp b/ngraph/core/include/openvino/op/util/convert_color_i420_base.hpp new file mode 100644 index 00000000000000..324be5260caddd --- /dev/null +++ b/ngraph/core/include/openvino/op/util/convert_color_i420_base.hpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { +namespace util { +/// \brief Base class for color conversion operation from I420 to RGB/BGR format. +/// Input: +/// - Operation expects input shape in NHWC layout. 
+/// - Input NV12 image can be represented in a two ways: +/// a) Single plane: NV12 height dimension is 1.5x bigger than image height. 'C' dimension shall be 1 +/// b) Three separate planes: Y, U and V. In this case +/// b1) Y plane has height same as image height. 'C' dimension equals to 1 +/// b2) U plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 1. +/// b3) V plane has dimensions: 'H' = image_h / 2; 'W' = image_w / 2; 'C' = 1. +/// - Supported element types: u8 or any supported floating-point type. +/// Output: +/// - Output node will have NHWC layout and shape HxW same as image spatial dimensions. +/// - Number of output channels 'C' will be 3 +/// +/// \details Conversion of each pixel from I420 (YUV) to RGB space is represented by following formulas: +/// R = 1.164 * (Y - 16) + 1.596 * (V - 128) +/// G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128) +/// B = 1.164 * (Y - 16) + 2.018 * (U - 128) +/// Then R, G, B values are clipped to range (0, 255) +/// +class OPENVINO_API ConvertColorI420Base : public Op { +public: + /// \brief Exact conversion format details + /// Currently supports conversion from I420 to RGB or BGR + enum class ColorConversion : int { I420_TO_RGB = 0, I420_TO_BGR = 1 }; + +protected: + ConvertColorI420Base() = default; + + /// \brief Constructs a conversion operation from input image in NV12 format + /// As per I420 format definition, node height dimension shall be 1.5 times bigger than image height + /// so that image (w=640, h=480) is represented by NHWC shape {N,720,640,1} (height*1.5 x width) + /// + /// \param arg Node that produces the input tensor. Input tensor represents image in I420 format (YUV). + /// \param format Conversion format. + explicit ConvertColorI420Base(const Output& arg, ColorConversion format); + + /// \brief Constructs a conversion operation from 3-plane input image in I420 format + /// In general case Y, U and V channels of image can be separated which means that operation needs three nodes + /// for Y, U and V planes respectively. All planes will have 1 channel and expect 'NHWC' layout + /// + /// \param arg_y Node that produces the input tensor for Y plane (NHWC layout). Shall have WxH dimensions + /// equal to image dimensions. 'C' dimension equals to 1. + /// + /// \param arg_u Node that produces the input tensor for U plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 1. + /// + /// \param arg_v Node that produces the input tensor for V plane (NHWC layout). 'H' is half of image height, + /// 'W' is half of image width, 'C' dimension equals to 1. + /// + /// \param format Conversion format. 
+ ConvertColorI420Base(const Output& arg_y, + const Output& arg_u, + const Output& arg_v, + ColorConversion format); + +public: + OPENVINO_OP("ConvertColorI420Base", "util"); + + void validate_and_infer_types() override; + + bool visit_attributes(AttributeVisitor& visitor) override; + + OPENVINO_SUPPRESS_DEPRECATED_START + bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + OPENVINO_SUPPRESS_DEPRECATED_END + + bool has_evaluate() const override; + +protected: + bool is_type_supported(const ov::element::Type& type) const; + + ColorConversion m_format = ColorConversion::I420_TO_RGB; +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp index e3e9c0a5f8383e..14f9dbc8d4691b 100644 --- a/ngraph/core/include/openvino/opsets/opset8_tbl.hpp +++ b/ngraph/core/include/openvino/opsets/opset8_tbl.hpp @@ -178,6 +178,8 @@ _OPENVINO_OP_REG(GatherND, ov::op::v8) _OPENVINO_OP_REG(AdaptiveAvgPool, ov::op::v8) _OPENVINO_OP_REG(AdaptiveMaxPool, ov::op::v8) _OPENVINO_OP_REG(DeformableConvolution, ov::op::v8) +_OPENVINO_OP_REG(I420toBGR, ov::op::v8) +_OPENVINO_OP_REG(I420toRGB, ov::op::v8) _OPENVINO_OP_REG(MatrixNms, ov::op::v8) _OPENVINO_OP_REG(MaxPool, ov::op::v8) _OPENVINO_OP_REG(MulticlassNms, ov::op::v8) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp index f863c4a686956b..a40eb0dbc3ca11 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/convert_color_nv12.hpp @@ -7,11 +7,31 @@ #include #include +#include "openvino/op/util/convert_color_i420_base.hpp" #include "openvino/op/util/convert_color_nv12_base.hpp" namespace ngraph { namespace runtime { namespace reference { + +template +std::tuple yuv_pixel_to_rgb(float y_val, float u_val, float v_val) { + auto c = y_val - 16.f; + auto d = u_val - 128.f; + auto e = v_val - 128.f; + auto clip = [](float a) -> T { + if (std::is_integral()) { + return static_cast(std::min(std::max(std::round(a), 0.f), 255.f)); + } else { + return static_cast(std::min(std::max(a, 0.f), 255.f)); + } + }; + auto b = clip(1.164f * c + 2.018f * d); + auto g = clip(1.164f * c - 0.391f * d - 0.813f * e); + auto r = clip(1.164f * c + 1.596f * e); + return std::tuple{r, g, b}; +} + template void color_convert_nv12(const T* arg_y, const T* arg_uv, @@ -33,19 +53,8 @@ void color_convert_nv12(const T* arg_y, auto uv_index = (h / 2) * image_w + (w / 2) * 2; auto u_val = static_cast(uv_ptr[uv_index]); auto v_val = static_cast(uv_ptr[uv_index + 1]); - auto c = y_val - 16.f; - auto d = u_val - 128.f; - auto e = v_val - 128.f; - auto clip = [](float a) -> T { - if (std::is_integral()) { - return static_cast(std::min(std::max(std::round(a), 0.f), 255.f)); - } else { - return static_cast(std::min(std::max(a, 0.f), 255.f)); - } - }; - auto b = clip(1.164f * c + 2.018f * d); - auto g = clip(1.164f * c - 0.391f * d - 0.813f * e); - auto r = clip(1.164f * c + 1.596f * e); + T r, g, b; + std::tie(r, g, b) = yuv_pixel_to_rgb(y_val, u_val, v_val); if (color_format == ov::op::util::ConvertColorNV12Base::ColorConversion::NV12_TO_RGB) { out[y_index * 3] = r; out[y_index * 3 + 1] = g; @@ -60,6 +69,45 @@ void color_convert_nv12(const T* arg_y, } } +template +void color_convert_i420(const T* arg_y, + const T* arg_u, + const T* 
arg_v, + T* out_ptr, + size_t batch_size, + size_t image_h, + size_t image_w, + size_t stride_y, + size_t stride_uv, + ov::op::util::ConvertColorI420Base::ColorConversion color_format) { + for (int batch = 0; batch < batch_size; batch++) { + T* out = out_ptr + batch * image_w * image_h * 3; + auto y_ptr = arg_y + batch * stride_y; + auto u_ptr = arg_u + batch * stride_uv; + auto v_ptr = arg_v + batch * stride_uv; + for (int h = 0; h < image_h; h++) { + for (int w = 0; w < image_w; w++) { + auto y_index = h * image_w + w; + auto y_val = static_cast(y_ptr[y_index]); + auto uv_index = (h / 2) * (image_w / 2) + (w / 2); + auto u_val = static_cast(u_ptr[uv_index]); + auto v_val = static_cast(v_ptr[uv_index]); + T r, g, b; + std::tie(r, g, b) = yuv_pixel_to_rgb(y_val, u_val, v_val); + if (color_format == ov::op::util::ConvertColorI420Base::ColorConversion::I420_TO_RGB) { + out[y_index * 3] = r; + out[y_index * 3 + 1] = g; + out[y_index * 3 + 2] = b; + } else if (color_format == ov::op::util::ConvertColorI420Base::ColorConversion::I420_TO_BGR) { + out[y_index * 3] = b; + out[y_index * 3 + 1] = g; + out[y_index * 3 + 2] = r; + } + } + } + } +} + } // namespace reference } // namespace runtime } // namespace ngraph diff --git a/ngraph/core/src/op/i420_to_bgr.cpp b/ngraph/core/src/op/i420_to_bgr.cpp new file mode 100644 index 00000000000000..b6eac7b3c35a0f --- /dev/null +++ b/ngraph/core/src/op/i420_to_bgr.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/i420_to_bgr.hpp" + +#include "itt.hpp" + +ov::op::v8::I420toBGR::I420toBGR(const Output& arg) + : util::ConvertColorI420Base(arg, util::ConvertColorI420Base::ColorConversion::I420_TO_BGR) { + constructor_validate_and_infer_types(); +} + +ov::op::v8::I420toBGR::I420toBGR(const Output& arg_y, const Output& arg_u, const Output& arg_v) + : util::ConvertColorI420Base(arg_y, arg_u, arg_v, util::ConvertColorI420Base::ColorConversion::I420_TO_BGR) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr ov::op::v8::I420toBGR::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_I420toBGR_clone_with_new_inputs); + OPENVINO_ASSERT(new_args.size() == 1 || new_args.size() == 3, "I420toBGR shall have one or three input nodes"); + if (new_args.size() == 1) { + return std::make_shared(new_args.at(0)); + } else { + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + } +} diff --git a/ngraph/core/src/op/i420_to_rgb.cpp b/ngraph/core/src/op/i420_to_rgb.cpp new file mode 100644 index 00000000000000..9375c6eb921580 --- /dev/null +++ b/ngraph/core/src/op/i420_to_rgb.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/i420_to_rgb.hpp" + +#include "itt.hpp" + +ov::op::v8::I420toRGB::I420toRGB(const Output& arg) + : util::ConvertColorI420Base(arg, util::ConvertColorI420Base::ColorConversion::I420_TO_RGB) { + constructor_validate_and_infer_types(); +} + +ov::op::v8::I420toRGB::I420toRGB(const Output& arg_y, const Output& arg_u, const Output& arg_v) + : util::ConvertColorI420Base(arg_y, arg_u, arg_v, util::ConvertColorI420Base::ColorConversion::I420_TO_RGB) { + constructor_validate_and_infer_types(); +} + +std::shared_ptr ov::op::v8::I420toRGB::clone_with_new_inputs(const OutputVector& new_args) const { + NGRAPH_OP_SCOPE(v0_NV12toRGB_clone_with_new_inputs); + OPENVINO_ASSERT(new_args.size() == 1 || new_args.size() == 3, "I420toRGB shall have one or 
three input nodes"); + if (new_args.size() == 1) { + return std::make_shared(new_args.at(0)); + } else { + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + } +} diff --git a/ngraph/core/src/op/util/convert_color_i420_base.cpp b/ngraph/core/src/op/util/convert_color_i420_base.cpp new file mode 100644 index 00000000000000..7382439cf64724 --- /dev/null +++ b/ngraph/core/src/op/util/convert_color_i420_base.cpp @@ -0,0 +1,216 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/convert_color_i420_base.hpp" + +#include + +#include "itt.hpp" +#include "ngraph/runtime/reference/convert_color_nv12.hpp" +#include "openvino/core/layout.hpp" + +namespace i420_op { +static const size_t N_DIM = 0; +static const size_t H_DIM = 1; +static const size_t W_DIM = 2; +static const size_t C_DIM = 3; +} // namespace i420_op + +ov::op::util::ConvertColorI420Base::ConvertColorI420Base(const Output& arg, ColorConversion format) + : Op({arg}), + m_format(format) {} + +ov::op::util::ConvertColorI420Base::ConvertColorI420Base(const Output& arg_y, + const Output& arg_u, + const Output& arg_v, + ColorConversion format) + : Op({arg_y, arg_u, arg_v}), + m_format(format) { + constructor_validate_and_infer_types(); +} + +void ov::op::util::ConvertColorI420Base::validate_and_infer_types() { + NGRAPH_OP_SCOPE(v8_Convert_I420_Base_validate_and_infer_types); + + NODE_VALIDATION_CHECK(this, + get_input_size() == 1 || get_input_size() == 3, + "I420 conversion shall have one or 3 inputs, but it is ", + get_input_size()); + auto single_plane = get_input_size() == 1; + auto y_type = get_input_element_type(0); + const auto& shape_y = get_input_partial_shape(0); + const auto one_channel_nhwc_shape = + PartialShape({Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 1}); + NODE_VALIDATION_CHECK(this, + shape_y.compatible(one_channel_nhwc_shape), + "Y input shall have 4 dimensions (N, H, W, C) with channels dimension equal to 1"); + auto out_shape = shape_y; + auto out_type = y_type; + if (out_shape.rank().is_dynamic()) { + out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 3}; + } + out_shape[i420_op::C_DIM] = 3; // 3 is number of channels (R, G, B) + if (single_plane) { + if (shape_y.rank().is_static() && shape_y[i420_op::H_DIM].is_static()) { + NODE_VALIDATION_CHECK(this, + shape_y[i420_op::H_DIM].get_length() % 3 == 0, + "I420 image height shall be divisible by 3, but it is ", + shape_y[i420_op::H_DIM].get_length()); + // E.g. 
if input shape height is 720 for I420, then real image height is 720 * 2 / 3 = 480 + out_shape[i420_op::H_DIM] = shape_y[i420_op::H_DIM].get_length() * 2 / 3; + } + } else { + auto u_type = get_input_element_type(1); + auto v_type = get_input_element_type(2); + NODE_VALIDATION_CHECK(this, + ov::element::Type::merge(out_type, out_type, u_type), + "Y, U, V inputs shall have compatible types, got ", + y_type, + u_type, + v_type); + NODE_VALIDATION_CHECK(this, + ov::element::Type::merge(out_type, out_type, v_type), + "Y, U, V inputs shall have compatible types, got ", + y_type, + u_type, + v_type); + // Validate Y/U/V shapes compatibility + const auto& shape_u = get_input_partial_shape(1); + NODE_VALIDATION_CHECK(this, + shape_u.compatible(one_channel_nhwc_shape), + "U input shall have 4 dimensions (N, H, W, C) with channels dimension equal to 1, got ", + shape_u); + const auto& shape_v = get_input_partial_shape(2); + NODE_VALIDATION_CHECK(this, + shape_v.compatible(one_channel_nhwc_shape), + "V input shall have 4 dimensions (N, H, W, C) with channels dimension equal to 1, got ", + shape_v); + NODE_VALIDATION_CHECK(this, + shape_u.compatible(shape_v), + "U shape shall be compatible with V shape: ", + shape_u, + shape_v); + auto shape_uv = shape_u; + PartialShape::merge_into(shape_uv, shape_v); + if (shape_uv.rank().is_static()) { + if (!shape_uv[i420_op::H_DIM].is_dynamic()) { + shape_uv[i420_op::H_DIM] *= 2; + } + if (!shape_uv[i420_op::W_DIM].is_dynamic()) { + shape_uv[i420_op::W_DIM] *= 2; + } + } + NODE_VALIDATION_CHECK(this, + shape_y.compatible(shape_uv), + "Y shape is inconsistent with U and V shapes: ", + shape_y, + shape_u, + shape_v); + PartialShape::merge_into(out_shape, shape_uv); + } + NODE_VALIDATION_CHECK(this, + out_shape[i420_op::H_DIM].is_dynamic() || out_shape[i420_op::H_DIM].get_length() % 2 == 0, + "Image height must be even, but it is ", + out_shape[i420_op::H_DIM].get_length()); + NODE_VALIDATION_CHECK(this, + out_shape[i420_op::W_DIM].is_dynamic() || out_shape[i420_op::W_DIM].get_length() % 2 == 0, + "Image width must be even, but it is ", + out_shape[i420_op::W_DIM].get_length()); + NODE_VALIDATION_CHECK(this, + is_type_supported(out_type), + "Input type shall have u8 or floating-point precision, got ", + out_type); + set_output_type(0, out_type, out_shape); +} + +namespace i420_op { +namespace { + +template +inline bool evaluate(const ov::HostTensorVector& input_values, + const ov::HostTensorPtr& output_value, + bool single_tensor, + ov::op::util::ConvertColorI420Base::ColorConversion color_format) { + using namespace ov::op::util; + const auto& y_tensor = input_values[0]; + auto batch_size = y_tensor->get_shape()[N_DIM]; + auto image_w = y_tensor->get_shape()[W_DIM]; + auto image_h = y_tensor->get_shape()[H_DIM]; + if (single_tensor) { + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(input_values, 1)); + image_h = image_h * 2 / 3; + } else { + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(input_values, 3)); + } + output_value->set_shape({batch_size, image_h, image_w, 3}); // 3 is RGB + if (single_tensor) { + ngraph::runtime::reference::color_convert_i420(y_tensor->get_data_ptr(), + y_tensor->get_data_ptr() + image_w * image_h, + y_tensor->get_data_ptr() + 5 * image_w * image_h / 4, + output_value->get_data_ptr(), + batch_size, + image_h, + image_w, + image_w * image_h * 3 / 2, + image_w * image_h * 3 / 2, + color_format); + } else { + const auto& u_tensor = input_values[1]; + const auto& v_tensor = input_values[2]; + 
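+            // Three-plane case: Y, U and V each arrive as separate NHWC tensors, so the per-batch
+            // strides passed below are the full Y plane size (image_h * image_w) and a quarter of
+            // that for each chroma plane ((image_h / 2) * (image_w / 2)).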
ngraph::runtime::reference::color_convert_i420(y_tensor->get_data_ptr(), + u_tensor->get_data_ptr(), + v_tensor->get_data_ptr(), + output_value->get_data_ptr(), + batch_size, + image_h, + image_w, + image_w * image_h, + image_w * image_h / 4, + color_format); + } + return true; +} + +bool evaluate_i420_convert(const ov::HostTensorVector& input_values, + const ov::HostTensorPtr& output_value, + bool single_tensor, + ov::op::util::ConvertColorI420Base::ColorConversion conv_format) { + bool rc = false; + switch (input_values[0]->get_element_type()) { + NGRAPH_TYPE_CASE(evaluate_i420_convert, u8, input_values, output_value, single_tensor, conv_format); + NGRAPH_TYPE_CASE(evaluate_i420_convert, f32, input_values, output_value, single_tensor, conv_format); + default: + break; + } + return rc; +} + +} // namespace +} // namespace i420_op + +bool ov::op::util::ConvertColorI420Base::visit_attributes(AttributeVisitor& visitor) { + return true; +} + +bool ov::op::util::ConvertColorI420Base::evaluate(const HostTensorVector& output_values, + const HostTensorVector& input_values) const { + NGRAPH_OP_SCOPE(v0_ConvertColorI420_evaluate); + OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(output_values, 1)); + NODE_VALIDATION_CHECK(this, + get_input_size() == 1 || get_input_size() == 3, + "I420 conversion shall have one or 3 inputs, but it is ", + get_input_size()); + auto single_plane = get_input_size() == 1; + return i420_op::evaluate_i420_convert(input_values, output_values[0], single_plane, m_format); +} + +bool ov::op::util::ConvertColorI420Base::has_evaluate() const { + NGRAPH_OP_SCOPE(v0_ConvertColorI420Base_has_evaluate); + + return is_type_supported(get_input_element_type(0)); +} + +bool ov::op::util::ConvertColorI420Base::is_type_supported(const ov::element::Type& type) const { + return type.is_dynamic() || type.is_real() || type == ov::element::u8; +} diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index edc3eaef0383e0..144e00229164b2 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -124,6 +124,7 @@ set(SRC type_prop/concat.cpp type_prop/constant.cpp type_prop/convert.cpp + type_prop/convert_color_i420.cpp type_prop/convert_color_nv12.cpp type_prop/convolution.cpp type_prop/convolution_backprop_data.cpp @@ -281,6 +282,7 @@ set(SRC visitors/op/clamp.cpp visitors/op/constant.cpp visitors/op/convert.cpp + visitors/op/convert_color_i420.cpp visitors/op/convert_color_nv12.cpp visitors/op/convolution_backprop.cpp visitors/op/convolution.cpp diff --git a/ngraph/test/opset.cpp b/ngraph/test/opset.cpp index a4b16ac49aef87..15752fd6be65c6 100644 --- a/ngraph/test/opset.cpp +++ b/ngraph/test/opset.cpp @@ -141,7 +141,7 @@ TEST(opset, opset8_dump) { std::cout << t.name << " "; } std::cout << std::endl; - ASSERT_EQ(165, opset.get_types_info().size()); + ASSERT_EQ(167, opset.get_types_info().size()); } class MyOpOld : public ov::op::Op { diff --git a/ngraph/test/type_prop/convert_color_i420.cpp b/ngraph/test/type_prop/convert_color_i420.cpp new file mode 100644 index 00000000000000..81c937ac3e4ab1 --- /dev/null +++ b/ngraph/test/type_prop/convert_color_i420.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "convert_color_i420_base.hpp" + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_i420_to_rgb, ConvertI420BaseTest, ::testing::Types); + +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_i420_to_bgr, ConvertI420BaseTest, ::testing::Types); diff --git a/ngraph/test/type_prop/convert_color_i420_base.hpp 
b/ngraph/test/type_prop/convert_color_i420_base.hpp new file mode 100644 index 00000000000000..4ade01896b0c4e --- /dev/null +++ b/ngraph/test/type_prop/convert_color_i420_base.hpp @@ -0,0 +1,370 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "gtest/gtest.h" +#include "openvino/op/op.hpp" +#include "openvino/opsets/opset8.hpp" + +using namespace ov; + +template +class ConvertI420BaseTest : public testing::Test +{ +}; + +TYPED_TEST_SUITE_P(ConvertI420BaseTest); + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor) +{ + auto param_shape = PartialShape{5, 3, 2, 1}; + auto out_shape = PartialShape{5, 2, 2, 3}; + auto param = std::make_shared(element::f32, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_element_type(), element::f32); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_dynamic) +{ + auto param_shape = PartialShape::dynamic(); + auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 3}; + auto param = std::make_shared(element::f32, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::f32); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_dynamic_dims) +{ + auto param_shape = PartialShape{Dimension::dynamic(), 3, Dimension::dynamic(), Dimension::dynamic()}; + auto out_shape = PartialShape{Dimension::dynamic(), 2, Dimension::dynamic(), 3}; + auto param = std::make_shared(element::u8, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::u8); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_dynamic_height) +{ + auto param_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 8, Dimension::dynamic()}; + auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 8, 3}; + auto param = std::make_shared(element::u8, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::u8); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_dynamic_type) +{ + auto param_shape = PartialShape{1, 6, 8, 1}; + auto out_shape = PartialShape{1, 4, 8, 3}; + auto param = std::make_shared(element::dynamic, param_shape); + auto op = std::make_shared(param); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::dynamic); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_error_channels) +{ + auto param_shape = PartialShape{1, 3, 4, 2}; // shall be 1 channel, not 2 + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_error_dims_5) +{ + auto param_shape = PartialShape{1, 3, 3, 1, 1}; // must be 4 dimensions + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_error_dims_3) +{ + auto param_shape = PartialShape{640, 480, 1}; // must be 4 dimensions + auto param = std::make_shared(element::u8, param_shape); + 
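+    // A 3D shape cannot be matched against the required 4D NHWC layout, so constructing the op
+    // is expected to fail validation.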
EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_error_height) +{ + auto param_shape = PartialShape{1, 4, 6, 1}; // height = 4, can't split to Y and UV + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_error_width_odd) +{ + auto param_shape = PartialShape{1, 6, 5, 1}; // width is odd, can't split to U and V + auto param = std::make_shared(element::u8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_single_tensor_error_i8) +{ + auto param_shape = PartialShape{1, 640, 480, 1}; + auto param = std::make_shared(element::i8, param_shape); + EXPECT_THROW(std::make_shared(param), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_simple) +{ + auto param_shape_y = PartialShape{10, 480, 640, 1}; + auto param_shape_uv = PartialShape{10, 240, 320, 1}; + auto out_shape = PartialShape{10, 480, 640, 3}; + auto param_y = std::make_shared(element::u8, param_shape_y); + auto param_u = std::make_shared(element::u8, param_shape_uv); + auto param_v = std::make_shared(element::u8, param_shape_uv); + auto op = std::make_shared(param_y, param_u, param_v); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::u8); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_dynamic) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_u = PartialShape::dynamic(); + auto param_shape_v = PartialShape::dynamic(); + auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 3}; + auto param_y = std::make_shared(element::f32, param_shape_y); + auto param_u = std::make_shared(element::f32, param_shape_u); + auto param_v = std::make_shared(element::f32, param_shape_v); + auto op = std::make_shared(param_y, param_u, param_v); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::f32); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_y_dynamic) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_uv = PartialShape{1, 3, 2, 1}; + auto out_shape = PartialShape{1, 6, 4, 3}; + auto param_y = std::make_shared(element::bf16, param_shape_y); + auto param_u = std::make_shared(element::bf16, param_shape_uv); + auto param_v = std::make_shared(element::bf16, param_shape_uv); + auto op = std::make_shared(param_y, param_u, param_v); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::bf16); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_uv_dynamic) +{ + auto param_shape_y = PartialShape{1, 4, 4, 1}; + auto param_shape_uv = PartialShape::dynamic(); + auto out_shape = PartialShape{1, 4, 4, 3}; + auto param_y = std::make_shared(element::f16, param_shape_y); + auto param_u = std::make_shared(element::f16, param_shape_uv); + auto param_v = std::make_shared(element::f16, param_shape_uv); + auto op = std::make_shared(param_y, param_u, param_v); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), element::f16); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_dynamic_types) +{ + auto param_shape_y = PartialShape{1, 4, 4, 1}; + auto param_shape_uv = PartialShape{1, 2, 
2, 1}; + auto out_shape = PartialShape{1, 4, 4, 3}; + auto y_type = element::dynamic; + auto uv_type = element::dynamic; + auto out_type = element::dynamic; + auto param_y = std::make_shared(y_type, param_shape_y); + auto param_u = std::make_shared(uv_type, param_shape_uv); + auto param_v = std::make_shared(uv_type, param_shape_uv); + auto op = std::make_shared(param_y, param_u, param_v); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), out_type); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_uv_type) +{ + auto param_shape_y = PartialShape{1, 4, 4, 1}; + auto param_shape_uv = PartialShape{1, 2, 2, 1}; + auto out_shape = PartialShape{1, 4, 4, 3}; + auto y_type = element::dynamic; + auto uv_type = element::f64; + auto out_type = element::f64; + auto param_y = std::make_shared(y_type, param_shape_y); + auto param_u = std::make_shared(uv_type, param_shape_uv); + auto param_v = std::make_shared(uv_type, param_shape_uv); + auto op = std::make_shared(param_y, param_u, param_v); + ASSERT_EQ(op->output(0).get_partial_shape(), out_shape); + ASSERT_EQ(op->output(0).get_element_type(), out_type); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_type_mismatch_y) +{ + auto param_y = std::make_shared(element::u8, PartialShape::dynamic()); + auto param_u = std::make_shared(element::f32, PartialShape::dynamic()); + auto param_v = std::make_shared(element::f32, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_u, param_v), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_type_mismatch_u) +{ + auto param_y = std::make_shared(element::f32, PartialShape::dynamic()); + auto param_u = std::make_shared(element::u8, PartialShape::dynamic()); + auto param_v = std::make_shared(element::f32, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_u, param_v), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_type_mismatch_v) +{ + auto param_y = std::make_shared(element::f32, PartialShape::dynamic()); + auto param_u = std::make_shared(element::f32, PartialShape::dynamic()); + auto param_v = std::make_shared(element::u8, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_u, param_v), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_u_type) +{ + auto param_y = std::make_shared(element::dynamic, PartialShape::dynamic()); + auto param_u = std::make_shared(element::i8, PartialShape::dynamic()); + auto param_v = std::make_shared(element::dynamic, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_u, param_v), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_v_type) +{ + auto param_y = std::make_shared(element::dynamic, PartialShape::dynamic()); + auto param_u = std::make_shared(element::dynamic, PartialShape::dynamic()); + auto param_v = std::make_shared(element::i8, PartialShape::dynamic()); + EXPECT_THROW(std::make_shared(param_y, param_u, param_v), ov::AssertFailure); +} + +TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_5dims) +{ + auto param_shape_y = PartialShape::dynamic(); + auto param_shape_u = PartialShape{1, 1, 1, 1, 1}; + auto param_shape_v = PartialShape::dynamic(); + auto param_1 = std::make_shared(element::u8, param_shape_y); + auto param_2 = std::make_shared(element::u8, param_shape_u); + auto param_3 = std::make_shared(element::u8, param_shape_v); + 
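+    // The 5D plane (param_2) must be rejected regardless of which of the Y, U or V inputs it is
+    // bound to, so every argument order is exercised.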
+    EXPECT_THROW(std::make_shared<TypeParam>(param_1, param_2, param_3), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_1, param_3, param_2), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_2, param_1, param_3), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_2, param_3, param_1), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_3, param_1, param_2), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_3, param_2, param_1), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_3dims)
+{
+    auto param_shape_good = PartialShape::dynamic();
+    auto param_shape_bad = PartialShape{1, 1, 1};
+    auto param_1 = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_good);
+    auto param_2 = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_bad);
+    auto param_3 = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_good);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_1, param_2, param_3), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_1, param_3, param_2), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_2, param_1, param_3), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_2, param_3, param_1), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_3, param_1, param_2), ov::AssertFailure);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_3, param_2, param_1), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_batch)
+{
+    auto param_shape_y = PartialShape{2, 480, 640, 1};
+    auto param_shape_uv = PartialShape{1, 240, 320, 1};
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_y);
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    auto param_v = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_y, param_u, param_v), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_height)
+{
+    auto param_shape_y = PartialShape{2, 480, 640, 1};
+    auto param_shape_uv = PartialShape{2, 480, 320, 2};
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_y);
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    auto param_v = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_y, param_u, param_v), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_height_odd)
+{
+    auto param_shape_y = PartialShape{2, 3, 2, 1};  // height = 3 is invalid: U/V planes must be half the Y height
+    auto param_shape_uv = PartialShape::dynamic();
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_y);
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    auto param_v = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_y, param_u, param_v), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_width)
+{
+    auto param_shape_y = PartialShape{2, 480, 640, 1};
+    auto param_shape_uv = PartialShape{2, 240, 640, 2};
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_y);
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    auto param_v = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_y, param_u, param_v), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_width_odd)
+{
+    auto param_shape_y = PartialShape{2, 4, 3, 1};  // width = 3 is invalid: U/V planes must be half the Y width
+    auto param_shape_uv = PartialShape::dynamic();
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_y);
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    auto param_v = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_y, param_u, param_v), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_3_plane_error_channels)
+{
+    auto param_shape_y = PartialShape{2, 480, 640, 1};
+    auto param_shape_uv = PartialShape{2, 240, 320, 2};
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_y);
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    auto param_v = std::make_shared<ov::op::v0::Parameter>(element::u8, param_shape_uv);
+    EXPECT_THROW(std::make_shared<TypeParam>(param_y, param_u, param_v), ov::AssertFailure);
+}
+
+TYPED_TEST_P(ConvertI420BaseTest, shape_inference_error_2_planes)
+{
+    auto param_y = std::make_shared<ov::op::v0::Parameter>(element::dynamic, PartialShape::dynamic());
+    auto param_u = std::make_shared<ov::op::v0::Parameter>(element::dynamic, PartialShape::dynamic());
+    auto empty = std::make_shared<TypeParam>();
+    empty->set_arguments(NodeVector{param_y, param_u});
+
+    EXPECT_THROW(empty->constructor_validate_and_infer_types(), ov::AssertFailure);
+}
+
+
+REGISTER_TYPED_TEST_SUITE_P(ConvertI420BaseTest,
+                            shape_inference_single_tensor,
+                            shape_inference_single_tensor_dynamic,
+                            shape_inference_single_tensor_dynamic_dims,
+                            shape_inference_single_tensor_dynamic_height,
+                            shape_inference_single_tensor_dynamic_type,
+                            shape_inference_single_tensor_error_channels,
+                            shape_inference_single_tensor_error_dims_5,
+                            shape_inference_single_tensor_error_dims_3,
+                            shape_inference_single_tensor_error_height,
+                            shape_inference_single_tensor_error_width_odd,
+                            shape_inference_single_tensor_error_i8,
+                            shape_inference_3_plane_simple,
+                            shape_inference_3_plane_dynamic,
+                            shape_inference_3_plane_y_dynamic,
+                            shape_inference_3_plane_uv_dynamic,
+                            shape_inference_3_plane_dynamic_types,
+                            shape_inference_3_plane_uv_type,
+                            shape_inference_3_plane_error_type_mismatch_y,
+                            shape_inference_3_plane_error_type_mismatch_u,
+                            shape_inference_3_plane_error_type_mismatch_v,
+                            shape_inference_3_plane_error_u_type,
+                            shape_inference_3_plane_error_v_type,
+                            shape_inference_3_plane_error_5dims,
+                            shape_inference_3_plane_error_3dims,
+                            shape_inference_3_plane_error_batch,
+                            shape_inference_3_plane_error_height,
+                            shape_inference_3_plane_error_height_odd,
+                            shape_inference_3_plane_error_width,
+                            shape_inference_3_plane_error_width_odd,
+                            shape_inference_3_plane_error_channels,
+                            shape_inference_error_2_planes
+);
diff --git a/ngraph/test/visitors/op/convert_color_i420.cpp b/ngraph/test/visitors/op/convert_color_i420.cpp
new file mode 100644
index 00000000000000..5c34a46d117858
--- /dev/null
+++ b/ngraph/test/visitors/op/convert_color_i420.cpp
@@ -0,0 +1,54 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/op/util/attr_types.hpp"
+#include "openvino/op/i420_to_bgr.hpp"
+#include "openvino/op/i420_to_rgb.hpp"
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ov;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
+TEST(attributes, convert_color_i420_rgb) {
+    NodeBuilder::get_ops().register_factory<op::v8::I420toRGB>();
+    auto data = make_shared<op::v0::Parameter>(element::u8, Shape{3, 720, 640, 1});
+    auto convert_color = make_shared<op::v8::I420toRGB>(data);
+    NodeBuilder builder(convert_color);
+    const auto expected_attr_count = 0;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+}
+
+TEST(attributes, convert_color_i420_bgr) {
+    NodeBuilder::get_ops().register_factory<op::v8::I420toBGR>();
+    auto data = make_shared<op::v0::Parameter>(element::u8, Shape{3, 720, 640, 1});
+    auto convert_color = make_shared<op::v8::I420toBGR>(data);
+    NodeBuilder builder(convert_color);
+    const auto expected_attr_count = 0;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+}
+
+TEST(attributes, convert_color_i420_rgb_3planes) {
+    NodeBuilder::get_ops().register_factory<op::v8::I420toRGB>();
+    auto data1 = make_shared<op::v0::Parameter>(element::u8, Shape{3, 480, 640, 1});
+    auto data2 = make_shared<op::v0::Parameter>(element::u8, Shape{3, 240, 320, 1});
+    auto data3 = make_shared<op::v0::Parameter>(element::u8, Shape{3, 240, 320, 1});
+    auto convert_color = make_shared<op::v8::I420toRGB>(data1, data2, data3);
+    NodeBuilder builder(convert_color);
+    const auto expected_attr_count = 0;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+}
+
+TEST(attributes, convert_color_i420_bgr_3planes) {
+    NodeBuilder::get_ops().register_factory<op::v8::I420toBGR>();
+    auto data1 = make_shared<op::v0::Parameter>(element::u8, Shape{3, 480, 640, 1});
+    auto data2 = make_shared<op::v0::Parameter>(element::u8, Shape{3, 240, 320, 1});
+    auto data3 = make_shared<op::v0::Parameter>(element::u8, Shape{3, 240, 320, 1});
+    auto convert_color = make_shared<op::v8::I420toBGR>(data1, data2, data3);
+    NodeBuilder builder(convert_color);
+    const auto expected_attr_count = 0;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+}
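Note: the REGISTER_TYPED_TEST_SUITE_P call above only registers the typed shape-inference suite; it still has to be instantiated for the concrete op classes elsewhere in the test sources. A minimal sketch of such an instantiation, using the standard GoogleTest typed-test macros (the alias name and "type_prop" label below are illustrative, not taken from this diff), would be:

#include <gtest/gtest.h>
#include "openvino/op/i420_to_bgr.hpp"
#include "openvino/op/i420_to_rgb.hpp"

// Hypothetical instantiation sketch: run every ConvertI420BaseTest case once
// with I420toRGB and once with I420toBGR as the TypeParam.
using ConvertI420OpTypes = ::testing::Types<ov::op::v8::I420toRGB, ov::op::v8::I420toBGR>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, ConvertI420BaseTest, ConvertI420OpTypes);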