diff --git a/inference-engine/src/gna_plugin/backend/make_pwl.cpp b/inference-engine/src/gna_plugin/backend/make_pwl.cpp
index 6c3c3433b08e21..29522b63dffe93 100644
--- a/inference-engine/src/gna_plugin/backend/make_pwl.cpp
+++ b/inference-engine/src/gna_plugin/backend/make_pwl.cpp
@@ -282,10 +282,10 @@ void make_gna_pwl(const DnnActivation fun,
         int16_t y_lower = y_min;
         int16_t y_upper = y_max;
         if (fun.fqParams.set) {
-            x_lower = FLOAT_TO_INT32(*fun.fqParams.input_low * 1.25 * in_scale);
-            x_upper = FLOAT_TO_INT32(*fun.fqParams.input_high * 1.25 * in_scale);
-            y_lower = FLOAT_TO_INT16(*fun.fqParams.input_low * 1.25 * out_scale);
-            y_upper = FLOAT_TO_INT16(*fun.fqParams.input_high * 1.25 * out_scale);
+            x_lower = std::max(FLOAT_TO_INT64(*fun.fqParams.input_low * 1.25 * in_scale), static_cast<int64_t>(x_lower));
+            x_upper = std::min(FLOAT_TO_INT64(*fun.fqParams.input_high * 1.25 * in_scale), static_cast<int64_t>(x_upper));
+            y_lower = std::max(FLOAT_TO_INT32(*fun.fqParams.input_low * 1.25 * out_scale), static_cast<int32_t>(y_lower));
+            y_upper = std::min(FLOAT_TO_INT32(*fun.fqParams.input_high * 1.25 * out_scale), static_cast<int32_t>(y_upper));
         } else {
             if (x_lower < y_lower * in_scale / out_scale) x_lower = FLOAT_TO_INT32(y_lower * in_scale / out_scale);
             if (y_lower < x_lower * out_scale / in_scale) y_lower = FLOAT_TO_INT16(x_lower * out_scale / in_scale);
@@ -365,10 +365,10 @@ void make_gna_pwl(const DnnActivation fun,
         int16_t y_lower = y_min;
         int16_t y_upper = y_max;
         if (fun == kActFakeQuantize && fun.fqParams.set) {
-            x_lower = *fun.fqParams.input_low * in_scale;
-            x_upper = *fun.fqParams.input_high * in_scale;
-            y_lower = *fun.fqParams.input_low * out_scale;
-            y_upper = *fun.fqParams.input_high * out_scale;
+            x_lower = std::max(static_cast<int64_t>(*fun.fqParams.input_low * in_scale), static_cast<int64_t>(x_lower));
+            x_upper = std::min(static_cast<int64_t>(*fun.fqParams.input_high * in_scale), static_cast<int64_t>(x_upper));
+            y_lower = std::max(static_cast<int32_t>(*fun.fqParams.input_low * out_scale), static_cast<int32_t>(y_lower));
+            y_upper = std::min(static_cast<int32_t>(*fun.fqParams.input_high * out_scale), static_cast<int32_t>(y_upper));
         }
         auto n_segments = 2;
         if (fun == kActKaldiLstmClipping) {
diff --git a/inference-engine/src/gna_plugin/round_float_define.hpp b/inference-engine/src/gna_plugin/round_float_define.hpp
index 584d14ecc1ac3f..2fd4b1422be4f5 100644
--- a/inference-engine/src/gna_plugin/round_float_define.hpp
+++ b/inference-engine/src/gna_plugin/round_float_define.hpp
@@ -10,3 +10,4 @@
 #define FLOAT_TO_INT8(a) static_cast<int8_t>(((a) < 0)?((a) - 0.5f):((a) + 0.5f))
 #define FLOAT_TO_INT16(a) static_cast<int16_t>(((a) < 0)?((a) - 0.5f):((a) + 0.5f))
 #define FLOAT_TO_INT32(a) static_cast<int32_t>(((a) < 0)?((a)-0.5f):((a)+0.5f))
+#define FLOAT_TO_INT64(a) static_cast<int64_t>(((a) < 0)?((a)-0.5f):((a)+0.5f))
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/conv_fq_eltwise.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/conv_fq_eltwise.cpp
new file mode 100644
index 00000000000000..0a09c33f6df8f6
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/subgraph_tests/conv_fq_eltwise.cpp
@@ -0,0 +1,59 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <gtest/gtest.h>
+
+#include "subgraph_tests/conv_fq_eltwise.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace SubgraphTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16,
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}},
+    {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}}
+};
+
+const std::vector<std::vector<size_t>> inputShapes = {
+    {1, 1024}
+};
+
+const size_t levels = 65535;
+
+const std::vector<std::vector<float>> inputParams = {{-10, 10, 1}};
+
+const auto fqParams = ::testing::Combine(
+    ::testing::Values(levels),
+    ::testing::ValuesIn(inputParams)
+);
+
+const std::vector<std::vector<size_t>> kernels = {{1, 3}};
+const std::vector<std::vector<size_t>> strides = {{1, 1}};
+const std::vector<size_t> inputChannels = {8};
+const std::vector<size_t> outputChannels {4};
+
+const auto convParams = ::testing::Combine(
+    ::testing::ValuesIn(kernels),
+    ::testing::ValuesIn(strides),
+    ::testing::ValuesIn(inputChannels),
+    ::testing::ValuesIn(outputChannels)
+);
+
+INSTANTIATE_TEST_CASE_P(smoke_ConvFqEltwiseTest, ConvFqEltwiseTest,
+                        ::testing::Combine(
+                            fqParams,
+                            convParams,
+                            ::testing::ValuesIn(netPrecisions),
+                            ::testing::ValuesIn(inputShapes),
+                            ::testing::Values(CommonTestUtils::DEVICE_GNA),
+                            ::testing::ValuesIn(configs)),
+                        ConvFqEltwiseTest::getTestCaseName);
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp
new file mode 100644
index 00000000000000..453e93a493c0cf
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/conv_fq_eltwise.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/subgraph/conv_fq_eltwise.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+TEST_P(ConvFqEltwiseTest, CompareWithRefs) {
+    Run();
+}
+
+}  // namespace SubgraphTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp
new file mode 100644
index 00000000000000..c55c8e4761a813
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/conv_fq_eltwise.hpp
@@ -0,0 +1,55 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+typedef std::tuple<
+    size_t,                // levels
+    std::vector<float>     // input generator data: low, high, resolution
+> FqSpecificParams;
+
+typedef std::tuple<
+    std::vector<size_t>,   // Kernel Shape
+    std::vector<size_t>,   // Strides
+    size_t,                // Input channels
+    size_t                 // Output channels
+> ConvParams;
+
+typedef std::tuple<
+    FqSpecificParams,
+    ConvParams,
+    InferenceEngine::Precision,         // Net precision
+    InferenceEngine::SizeVector,        // Input shapes
+    LayerTestsUtils::TargetDevice,      // Device name
+    std::map<std::string, std::string>  // Additional backend configuration and alias name to it
+> ConvFqEltwiseTestParamsSet;
+
+class ConvFqEltwiseTest : public testing::WithParamInterface<ConvFqEltwiseTestParamsSet>,
+                          virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<ConvFqEltwiseTestParamsSet> obj);
+
+protected:
+    void SetUp() override;
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
+
+protected:
+    float inputDataMin = 0.0;
+    float inputDataMax = 10.0;
+    float inputDataResolution = 1.0;
+    int32_t seed = 1;
+};
+
+}  // namespace SubgraphTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp
new file mode 100644
index 00000000000000..830ba8638b3d24
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp
@@ -0,0 +1,117 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/subgraph/conv_fq_eltwise.hpp"
+
+namespace SubgraphTestsDefinitions {
+
+std::string ConvFqEltwiseTest::getTestCaseName(testing::TestParamInfo<ConvFqEltwiseTestParamsSet> obj) {
+    FqSpecificParams fqParams;
+    ConvParams convParams;
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::SizeVector inputShapes;
+    std::string targetDevice;
+    std::map<std::string, std::string> config;
+    std::tie(fqParams, convParams, netPrecision, inputShapes, targetDevice, config) = obj.param;
+
+    size_t levels;
+    std::vector<float> inputArg;
+    std::tie(levels, inputArg) = fqParams;
+
+    std::vector<size_t> kernelShape;
+    std::vector<size_t> strides;
+    size_t inputChannels;
+    size_t outputChannels;
+    std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "LEVELS=" << levels << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "trgDev=" << targetDevice;
+    for (auto const& configItem : config) {
+        result << "_configItem=" << configItem.first << "_" << configItem.second;
+    }
+    if (inputArg.size() == 3) {
+        result << "_inputArg=" << inputArg[0] << "_" << inputArg[1] << "_" << inputArg[2];
+    }
+    result << "_KERNEL=" << CommonTestUtils::vec2str(kernelShape) << "_";
+    result << "STRIDES=" << CommonTestUtils::vec2str(strides) << "_";
+    result << "IC=" << inputChannels << "_";
+    result << "OC=" << outputChannels;
+    return result.str();
+}
+
+void ConvFqEltwiseTest::SetUp() {
+    FqSpecificParams fqParams;
+    ConvParams convParams;
+    std::vector<size_t> inputShape;
+    std::map<std::string, std::string> config;
+    auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
+    std::tie(fqParams, convParams, netPrecision, inputShape, targetDevice, config) = this->GetParam();
+    configuration.insert(config.begin(), config.end());
+
+    size_t levels;
+    std::vector<float> inputArg;
+    std::tie(levels, inputArg) = fqParams;
+    if (inputArg.size() == 3) {
+        inputDataMin = inputArg[0];
+        inputDataMax = inputArg[1];
+        inputDataResolution = inputArg[2];
+    }
+
+    std::vector<size_t> kernelShape;
+    std::vector<size_t> strides;
+    size_t inputChannels;
+    size_t outputChannels;
+    std::tie(kernelShape, strides, inputChannels, outputChannels) = convParams;
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+
+    const int seed = 0;
+    std::mt19937 gen(seed);
+
+    std::vector<size_t> convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
+    auto reshapePattern1 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], reshapePattern1, false);
+
+    float weightVal = 0.2;
+    auto filterWeightsNode = ngraph::builder::makeConstant<float>(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]},
+                                                                  { weightVal });
+    auto convLowNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector<size_t>{ 1 }, std::vector<float>{ -weightVal });
+    auto convHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector<size_t>{ 1 }, std::vector<float>{ weightVal });
+    auto convWeightsFQNode = std::make_shared<ngraph::opset1::FakeQuantize>(filterWeightsNode,
+        convLowNode, convHighNode, convLowNode, convHighNode, levels);
+    auto convWeightsFQ = std::dynamic_pointer_cast<ngraph::opset1::FakeQuantize>(convWeightsFQNode);
+    auto conv = std::make_shared<ngraph::opset1::Convolution>(reshape1, convWeightsFQ, strides, std::vector<ptrdiff_t>{ 0, 0 },
+                                                              std::vector<ptrdiff_t>{ 0, 0 }, std::vector<size_t>{ 1, 1 },
+                                                              ngraph::op::PadType::VALID);
+    auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector<float>{ 0.0f });
+    auto add_1 = std::make_shared<ngraph::opset1::Add>(conv, biasesWeightsNode);
+
+    auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1;
+    auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1;
+    std::vector<size_t> outFormShapes = {1,  outputChannels * widthAfterConv * heightAfterConv };
+
+    auto lowNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector<size_t>{ 1 },
+                                                 std::vector<float>{ inputDataMin * weightVal * kernelShape[1] * 1.5f });
+    auto highNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector<size_t>{ 1 },
+                                                  std::vector<float>{ inputDataMax * weightVal * kernelShape[1] * 1.5f });
+    auto fq = std::make_shared<ngraph::opset1::FakeQuantize>(add_1, lowNode, highNode, lowNode, highNode, levels);
+
+    auto constNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector<float>{ 0.5f });
+    auto add_2 = std::make_shared<ngraph::opset1::Add>(fq, constNode);
+
+    auto reshapePattern2 = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(add_2, reshapePattern2, false);
+
+    function = std::make_shared<ngraph::Function>(reshape2, params, "convFqEltwise");
+}
+
+InferenceEngine::Blob::Ptr ConvFqEltwiseTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
+    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution,
+                                            seed);
+}
+}  // namespace SubgraphTestsDefinitions
\ No newline at end of file