[ IE TESTS ] Update tensor comparison function according to plugin requirements (#23226)

### Details:
- *Comparison function was changed to compare tensors element by element (a minimal sketch of the check follows this list)*
- *`std::abs(ref_value - plugin_value) <= abs_threshold + rel_threshold * ref_value`*
- *`abs_threshold` = `std::max(std::numeric_limits<plugin_element_type>::epsilon(), std::numeric_limits<ref_element_type>::epsilon())`*
- *`rel_threshold = eps_by_expected_type()`, which is based on half the bit length of the mantissa*
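
A minimal, self-contained sketch of the element-wise check above. The helper names, the `float` element type, and the use of `std::abs(ref_value)` in the relative term are assumptions for illustration, not the actual shared test-utility code:

```cpp
#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

// Sketch of the per-element comparison described in the Details section.
bool all_elements_close(const std::vector<float>& ref,
                        const std::vector<float>& plugin,
                        double abs_threshold,
                        double rel_threshold) {
    if (ref.size() != plugin.size())
        return false;
    for (std::size_t i = 0; i < ref.size(); ++i) {
        const double ref_value = ref[i];
        const double plugin_value = plugin[i];
        // An element passes if its error fits the combined absolute/relative bound.
        if (std::abs(ref_value - plugin_value) >
            abs_threshold + rel_threshold * std::abs(ref_value))
            return false;
    }
    return true;
}

// Illustration of a relative tolerance derived from half the mantissa bit
// length: f32 has 23 explicit mantissa bits, so the tolerance is roughly
// 2^-11 (~4.9e-4). This mirrors the idea behind eps_by_expected_type(),
// not its exact implementation.
inline double half_mantissa_eps_f32() {
    return std::ldexp(1.0, -((std::numeric_limits<float>::digits - 1) / 2));
}
```

With this scheme, a test only needs to override `abs_threshold` (as the changed test files below do) when the default epsilon-based bound is too tight for a given precision or backend.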

### Tickets:
 - [CVS-133173](https://jira.devtools.intel.com/browse/CVS-133173)
 - [CVS-135540](https://jira.devtools.intel.com/browse/CVS-135540)

---------

Co-authored-by: sbalandi <[email protected]>
iefode and sbalandi authored Mar 23, 2024
1 parent b520763 commit aebf814
Showing 37 changed files with 597 additions and 132 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/job_cxx_unit_tests.yml
@@ -215,6 +215,12 @@ jobs:
--gtest_filter=*smoke* \
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml
- name: OV utils unit tests
run: |
source ${INSTALL_DIR}/setupvars.sh
${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_util_tests.xml
- name: OpenVINO C API tests
if: fromJSON(inputs.affected-components).C_API.test
run: |

@@ -104,6 +104,7 @@ class AUGRUCellCPUTest : public testing::WithParamInterface<AUGRUCellCpuSpecific

if (additionalConfig[ov::hint::inference_precision.name()] == ov::element::bf16) {
selectedType = makeSelectedTypeStr(selectedType, ElementType::bf16);
abs_threshold = 2e-2;
} else {
selectedType = makeSelectedTypeStr(selectedType, netPrecision);
}

@@ -132,6 +132,14 @@ void ActivationLayerCPUTest::SetUp() {
auto activation = utils::make_activation(params, netPrecision, activationType, activationShapes, constantsValue);
activation->get_rt_info() = getCPUInfo();
function = std::make_shared<ov::Model>(ov::NodeVector{activation}, ov::ParameterVector{params}, "Activation");
#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)
if (netPrecision == ov::element::f32 && outPrecision == ov::element::f32) {
abs_threshold = 8e-4;
}
#endif
if (netPrecision == ov::element::bf16 && outPrecision == ov::element::f32) {
abs_threshold = 6e-2;
}
}

std::string ActivationLayerCPUTest::getPrimitiveType(const utils::ActivationTypes& activation_type,

@@ -98,6 +98,7 @@ void PoolingLayerCPUTest::SetUp() {

std::shared_ptr<ov::Node> poolInput = params[0];
if (isInt8) {
abs_threshold = 2e-2;
ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1);
poolInput = ov::test::utils::make_fake_quantize(poolInput, inPrc, 256, newShape);
}

@@ -138,6 +138,13 @@ void ReduceCPULayerTest::SetUp() {
}

function = makeNgraphFunction(netPrecision, params, reduce, "Reduce");

if (ov::with_cpu_x86_avx512_core_amx()) {
if (netPrecision == ov::element::f32 && configuration.count(ov::hint::inference_precision.name()) &&
configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) {
abs_threshold = 5e-3;
}
}
}

void ReduceCPULayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {

@@ -217,6 +217,10 @@ class DefConvLayerCPUTest : public testing::WithParamInterface<DefConvLayerCPUTe
}

function = makeNgraphFunction(netPrecision, parameters, deformable_conv, "deformable_convolution");

if (netPrecision == ov::element::f32) {
abs_threshold = 5e-6;
}
}
};


@@ -123,6 +123,10 @@ class FakeQuantizeLayerCPUTest : public testing::WithParamInterface<fqLayerTestP
}

function = makeNgraphFunction(inPrec, params, fq, "FakeQuantizeCPU");

if (inPrec == ov::element::f32) {
abs_threshold = 1e-4;
}
}

void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {

@@ -59,6 +59,9 @@ class LRNLayerCPUTest : public testing::WithParamInterface<LRNParams>, public ov
auto axesNode = ov::op::v0::Constant::create(ov::element::i32, { axes.size() }, axes);
auto lrn = std::make_shared<ov::op::v0::LRN>(params[0], axesNode, alpha, beta, bias, size);
function = makeNgraphFunction(inputPrecision, params, lrn, "LRN");
if (inputPrecision == ov::element::f32) {
abs_threshold = 5e-3;
}
}
};


@@ -146,6 +146,10 @@ class RDFTTestCPU : public testing::WithParamInterface<std::tuple<ov::element::T
}
}
function = std::make_shared<Model>(rdft, inputs);

if (precision == ov::element::f32) {
abs_threshold = 1e-4;
}
}

void generate_inputs(const std::vector<Shape>& targetInputStaticShapes) override {

@@ -290,8 +290,6 @@ class ConvSumBroadcastTest : public ConvSumInPlaceTest {
};

TEST_P(ConvSumBroadcastTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

run();

CheckPluginRelatedResults(compiledModel, "Convolution");

@@ -120,6 +120,8 @@ class ConvsAndSums : virtual public SubgraphBaseStaticTest {

auto result = std::make_shared<ov::op::v0::Result>(relu3);
function = std::make_shared<ov::Model>(result, params, "SimpleNet");

abs_threshold = 9e-4;
}
};


@@ -70,6 +70,9 @@ class FuseScaleShiftAndFakeQuantizeTest : public testing::WithParamInterface<Fus
quantizeIntervals[3]);
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(quantize)};
function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "FuseScaleShiftAndQuantize");
if (inputPrecision == element::f32) {
abs_threshold = 2e-7;
}
}
};


@@ -279,6 +279,14 @@ class MatmulWeightsDecompression : public testing::WithParamInterface<MatmulWeig
std::tie(postOpMgrPtr, fusedOps) = fusing_params;
init_input_shapes({shape_params.data_shape, {{}, {{shape_params.weights_shape}}}});

// if dynamic quantization is enabled
if (configuration.count(ov::hint::dynamic_quantization_group_size.name()) &&
configuration.at(ov::hint::dynamic_quantization_group_size.name()) != 0) {
abs_threshold = 0.1;
} else if (!configuration.count(ov::hint::dynamic_quantization_group_size.name())) {
abs_threshold = 5e-3;
}

ElementType netType = ov::element::f32;
inType = outType = netType;
