From 9ae3c5f648e715aa030afc150905449f58b84139 Mon Sep 17 00:00:00 2001
From: Maksim Kutakov
Date: Tue, 13 Oct 2020 13:00:11 +0300
Subject: [PATCH] [IE TESTS][CPU BF16] Most of the bfloat16 tests have been fixed.

---
 .../plugin/cpu/bfloat16/bf16_network_restoring.cpp        | 8 ++++----
 .../functional/plugin/cpu/bfloat16/concat_in_place.cpp    | 2 +-
 .../plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp | 4 ++--
 .../cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp | 6 +++---
 .../plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp  | 8 ++++----
 .../plugin/cpu/bfloat16/scaleshift_conv_relu.cpp          | 2 +-
 .../cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp       | 2 +-
 .../cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp      | 2 +-
 8 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp
index 5d1defd2932aae..8cc114c4594676 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/bf16_network_restoring.cpp
@@ -30,7 +30,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
     std::shared_ptr<ngraph::Function> createGraph(InferenceEngine::Precision netPrecision) override {
         //             + Power1(FP32)
         //               |
-        //             + AvgPooling1(FP32)
+        //             + AvgPooling1(BF16)
         //               |
         //             + Convolution1(BF16)
         //               |
@@ -45,7 +45,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
         //               |                              /
         //             ReLU3 (Fused to Conv2)         /
         //               |                           /
-        //             MaxPooling1 (FP32)           /
+        //             MaxPooling1 (BF16)           /
         //                  \                      /
         //                     Eltwise
         //                       |
@@ -180,7 +180,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
         // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in
         // performance counters
         expectedPrecisions["Power1"] = "FP32";
-        expectedPrecisions["AvgPooling1"] = "FP32";
+        expectedPrecisions["AvgPooling1"] = "BF16";
         expectedPrecisions["Convolution1"] = "BF16";
         expectedPrecisions["ReLU1"] = "ndef";
         expectedPrecisions["Convolution2"] = "BF16";
@@ -189,7 +189,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
         expectedPrecisions["Norm1"] = "FP32";
         expectedPrecisions["Eltwise1"] = "ndef";
         expectedPrecisions["ReLU3"] = "ndef";
-        expectedPrecisions["maxPooling1"] = "FP32";
+        expectedPrecisions["maxPooling1"] = "BF16";
         expectedPrecisions["Eltwise2"] = "FP32";
     }
 };
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp
index d77fb09f044efc..cc74eb684edb3b 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/concat_in_place.cpp
@@ -131,7 +131,7 @@ class Concat_in_place : public BasicBF16Test {
         expectedPrecisions["ADD_1"] = "FP32";
         expectedPrecisions["CONV_1"] = "BF16";
         expectedPrecisions["CONV_2"] = "BF16";
-        expectedPrecisions["CONC_1_TEST"] = "FP32";
+        expectedPrecisions["CONC_1_TEST"] = "BF16";
         expectedPrecisions["RELU_1"] = "FP32";
     }
 };
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp
index 4dde0eaf889756..20131cb1720f4f 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/conv_relu_pool_conv_relu_pool.cpp
@@ -32,7 +32,7 @@ class ConvReLUPoolConvReLUPool : public BasicBF16Test {
         //               |
         //             ReLU1 (Fused)
         //               |
-        //             Pooling1 (FP32)
+        //             Pooling1 (BF16)
         //               |
         //             Convolution2 (BF16)
         //               |
@@ -164,7 +164,7 @@ class ConvReLUPoolConvReLUPool : public BasicBF16Test {
         // performance counters
         expectedPrecisions["Convolution_1"] = "FP32";
         expectedPrecisions["ReLU_1"] = "ndef";
-        expectedPrecisions["AvgPool_1"] = "FP32";
+        expectedPrecisions["AvgPool_1"] = "BF16";
         expectedPrecisions["Convolution_2"] = "BF16";
         expectedPrecisions["ReLU_2"] = "ndef";
         expectedPrecisions["MaxPool_2"] = "BF16";
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp
index 03185914a47577..2f29cb0a6c1ea3 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/gather_x2_add_mul_relu_concat_matmul.cpp
@@ -37,7 +37,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test {
 //         \           /              /
 //          Mul(FP32)      ReLU(FP32)
 //            \             /
-//             Concat(FP32)    Const
+//             Concat(BF16)    Const
 //                  \          /
 //                   Matmul(BF16)
 
@@ -116,7 +116,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test {
         fnPtr = createGraph(netPrecision);
 
         // STAGE2: set up safe threshold <= 5% from maximum value of output tensor
-        threshold = 170.02f;  // Max in fp32 network by output: 3887.11
+        threshold = 177.f;  // Max in fp32 network by output: 3887.11
 
         // STAGE3:
         // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in
@@ -125,7 +125,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test {
         expectedPrecisions["Mul_1"] = "FP32";
         expectedPrecisions["Add_1"] = "FP32";
         expectedPrecisions["Relu_1"] = "FP32";
-        expectedPrecisions["Conc_1"] = "FP32";
+        expectedPrecisions["Conc_1"] = "BF16";
         expectedPrecisions["Matmul_1"] = "BF16";
     }
 };
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp
index 4855ca390151d2..aca7bd6eec27c4 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/mobilenet_ssd_with_branching.cpp
@@ -24,7 +24,7 @@ class MobileNet_ssd_with_branching : public BasicBF16Test {
         //           |
         //        Conv1 (FP32)
         //           |      \
-        //        Conv2 (FP32 so far while we have not greedy mode. This must be fixed. Such pattern shouild have Conv2 in BF16)
+        //        Conv2 (BF16)      \
         //           |               |
         //        relu(fused)        |
         //           |        Normalize (not LRN)
@@ -145,18 +145,18 @@ class MobileNet_ssd_with_branching : public BasicBF16Test {
         fnPtr = createGraph(netPrecision);
 
         // STAGE1:
-        threshold = 0.8f;  // max value in latest tensor is 87.67
+        threshold = 0.85f;  // max value in latest tensor is 87.67
         // STAGE2:
         // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in
         // performance counters
         expectedPrecisions["ADD_1"] = "FP32";
         expectedPrecisions["CONV_1"] = "BF16";
-        expectedPrecisions["CONV_2"] = "FP32";
+        expectedPrecisions["CONV_2"] = "BF16";
         expectedPrecisions["RELU_2"] = "ndef";
         expectedPrecisions["DW_CONV"] = "BF16";
         expectedPrecisions["RELU_DW"] = "ndef";
         expectedPrecisions["NORM_1"] = "FP32";
-        expectedPrecisions["CONC_1"] = "FP32";
+        expectedPrecisions["CONC_1"] = "BF16";
 
     }
 };
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp
index cff8ce820f8d4f..d1bfeb0de6f999 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_relu.cpp
@@ -93,7 +93,7 @@ class ScaleshiftConvRelu : public BasicBF16Test {
         fnPtr = createGraph(netPrecision);
 
         // STAGE1:
-        threshold = 5e-2;
+        threshold = 7e-2;
         // STAGE2:
         // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in
         // performance counters
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp
index f8e5ae16c5da63..b94f24111d2abc 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_conv_x2_concat_relu.cpp
@@ -117,7 +117,7 @@ class ScaleshiftConv_x2_ConcatRelu : public BasicBF16Test {
         expectedPrecisions["ADD_1"] = "FP32";
         expectedPrecisions["CONV_1"] = "BF16";
         expectedPrecisions["CONV_2"] = "BF16";
-        expectedPrecisions["CONC_1"] = "FP32";
+        expectedPrecisions["CONC_1"] = "BF16";
         expectedPrecisions["RELU_1"] = "FP32";
     }
 };
diff --git a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp
index 35cace53067492..a3a45a3e09c6d6 100644
--- a/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/bfloat16/scaleshift_x3_conv_eltwise_relu.cpp
@@ -142,7 +142,7 @@ class Scaleshift_x3_ConvEltwiseRelu : public BasicBF16Test {
         fnPtr = createGraph(netPrecision);
 
         // STAGE1:
-        threshold = 2e-1;
+        threshold = 5e-1;
         // STAGE2:
         // filling of expected precision of layer execution defined by precisoin of input tensor to the primitive and reflected in
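
Note on the threshold updates: per the STAGE comments in the hunks above, each test's accuracy threshold is meant to stay at or below 5% of the maximum value in the FP32 output tensor. A minimal standalone sketch of that rule, using the numbers from the gather_x2_add_mul_relu_concat_matmul hunk (the helper name below is illustrative and not part of the test framework):

#include <cassert>
#include <cstdio>

// Illustrative helper (an assumption, not a framework API): a threshold is
// "safe" when it does not exceed 5% of the maximum FP32 output value.
static bool isSafeThreshold(float threshold, float fp32OutputMax) {
    return threshold <= 0.05f * fp32OutputMax;
}

int main() {
    const float fp32Max = 3887.11f;        // max FP32 output cited in the hunk
    const float updatedThreshold = 177.f;  // new value from this patch

    // 5% of 3887.11 is ~194.36, so loosening the threshold from 170.02f
    // to 177.f still satisfies the rule while tolerating more BF16 error.
    assert(isSafeThreshold(updatedThreshold, fp32Max));
    std::printf("5%% bound: %.2f, threshold: %.2f\n",
                0.05f * fp32Max, updatedThreshold);
    return 0;
}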