[IE TESTS][CPU BF16] Most of the bfloat16 tests have been fixed.
maxnick committed Nov 3, 2020
1 parent 0b395e7 commit e89cf34
Showing 8 changed files with 17 additions and 17 deletions.
@@ -30,7 +30,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
std::shared_ptr<ngraph::Function> createGraph(InferenceEngine::Precision netPrecision) override {
// + Power1(FP32)
// |
-// + AvgPooling1(FP32)
+// + AvgPooling1(BF16)
// |
// + Convolution1(BF16)
// |
@@ -45,7 +45,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
// | /
// ReLU3 (Fused to Conv2) /
// | /
-// MaxPooling1 (FP32) /
+// MaxPooling1 (BF16) /
// \ /
// Eltwise
// |
@@ -180,7 +180,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["Power1"] = "FP32";
expectedPrecisions["AvgPooling1"] = "FP32";
expectedPrecisions["AvgPooling1"] = "BF16";
expectedPrecisions["Convolution1"] = "BF16";
expectedPrecisions["ReLU1"] = "ndef";
expectedPrecisions["Convolution2"] = "BF16";
@@ -189,7 +189,7 @@ class BF16NetworkRestore1 : public BasicBF16Test {
expectedPrecisions["Norm1"] = "FP32";
expectedPrecisions["Eltwise1"] = "ndef";
expectedPrecisions["ReLU3"] = "ndef";
expectedPrecisions["maxPooling1"] = "FP32";
expectedPrecisions["maxPooling1"] = "BF16";
expectedPrecisions["Eltwise2"] = "FP32";
}
};
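
All of the expectedPrecisions entries above are checked against the plugin's performance counters after inference: each executed primitive reports an execution type such as "jit_avx512_BF16", whose suffix encodes the runtime precision. A minimal sketch of such a check, assuming a gtest-based harness; the helper name and the exec_type parsing are illustrative, not the actual BasicBF16Test code:

#include <map>
#include <string>
#include <gtest/gtest.h>
#include <ie_core.hpp>

// Compare each layer's runtime precision, taken from the performance
// counters' exec_type field, with the expected one. "ndef" marks layers
// fused into a neighbour; they never execute as standalone primitives.
void checkExecPrecisions(InferenceEngine::InferRequest &req,
                         const std::map<std::string, std::string> &expectedPrecisions) {
    auto perfCounts = req.GetPerformanceCounts();
    for (const auto &kv : expectedPrecisions) {
        if (kv.second == "ndef")
            continue;  // fused layer: nothing to query
        auto it = perfCounts.find(kv.first);
        if (it == perfCounts.end())
            continue;  // layer was optimized out entirely
        const std::string execType = it->second.exec_type;
        // the precision is the suffix after the last underscore
        const std::string runtimePrec = execType.substr(execType.rfind('_') + 1);
        EXPECT_EQ(kv.second, runtimePrec) << "layer: " << kv.first;
    }
}
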
@@ -131,7 +131,7 @@ class Concat_in_place : public BasicBF16Test {
expectedPrecisions["ADD_1"] = "FP32";
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["CONC_1_TEST"] = "FP32";
expectedPrecisions["CONC_1_TEST"] = "BF16";
expectedPrecisions["RELU_1"] = "FP32";
}
};
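
The topology exercised here is two convolutions feeding a concat that the CPU plugin can execute in place; with this commit the concat is expected to inherit BF16 from its convolution inputs instead of falling back to FP32. A rough ngraph sketch of such a pattern (shapes, weights, and names are made up for illustration, not the test's actual createGraph):

#include <memory>
#include <vector>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>

std::shared_ptr<ngraph::Function> makeConvConcatRelu() {
    using namespace ngraph;
    auto input = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 3, 40, 40});
    // two 3x3 convolutions over the same input, 3 -> 3 channels
    auto weights = opset1::Constant::create(element::f32, Shape{3, 3, 3, 3},
                                            std::vector<float>(81, 0.1f));
    auto conv1 = std::make_shared<opset1::Convolution>(
        input, weights, Strides{1, 1}, CoordinateDiff{1, 1}, CoordinateDiff{1, 1}, Strides{1, 1});
    auto conv2 = std::make_shared<opset1::Convolution>(
        input, weights, Strides{1, 1}, CoordinateDiff{1, 1}, CoordinateDiff{1, 1}, Strides{1, 1});
    // concat over the channel axis is the in-place candidate under test
    auto concat = std::make_shared<opset1::Concat>(OutputVector{conv1, conv2}, 1);
    auto relu = std::make_shared<opset1::Relu>(concat);
    return std::make_shared<Function>(relu, ParameterVector{input});
}
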
@@ -32,7 +32,7 @@ class ConvReLUPoolConvReLUPool : public BasicBF16Test {
// |
// ReLU1 (Fused)
// |
-// Pooling1 (FP32)
+// Pooling1 (BF16)
// |
// Convolution2 (BF16)
// |
@@ -164,7 +164,7 @@ class ConvReLUPoolConvReLUPool : public BasicBF16Test {
// performance counters
expectedPrecisions["Convolution_1"] = "FP32";
expectedPrecisions["ReLU_1"] = "ndef";
expectedPrecisions["AvgPool_1"] = "FP32";
expectedPrecisions["AvgPool_1"] = "BF16";
expectedPrecisions["Convolution_2"] = "BF16";
expectedPrecisions["ReLU_2"] = "ndef";
expectedPrecisions["MaxPool_2"] = "BF16";
@@ -37,7 +37,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test {
// \ / /
// Mul(FP32) ReLU(FP32)
// \ /
-// Concat(FP32) Const
+// Concat(BF16) Const
// \ /
// Matmul(BF16)

@@ -116,7 +116,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test {
fnPtr = createGraph(netPrecision);

// STAGE2: set up safe threshold <= 5% from maximum value of output tensor
-threshold = 170.02f; // Max in fp32 network by output: 3887.11
+threshold = 177.f; // Max in fp32 network by output: 3887.11

// STAGE3:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
@@ -125,7 +125,7 @@ class Gather_x2_add_mul_relu_concat_matmul : public BasicBF16Test {
expectedPrecisions["Mul_1"] = "FP32";
expectedPrecisions["Add_1"] = "FP32";
expectedPrecisions["Relu_1"] = "FP32";
expectedPrecisions["Conc_1"] = "FP32";
expectedPrecisions["Conc_1"] = "BF16";
expectedPrecisions["Matmul_1"] = "BF16";
}
};
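
The threshold bump above still honours the "safe threshold <= 5% of the output maximum" rule from the STAGE2 comment: 0.05 × 3887.11 ≈ 194.36, so the new value 177.0 (up from 170.02) stays inside the 5% bound while leaving headroom for the extra rounding the Concat contributes now that it executes in BF16.
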
@@ -24,7 +24,7 @@ class MobileNet_ssd_with_branching : public BasicBF16Test {
// |
// Conv1 (FP32)
// | \
-// Conv2 (FP32 so far while we have not greedy mode. This must be fixed. Such pattern shouild have Conv2 in BF16)
+// Conv2 (BF16) \
// | |
// relu(fused) |
// | Normalize (not LRN)
@@ -145,18 +145,18 @@ class MobileNet_ssd_with_branching : public BasicBF16Test {
fnPtr = createGraph(netPrecision);

// STAGE1:
-threshold = 0.8f; // max value in latest tensor is 87.67
+threshold = 0.85f; // max value in latest tensor is 87.67
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
expectedPrecisions["ADD_1"] = "FP32";
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "FP32";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["RELU_2"] = "ndef";
expectedPrecisions["DW_CONV"] = "BF16";
expectedPrecisions["RELU_DW"] = "ndef";
expectedPrecisions["NORM_1"] = "FP32";
expectedPrecisions["CONC_1"] = "FP32";
expectedPrecisions["CONC_1"] = "BF16";
}
};

@@ -93,7 +93,7 @@ class ScaleshiftConvRelu : public BasicBF16Test {
fnPtr = createGraph(netPrecision);

// STAGE1:
-threshold = 5e-2;
+threshold = 7e-2;
// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
// performance counters
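
The relaxed thresholds in this and the following tests are consistent with bfloat16 arithmetic: the format carries 8 significant bits (7 stored mantissa bits plus the implicit leading one), so each round-to-nearest step can add a relative error of up to 2^-8 ≈ 0.39%, and those errors compound as more layers in the chain now execute in BF16; hence 5e-2 loosens to 7e-2 here and 2e-1 to 5e-1 below.
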
@@ -117,7 +117,7 @@ class ScaleshiftConv_x2_ConcatRelu : public BasicBF16Test {
expectedPrecisions["ADD_1"] = "FP32";
expectedPrecisions["CONV_1"] = "BF16";
expectedPrecisions["CONV_2"] = "BF16";
expectedPrecisions["CONC_1"] = "FP32";
expectedPrecisions["CONC_1"] = "BF16";
expectedPrecisions["RELU_1"] = "FP32";
}
};
@@ -142,7 +142,7 @@ class Scaleshift_x3_ConvEltwiseRelu : public BasicBF16Test {
fnPtr = createGraph(netPrecision);

// STAGE1:
-threshold = 2e-1;
+threshold = 5e-1;

// STAGE2:
// filling of expected precision of layer execution defined by precision of input tensor to the primitive and reflected in
