Skip to content

Commit

Permalink
[GNA] Add negative CNN2D tests (#5741)
Browse files Browse the repository at this point in the history
* invalid kernel
  * invalid input
  * invalid padding
  * invalid stride
  * invalid dilation
  • Loading branch information
kbruniec authored May 26, 2021
1 parent 5b291b5 commit df9525b
Show file tree
Hide file tree
Showing 4 changed files with 181 additions and 5 deletions.
5 changes: 3 additions & 2 deletions inference-engine/src/gna_plugin/backend/gna_limitations.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ bool RangeMultipleLimit::isValid(const uint32_t val) const {
// Returns the accumulated error text for 'val', or an empty string when the
// value satisfies both the base range limit and the multiplicity constraint.
// The range part of the message comes from RangeLimit::GetErrorOrEmpty();
// this method appends only the "must be multiple of" part.
std::string RangeMultipleLimit::GetErrorOrEmpty(const uint32_t val) const {
    auto e = RangeLimit::GetErrorOrEmpty(val);
    std::ostringstream out;
    // Check only the multiplicity condition here (not isValid(), which also
    // re-checks the range): the range violation is already reported in 'e',
    // and using isValid() would emit the "multiple of" message even for
    // values that are valid multiples but merely fall outside the range.
    if (val % multiplier != 0) {
        out << "Unsupported " << what << ": " << val << ", must be multiple of " << multiplier << "\n";
    }
    return e + out.str();
}
Expand Down Expand Up @@ -95,14 +95,15 @@ std::string VectorOrSquareLimitByChannelsAndPrecision::GetErrorOrEmpty(const uin

void Validator::ValidateCnn2D(std::string name, const uint32_t inHeight, const uint32_t inWidth,
const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
OvGnaType inPrecision) const {
const uint32_t strideH, const uint32_t strideW, OvGnaType inPrecision) const {
const std::string prefix = "Layer Convolution2D: " + name + ":";
auto error = inputHWLimit.GetErrorOrEmpty(inHeight, inWidth);

error += kernelNumberLimit.GetErrorOrEmpty(kN);

error += inputChannelsNumberLimit.GetErrorOrEmpty(inChannels);
error += kernelLimit.GetErrorOrEmpty(kH, kW, inPrecision, inChannels, "kernel");
error += strideLimit.GetErrorOrEmpty(strideH, strideW, inPrecision, inChannels, "convolution stride");
ThrowIfNotEmpty(prefix, error);
}

Expand Down
4 changes: 2 additions & 2 deletions inference-engine/src/gna_plugin/backend/gna_limitations.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,14 +75,14 @@ class Validator {
VectorOrSquareLimitByChannelsAndPrecision kernelLimit {
{ 240, { 3, 7, 3 }, { 2, 7, 2 } },
{ 120, { 3, 7, 3 }, { 1, 7, 1 } } };

VectorOrSquareLimitByChannelsAndPrecision& strideLimit = kernelLimit;
const VectorOrSquareLimit poolingWindowLimit{ 3, 1, 1 };

static void ThrowIfNotEmpty(const std::string prefix, const std::string error);
public:
void ValidateCnn2D(std::string name, const uint32_t inHeight, const uint32_t inWidth,
const uint32_t inChannels, const uint32_t kH, const uint32_t kW, const uint32_t kN,
OvGnaType inPrecision) const;
const uint32_t strideH, const uint32_t strideW, OvGnaType inPrecision) const;

void ValidatePooling2D(std::string name,
const uint32_t windowH, const uint32_t windowW,
Expand Down
2 changes: 1 addition & 1 deletion inference-engine/src/gna_plugin/gna_graph_compiler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -606,7 +606,7 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP

cnn2dValidator.ValidateCnn2D(layer->name,
in_height, in_width, in_channels,
convolution._kernel_y, convolution._kernel_x, filter_n, inputPrec);
convolution._kernel_y, convolution._kernel_x, filter_n, convolution._stride_y, convolution._stride_x, inputPrec);

float weight_scale_factor = 1.0f;
float output_scale_factor = 1.0f;
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include "shared_test_classes/single_layer/convolution.hpp"
#include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/test_constants.hpp"
#include "../skip_tests_check.hpp"

using namespace LayerTestsDefinitions;

namespace {

// Network-level precisions the tests are instantiated for.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
};

/* ============= 2D Convolution ============= */
// Kernel shapes the GNA plugin accepts for Convolution2D (used as the
// "fine" axis when some other axis is made invalid).
const std::vector<std::vector<size_t >> kernels2D = {
{1, 3},
{7, 1},
{3, 3},
};

// Kernel shapes the plugin must reject ("Unsupported kernel shape" expected).
const std::vector<std::vector<size_t >> InvalidKernels2D = {
{1, 4},
{2, 3},
{3, 2},
{8, 1},
{4, 4},
};

// Valid stride.
const std::vector<std::vector<size_t >> strides2D = {
{1, 1},
};
// Strides the plugin must reject ("Unsupported convolution stride shape").
const std::vector<std::vector<size_t >> strides2DInvalid = {
{4, 4}, {1, 4}
};
// Only zero padding is supported; any non-zero begin/end padding is invalid.
const std::vector<std::vector<ptrdiff_t>> padBegins2D = { {0, 0},
};
const std::vector<std::vector<ptrdiff_t>> padEnds2D = { {0, 0},
};
const std::vector<std::vector<ptrdiff_t>> padBegins2DInvalid = { {1, 0}, {1, 1}, {0, 1}
};
const std::vector<std::vector<ptrdiff_t>> padEnds2DInvalid = { {1, 0}, {1, 1}, {0, 1}
};
// Only {1, 1} dilation is supported; anything else triggers
// "dilation is not supported on GNA".
const std::vector<std::vector<size_t >> dilations2D = { {1, 1},
};
const std::vector<std::vector<size_t >> dilations2DInvalid = { {2, 2},
};
// NOTE(review): "Cannels" is a typo for "Channels"; the identifiers are kept
// as-is because they are referenced by the parameter combinations below.
const std::vector<size_t> numOutCannels2D = { 32 };
// Kernel counts expected to fail with "Unsupported number of kernels".
const std::vector<size_t> numOutCannels2DInvalid = { 1, 7, 9, 400 };

// Valid NCHW input shape used when another parameter is made invalid.
const std::vector<std::vector<size_t>> input2DNCHWFine = { { 1, 8, 20, 16 } };

// Input shapes with an unsupported channel count (C), height (H) or width (W).
const std::vector<std::vector<size_t>> input2DNCHWInvalidInputC = {
{ 1, 7, 20, 16 },
{ 1, 9, 20, 16 },
{ 1, 400, 20, 16 } };
const std::vector<std::vector<size_t>> input2DNCHWInvalidInputH = { { 1, 8, 15, 16 }, { 1, 8, 400, 16 } };
const std::vector<std::vector<size_t>> input2DNCHWInvalidInputW = { { 1, 8, 20, 14 }, { 1, 8, 20, 400 } };

// Each conv2DParameters* below combines one convolution-parameter axis made
// invalid (or all valid for "Fine") with valid values for every other axis,
// so each negative suite isolates exactly one unsupported parameter.

// All parameters valid — paired with an invalid input shape by the
// input-shape negative suites.
const auto conv2DParametersFine = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2D),
::testing::ValuesIn(padBegins2D),
::testing::ValuesIn(padEnds2D),
::testing::ValuesIn(dilations2D),
::testing::ValuesIn(numOutCannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
// Invalid axis: kernel shape.
const auto conv2DParametersInvalidKernel = ::testing::Combine(
::testing::ValuesIn(InvalidKernels2D),
::testing::ValuesIn(strides2D),
::testing::ValuesIn(padBegins2D),
::testing::ValuesIn(padEnds2D),
::testing::ValuesIn(dilations2D),
::testing::ValuesIn(numOutCannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
// Invalid axis: number of output channels (kernel count).
const auto conv2DParametersInvalidFilterNumber = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2D),
::testing::ValuesIn(padBegins2D),
::testing::ValuesIn(padEnds2D),
::testing::ValuesIn(dilations2D),
::testing::ValuesIn(numOutCannels2DInvalid),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
// Invalid axis: non-zero input padding (begin and end).
const auto conv2DParametersInvalidPadding = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2D),
::testing::ValuesIn(padBegins2DInvalid),
::testing::ValuesIn(padEnds2DInvalid),
::testing::ValuesIn(dilations2D),
::testing::ValuesIn(numOutCannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
// Invalid axis: stride shape.
const auto conv2DParametersInvalidStride = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2DInvalid),
::testing::ValuesIn(padBegins2D),
::testing::ValuesIn(padEnds2D),
::testing::ValuesIn(dilations2D),
::testing::ValuesIn(numOutCannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);
// Invalid axis: dilation (only {1, 1} supported).
const auto conv2DParametersInvalidDilation = ::testing::Combine(
::testing::ValuesIn(kernels2D),
::testing::ValuesIn(strides2D),
::testing::ValuesIn(padBegins2D),
::testing::ValuesIn(padEnds2D),
::testing::ValuesIn(dilations2DInvalid),
::testing::ValuesIn(numOutCannels2D),
::testing::Values(ngraph::op::PadType::EXPLICIT)
);

// Negative-test fixture: loading a network with an unsupported Convolution2D
// configuration must make the GNA plugin throw during LoadNetwork(), and the
// error text must contain the substring describing the unsupported parameter.
class GnaConv2DNegativeTest : public ConvolutionLayerTest, protected GnaLayerTestCheck {
protected:
    // Substring that must appear in the plugin's error message for the
    // particular invalid configuration; provided by each generated subclass.
    virtual std::string expectedSubstring() = 0;
    void Run() override {
        GnaLayerTestCheck::SkipTestCheck();

        if (!GnaLayerTestCheck::skipTest) {
            try {
                ConvolutionLayerTest::LoadNetwork();
                FAIL() << "GNA's unsupported configuration of Convolution2D was not detected in ConvolutionLayerTest::LoadNetwork()";
            }
            // Catch by const reference (the exception is only read).
            catch (const std::runtime_error& e) {
                const std::string errorMsg = e.what();
                const auto expected = expectedSubstring();
                // A single containment check suffices; the previous duplicate
                // EXPECT_TRUE(errorMsg.find(expected) != npos) verified the
                // exact same condition and only repeated the message (with an
                // "actula" typo), so it has been removed.
                ASSERT_STR_CONTAINS(errorMsg, expected);
            }
        }
    }
    void SetUp() override {
        ConvolutionLayerTest::SetUp();
    }
};

// Generates one complete negative test suite per kind of invalid parameter:
//   whats_wrong   - name suffix for the generated fixture, test and suite
//   sufix_params  - suffix selecting a conv2DParameters##sufix_params set
//   sufix_input   - suffix selecting an input2DNCHW##sufix_input shape list
//   error_message - substring that must appear in the plugin's error text
// NOTE(review): "sufix" is a typo for "suffix"; harmless since macro
// parameter names are local to the macro definition.
// (Comments cannot be placed on the continued lines below: a // comment
// would swallow the trailing backslash.)
#define GNA_NEG_INSTANTIATE(whats_wrong, sufix_params, sufix_input, error_message) \
struct GnaConv2DNegativeTest##whats_wrong : GnaConv2DNegativeTest { \
std::string expectedSubstring() override { \
return error_message; \
} \
}; \
TEST_P(GnaConv2DNegativeTest##whats_wrong, ThrowAsNotSupported) { \
Run(); \
} \
INSTANTIATE_TEST_CASE_P(smoke_GnaConv2DNegativeTestInvalid##whats_wrong, GnaConv2DNegativeTest##whats_wrong, \
::testing::Combine( \
conv2DParameters##sufix_params, \
::testing::ValuesIn(netPrecisions), \
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), \
::testing::Values(InferenceEngine::Precision::UNSPECIFIED), \
::testing::Values(InferenceEngine::Layout::ANY), \
::testing::Values(InferenceEngine::Layout::ANY), \
::testing::ValuesIn(input2DNCHW##sufix_input), \
::testing::Values(CommonTestUtils::DEVICE_GNA)), \
GnaConv2DNegativeTest##whats_wrong::getTestCaseName);

// One negative suite per unsupported parameter kind. Suites with invalid
// convolution parameters use the "Fine" input shape, and vice versa, so each
// suite triggers exactly one expected error substring.
GNA_NEG_INSTANTIATE(FilterNumber, InvalidFilterNumber, Fine, "Unsupported number of kernels")
GNA_NEG_INSTANTIATE(Kernel, InvalidKernel, Fine, "Unsupported kernel shape")
GNA_NEG_INSTANTIATE(InputH, Fine, InvalidInputH, "Unsupported input height")
GNA_NEG_INSTANTIATE(InputW, Fine, InvalidInputW, "Unsupported input width")
GNA_NEG_INSTANTIATE(InputC, Fine, InvalidInputC, "Unsupported number of input channels")
GNA_NEG_INSTANTIATE(Padding, InvalidPadding, Fine, "Convolution's input padding is not supported")
GNA_NEG_INSTANTIATE(Stride, InvalidStride, Fine, "Unsupported convolution stride shape")
GNA_NEG_INSTANTIATE(Dilation, InvalidDilation, Fine, "dilation is not supported on GNA")

} // namespace

0 comments on commit df9525b

Please sign in to comment.