Skip to content

Commit

Permalink
WIP
Browse files Browse the repository at this point in the history
  • Loading branch information
antonvor committed Jul 19, 2021
1 parent ad59d7e commit a3cffa1
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 25 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,15 @@ class ConvolutionLayerCPUTest : public testing::WithParamInterface<convLayerCPUT
ASSERT_TRUE(foundConv) << "Can't find Convolution node";
}

// Computes the upper bound ("input high") for the FakeQuantize node placed after the
// convolution under test. The convolution accumulator can reach at most
//   maxIn0 * maxIn1 * ic * prod(kernel dims)
// when every input activation is maxIn0 and every weight is maxIn1, so quantizing with
// this bound guarantees no saturation of the int8 output range.
// @param kernel  spatial kernel dimensions of the convolution
// @param ic      number of input channels
// @param maxIn0  maximum activation value used by the test data generator (default 10)
// @param maxIn1  maximum weight value used by the test data generator (default 10)
// @return the accumulator upper bound to use as the FakeQuantize input-high level
int calculateQuantizeInHigh(const InferenceEngine::SizeVector& kernel, const int ic, const int maxIn0 = 10, const int maxIn1 = 10) const {
    int quantizeInHigh = maxIn0 * maxIn1;
    quantizeInHigh *= ic;
    // Range-for avoids the signed/unsigned comparison of `int i < kernel.size()`;
    // the cast makes the size_t -> int narrowing explicit.
    for (const auto dim : kernel) {
        quantizeInHigh *= static_cast<int>(dim);
    }
    return quantizeInHigh;
}

void SetUp() override {
using namespace ngraph;
convLayerTestParamsSet basicParamsSet;
Expand Down Expand Up @@ -123,23 +132,19 @@ class ConvolutionLayerCPUTest : public testing::WithParamInterface<convLayerCPUT
auto convolutionNode = builder::makeConvolutionRelaxed(paramOuts.front(), weiPrc, kernel, stride, padBegin,
padEnd, dilation, padType, convOutChannels);

// todo: scale calculation
scale = inputShape[1];
for (int i = 0; i < kernel.size(); i++) {
scale *= kernel[i];
}
scale *= 10 * 10;
scale += 1;


// todo: FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc)?
function = makeNgraphFunction(element::f32, inputParams, convolutionNode, "Convolution", FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc));

if (inPrc == Precision::U8 || inPrc == Precision::I8) {
threshold = 1.001f;
outElemType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc);
quantizeInHigh = calculateQuantizeInHigh(kernel, inputShape[1]);
additionalPasses.push_back(std::make_shared<pass::ConvertPrecision<element::i8, element::f32>>());
additionalPasses.push_back(std::make_shared<pass::ConvertPrecision<element::u8, element::f32>>());
}

// todo: FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc)?
function = makeNgraphFunction(element::f32, inputParams, convolutionNode, "Convolution");



// todo: is it needed?
// if (outPrc != Precision::FP32 && outPrc != Precision::BF16) {
// additionalPasses.push_back(std::make_shared<ConvertPrecision<opset1::Convolution>>());
Expand Down Expand Up @@ -277,8 +282,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_BF16, ConvolutionLayerCPUTest,
::testing::Values(cpuBF16PluginConfig)),
ConvolutionLayerCPUTest::getTestCaseName);

// todo:


/* ============= Convolution (GEMM 3D) ============= */
const auto convParams_ExplicitPadding_GEMM_3D = ::testing::Combine(
Expand Down Expand Up @@ -528,8 +531,6 @@ const std::vector<CPUSpecificParams> CPUParams_1x1_2D = {
conv_avx512_2D_1x1_nspc
};



INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32, ConvolutionLayerCPUTest,
::testing::Combine(
::testing::Combine(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -241,20 +241,19 @@ CPUTestsBase::makeCPUInfo(std::vector<cpu_memory_format_t> inFmts, std::vector<c

std::shared_ptr<ngraph::Function>
CPUTestsBase::makeNgraphFunction(const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode, std::string name,
const ngraph::element::Type outPrc) const {
const std::shared_ptr<ngraph::Node> &lastNode, std::string name) const {
auto newLastNode = modifyGraph(ngPrc, params, lastNode);
ngraph::ResultVector results;

if (outPrc == ngraph::element::u8) {
if (outElemType == ngraph::element::u8) {
const std::vector<float> inLow = {0};
const std::vector<float> inHigh = {static_cast<float>(scale)};
const std::vector<float> inHigh = {static_cast<float>(quantizeInHigh)};
const std::vector<float> outLow = {0};
const std::vector<float> outHigh = {255};
newLastNode = ngraph::builder::makeFakeQuantize(newLastNode, ngPrc, 256, {1, 1, 1, 1}, inLow, inHigh, outLow, outHigh);
} else if (outPrc == ngraph::element::i8) {
} else if (outElemType == ngraph::element::i8) {
const std::vector<float> inLow = {0};
const std::vector<float> inHigh = {static_cast<float>(scale)};
const std::vector<float> inHigh = {static_cast<float>(quantizeInHigh)};
const std::vector<float> outLow = {-127};
const std::vector<float> outHigh = {127};
newLastNode = ngraph::builder::makeFakeQuantize(newLastNode, ngPrc, 255, {1, 1, 1, 1}, inLow, inHigh, outLow, outHigh);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,7 @@ class CPUTestsBase {
std::shared_ptr<ngraph::Function> makeNgraphFunction(const ngraph::element::Type &ngPrc,
ngraph::ParameterVector &params,
const std::shared_ptr<ngraph::Node> &lastNode,
std::string name,
const ngraph::element::Type outPrc = ngraph::element::f32) const;
std::string name) const;

protected:
virtual void CheckPluginRelatedResults(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType) const;
Expand All @@ -146,7 +145,10 @@ class CPUTestsBase {
std::vector<cpu_memory_format_t> inFmts, outFmts;
std::vector<std::string> priority;
std::string selectedType;
int scale = 1;

ngraph::element::Type outElemType = ngraph::element::f32;
// only for int8 testing
int quantizeInHigh = 1;
};

template <typename NodeType>
Expand Down

0 comments on commit a3cffa1

Please sign in to comment.