[IE TESTS] dynamic batch for mvn layer (openvinotoolkit#1010)

* [ci-skip][IE TESTS] dynamic batch for mvn layer

* update instance v0

* [ci-skip][IE TESTS] update instance for mvn layer

* [ci-skip][IE TESTS] fix

* [ci-skip][IE TESTS] add dynamic batch for singleLayer basic class

* [ci-skip][IE TESTS] update dynamic batch for singleLayer basic class

* [ci-skip][IE TESTS] removing batchFlag

* [IE TESTS] removing batchSize

* [IE TESTS] refactor dynamic batch in basic class

* [IE TESTS] refactor dynamic batch in basic class
antonzaycev96 authored Jul 8, 2020
1 parent 884389d commit 2e3378c
Showing 8 changed files with 61 additions and 21 deletions.
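
For context before the per-file hunks: the feature under test is Inference Engine's dynamic batching, where a network is compiled once at a maximum batch size and individual infer requests may process a smaller batch. A minimal sketch of that API path follows; this is not code from the commit, and the model path and batch values are placeholders.

    #include <inference_engine.hpp>

    using namespace InferenceEngine;

    int main() {
        Core core;
        // "model.xml" is a placeholder; the network input must have a batch dimension > 1.
        CNNNetwork network = core.ReadNetwork("model.xml");
        // Allow infer requests to run on a smaller batch than the one compiled in.
        ExecutableNetwork executableNetwork = core.LoadNetwork(network, "CPU",
                {{PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES}});
        InferRequest request = executableNetwork.CreateInferRequest();
        request.SetBatch(2);  // process only the first two items of the batch
        request.Infer();
        return 0;
    }
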
5 changes: 3 additions & 2 deletions inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
@@ -102,7 +102,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network

    MKLDNNGraph::ApplyUnrollPasses(static_cast<ICNNNetwork&>(*_clonedNetwork));

-    if (_cfg.batchLimit > 1) {
+    if (_cfg.enableDynamicBatch) {
        // check topology for applicability
        if (!CanProcessDynBatch(*_clonedNetwork)) {
            THROW_IE_EXCEPTION << "MKLDNNGraph::CreateGraph: such topology cannot be compiled for dynamic batch!";
@@ -292,7 +292,8 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::ICNNNetwork &n
            type != Eltwise &&
            type != Crop &&
            type != BatchNormalization &&
-            type != Copy) {
+            type != Copy &&
+            type != MVN) {
            check_result = false;
        }
    }, false);
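
In spirit, the check above walks every layer of the cloned network and rejects dynamic batch if any layer type falls outside a whitelist; MVN is what this commit adds to that list. A simplified, self-contained sketch of the idea (LayerType, the Other value, and the helper name are hypothetical; only the types visible in the hunk are listed):

    #include <algorithm>
    #include <set>
    #include <vector>

    enum LayerType { Eltwise, Crop, BatchNormalization, Copy, MVN, Other };

    // Reject the network for dynamic batch if any layer type is not whitelisted.
    bool canProcessDynBatchSketch(const std::vector<LayerType>& layerTypes) {
        static const std::set<LayerType> whitelist = {
                Eltwise, Crop, BatchNormalization, Copy,
                MVN  // newly allowed by this commit
        };
        return std::all_of(layerTypes.begin(), layerTypes.end(), [](LayerType t) {
            return whitelist.count(t) != 0;
        });
    }
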
3 changes: 2 additions & 1 deletion inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
@@ -990,7 +990,8 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con
    std::vector<float> mean_buffer(aux_buffer_size * threads_num);
    std::vector<float> variance_buffer(aux_buffer_size * threads_num);

-    for (size_t b = 0lu; b < N; b++) {
+    int actual_N = batchToProcess();
+    for (size_t b = 0lu; b < actual_N; b++) {
        size_t ccb = is_nhwc ? b * C2 : b * C3;
        if (across_channels) {
            // mean for this instance in batch
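
batchToProcess() comes from the MKLDNN node infrastructure rather than from this diff. The working assumption, sketched below with hypothetical names, is that it returns the batch requested at run time via SetBatch(), capped by the batch the graph was compiled with:

    #include <algorithm>

    // Assumed behavior of batchToProcess(): honor the dynamic batch when one
    // was requested, otherwise fall back to the full compiled batch.
    int batchToProcessSketch(int compiledBatch, int requestedDynBatch) {
        return requestedDynBatch > 0 ? std::min(requestedDynBatch, compiledBatch)
                                     : compiledBatch;
    }
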
single_layer_tests/mvn.cpp (CPU plugin shared test instances)
@@ -38,14 +38,30 @@ const std::vector<double> epsilon = {
    0.000000001
};

+const std::vector<std::map<std::string, std::string>> Configs = {
+        {}
+};
+
const auto MvnCases = ::testing::Combine(
        ::testing::ValuesIn(inputShapes),
        ::testing::Values(InferenceEngine::Precision::FP32),
        ::testing::ValuesIn(acrossChannels),
        ::testing::ValuesIn(normalizeVariance),
        ::testing::ValuesIn(epsilon),
-        ::testing::Values(CommonTestUtils::DEVICE_CPU)
+        ::testing::Values(CommonTestUtils::DEVICE_CPU),
+        ::testing::ValuesIn(Configs)
);

INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);


+INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_MvnLayerCheckDynBatch, MvnLayerTest,
+        ::testing::Combine(
+                ::testing::Values(std::vector<size_t>({5, 8, 3, 5})),
+                ::testing::Values(InferenceEngine::Precision::FP32),
+                ::testing::ValuesIn(acrossChannels),
+                ::testing::ValuesIn(normalizeVariance),
+                ::testing::ValuesIn(epsilon),
+                ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                ::testing::Values(std::map<std::string, std::string>({{CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES)}}))),
+        MvnLayerTest::getTestCaseName);
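
For reference, one concrete parameter tuple produced by the smoke_MKLDNN_MvnLayerCheckDynBatch instantiation above would look roughly like this (illustrative values; the suite enumerates the full cross-product):

    mvnParams p = std::make_tuple(
            InferenceEngine::SizeVector{5, 8, 3, 5},        // input shape
            InferenceEngine::Precision::FP32,               // input precision
            false,                                          // across channels
            true,                                           // normalize variance
            0.000000001,                                    // epsilon
            std::string(CommonTestUtils::DEVICE_CPU),       // device
            std::map<std::string, std::string>{
                    {CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES)}});  // config
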
single_layer_tests/mvn.cpp (GPU plugin shared test instances)
@@ -38,13 +38,19 @@ const std::vector<double> epsilon = {
    0.000000001
};


+const std::vector<std::map<std::string, std::string>> Configs = {
+        {}
+};
+
const auto MvnCases = ::testing::Combine(
        ::testing::ValuesIn(inputShapes),
        ::testing::Values(InferenceEngine::Precision::FP32),
        ::testing::ValuesIn(acrossChannels),
        ::testing::ValuesIn(normalizeVariance),
        ::testing::ValuesIn(epsilon),
-        ::testing::Values(CommonTestUtils::DEVICE_GPU)
+        ::testing::Values(CommonTestUtils::DEVICE_GPU),
+        ::testing::ValuesIn(Configs)
);

INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
single_layer_tests/mvn.hpp (shared test class declaration)
@@ -13,17 +13,18 @@
namespace LayerTestsDefinitions {

typedef std::tuple<
-        InferenceEngine::SizeVector,        // Input shapes
-        InferenceEngine::Precision,         // Input precision
-        bool,                               // Across channels
-        bool,                               // Normalize variance
-        double,                             // Epsilon
-        std::string> mvnParams;             // Device name
+        InferenceEngine::SizeVector,        // Input shapes
+        InferenceEngine::Precision,         // Input precision
+        bool,                               // Across channels
+        bool,                               // Normalize variance
+        double,                             // Epsilon
+        std::string,                        // Device name
+        std::map<std::string, std::string>  // Config
+        > mvnParams;

class MvnLayerTest : public testing::WithParamInterface<mvnParams>, public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(testing::TestParamInfo<mvnParams> obj);
-
protected:
    void SetUp() override;
};
single_layer_tests/mvn.cpp (shared test class implementation)
@@ -27,14 +27,20 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
    bool acrossChannels, normalizeVariance;
    double eps;
    std::string targetDevice;
-    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param;
+    std::map<std::string, std::string> configuration;
+    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice, configuration) = obj.param;
    std::ostringstream result;
    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
    result << "Precision=" << inputPrecision.name() << "_";
    result << "AcrossChannels=" << (acrossChannels ? "TRUE" : "FALSE") << "_";
    result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_";
    result << "Epsilon=" << eps << "_";
    result << "TargetDevice=" << targetDevice;
+    if (!configuration.empty()) {
+        for (auto& configItem : configuration) {
+            result << "configItem=" << configItem.first << "_" << configItem.second << "_";
+        }
+    }
    return result.str();
}
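
With the configuration map in play, each entry appends a suffix of the form configItem=<key>_<value>_ to the generated test name, so the dyn-batch instances above gain a suffix like configItem=DYN_BATCH_ENABLED_YES_.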

@@ -43,7 +49,7 @@ void MvnLayerTest::SetUp() {
    InferenceEngine::Precision inputPrecision;
    bool acrossChanels, normalizeVariance;
    double eps;
-    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam();
+    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice, configuration) = this->GetParam();
    auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
    auto param = ngraph::builder::makeParams(inType, {inputShapes});
    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(param));
@@ -54,6 +60,5 @@ void MvnLayerTest::SetUp() {

TEST_P(MvnLayerTest, CompareWithRefs) {
    Run();
};
-
-}
+}  // namespace LayerTestsDefinitions
functional_test_utils/layer_test_utils.cpp (LayerTestsCommon base class)
@@ -39,7 +39,14 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
    const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();

    const auto &precision = actual->getTensorDesc().getPrecision();
-    const auto &size = actual->size();
+    auto bufferSize = actual->size();
+    // With dynamic batch only part of the output buffer is computed, so compare only that part
+    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED)) {
+        auto batchSize = actual->getTensorDesc().getDims()[0];
+        auto halfBatchSize = batchSize > 1 ? batchSize / 2 : 1;
+        bufferSize = (actual->size() * halfBatchSize / batchSize);
+    }
+    const auto &size = bufferSize;
    switch (precision) {
        case InferenceEngine::Precision::FP32:
            Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
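
Concretely, for the dyn-batch suite above with input shape {5, 8, 3, 5}: batchSize = 5, halfBatchSize = 2, and actual->size() = 5 * 8 * 3 * 5 = 600 elements, so bufferSize = 600 * 2 / 5 = 240, exactly the two batch items that Infer() (below) asks the plugin to compute via SetBatch(5 / 2 = 2).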
@@ -54,7 +61,7 @@
    }
}

-void LayerTestsCommon::ConfigurePlugin() const {
+void LayerTestsCommon::ConfigurePlugin() {
    if (!configuration.empty()) {
        core->SetConfig(configuration, targetDevice);
    }
@@ -92,11 +99,15 @@ void LayerTestsCommon::Infer() {

    for (const auto &input : cnnNetwork.getInputsInfo()) {
        const auto &info = input.second;
-
        auto blob = GenerateInput(*info);
        inferRequest.SetBlob(info->name(), blob);
        inputs.push_back(blob);
    }
+    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
+        configuration.at(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) == InferenceEngine::PluginConfigParams::YES) {
+        auto batchSize = cnnNetwork.getInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
+        inferRequest.SetBatch(batchSize);
+    }
    inferRequest.Infer();
}

functional_test_utils/layer_test_utils.hpp (LayerTestsCommon base class)
@@ -84,7 +84,7 @@ class LayerTestsCommon : public CommonTestUtils::TestsCommon {
        return refMode;
    }

-    void ConfigurePlugin() const;
+    void ConfigurePlugin();

    void LoadNetwork();

@@ -102,7 +102,6 @@ class LayerTestsCommon : public CommonTestUtils::TestsCommon {
    std::vector<InferenceEngine::Blob::Ptr> inputs;
    float threshold;
    InferenceEngine::CNNNetwork cnnNetwork;
-
    virtual void Validate();

    virtual std::vector<std::vector<std::uint8_t>> CalculateRefs();
