[GNA] Support for cascade concat with non functional layers between concats

fix discussion

medn

test

delete comment

Rework test

minor fix

last fix

Test Align filter

fix discussion
admitriev-gna committed Jul 2, 2020
1 parent fe4ff33 commit eee8614
Showing 4 changed files with 180 additions and 5 deletions.
24 changes: 19 additions & 5 deletions inference-engine/src/gna_plugin/gna_graph_compiler.cpp
@@ -534,9 +534,22 @@ void GNAGraphCompiler::ConcatPrimitive(InferenceEngine::CNNLayerPtr layer) {
 
     auto& concatLayerInfo = concat_connection.find(concatLayer->name)->second;
     for (auto &&outLayer : concatLayer->outData.front()->getInputTo()) {
-        if ( LayerInfo(outLayer.second).isConcat() ) {
-            connectOutput(layer, &concatLayerInfo.gna_ptr, concatLayerInfo.reserved_size);
-        }
+        auto concatCandidate = outLayer.second;
+        if (LayerInfo(concatCandidate).isNonFunctional()) {
+            // searching for the next concat, skipping non-functional layers in between
+            auto isNonFunctional = [](CNNLayerPtr l) {
+                return LayerInfo(l).isNonFunctional();
+            };
+            if (!CNNNetHasNextLayerSkipCertain(concatCandidate, 0, 0, isNonFunctional)) {
+                continue;
+            }
+            concatCandidate = CNNNetGetNextLayerSkipCertain(concatCandidate, 0, 0, isNonFunctional).first;
+        }
+        if (!LayerInfo(concatCandidate).isConcat()) {
+            continue;
+        }
+        gnalog() << "Cascaded concat connection found from: " << layer->name << ", to: " << concatCandidate->name << std::endl;
+        connectOutput(layer, &concatLayerInfo.gna_ptr, concatLayerInfo.reserved_size);
     }
 
     size_t idx = 0;
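
In effect, ConcatPrimitive now recognizes a cascade even when layers that GNA treats as non-functional (reshape/squeeze-style no-ops) sit between the two concats. Below is a minimal standalone sketch of that skip-forward idea, using a hypothetical single-output Layer struct rather than the plugin's real CNNLayer graph API:

// Hypothetical stand-in types; the real code walks InferenceEngine CNNLayer
// links via CNNNetHasNextLayerSkipCertain / CNNNetGetNextLayerSkipCertain.
struct Layer {
    bool nonFunctional = false;  // e.g. reshape/squeeze: no GNA computation
    bool concat = false;
    Layer* next = nullptr;       // single output edge, for simplicity
};

// Walk past non-functional layers; return the first functional successor (or null).
Layer* skipNonFunctional(Layer* l) {
    while (l != nullptr && l->nonFunctional) {
        l = l->next;
    }
    return l;
}

// The "cascaded concat" test: a concat output reaches another concat
// through nothing but non-functional layers.
bool feedsCascadedConcat(Layer* successor) {
    Layer* candidate = skipNonFunctional(successor);
    return candidate != nullptr && candidate->concat;
}

int main() {
    Layer concat2{false, true, nullptr};
    Layer squeeze{true, false, &concat2};
    Layer unsqueeze{true, false, &squeeze};
    return feedsCascadedConcat(&unsqueeze) ? 0 : 1;  // cascade detected -> exit 0
}

Note that the real helper first checks that a functional successor exists at all, which is what the CNNNetHasNextLayerSkipCertain guard above does before dereferencing the result.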
@@ -557,21 +570,22 @@ void GNAGraphCompiler::ConcatPrimitive(InferenceEngine::CNNLayerPtr layer) {
         auto layerInfo = LayerInfo(concatParent);
         // auto layerInfo = LayerInfo(concatLayerInput->insData[it].lock()->getCreatorLayer().lock());
         if (layerInfo.isInput()) {
+            auto & bytesAllocated = inputDesc->bytes_allocated_for_input[((InferenceEngine::CNNLayerPtr)layerInfo)->name];
             if (concatLayerInfo.input_allocated) {
                 // for concat input allocated only once, so lets mark this specific input layer also as allocated
                 // we will bind it to offset further in connectInput
                 // size need to be equal to full layer in order to pass checks
-                inputDesc->bytes_allocated_for_input[((InferenceEngine::CNNLayerPtr)layerInfo)->name] = concatLayerInfo.reserved_size;
+                bytesAllocated = concatLayerInfo.reserved_size;
             }
 
             connectInput(layer, &concatLayerInfo.gna_ptr,
                 concatLayerInfo.reserved_size, -static_cast<int32_t>(inputLayer.offset), idx);
 
             // TODO: currently connectInput api accept only total size, for concat we need extension for allocated, and actual sizes
-            inputDesc->bytes_allocated_for_input[((InferenceEngine::CNNLayerPtr) layerInfo)->name] = inputLayer.tensorSize;
+            bytesAllocated = inputLayer.tensorSize;
 
             concatLayerInfo.input_allocated = true;
-        } else if (layerInfo.isMemory()) {
+        } else if (layerInfo.isMemory()) {
             connectInput(layer, &concatLayerInfo.gna_ptr, concatLayerInfo.reserved_size, -static_cast<int>(inputLayer.offset), idx);
 
             concatLayerInfo.input_allocated = true;
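
This second hunk changes no behavior; it binds a reference to the bytes_allocated_for_input map entry once and reuses it for both writes, instead of repeating the same operator[] lookup with the same key. A minimal sketch of the pattern, with a hypothetical map in place of the plugin's inputDesc structure:

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::size_t> bytesAllocatedForInput;

    // operator[] inserts a default entry if absent and returns a stable reference,
    // so every later write through it updates the map in place.
    auto& bytesAllocated = bytesAllocatedForInput["input_0"];
    bytesAllocated = 256;  // e.g. the concat's full reserved size, to pass size checks
    bytesAllocated = 64;   // later: the input's actual tensor size

    assert(bytesAllocatedForInput["input_0"] == 64);
    return 0;
}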
@@ -0,0 +1,65 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include <map>
#include <vector>
#include <ie_precision.hpp>
#include <subgraph_tests/cascade_concat.hpp>
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;

namespace {
std::vector<std::vector<std::vector<size_t>>> shape1{
    {{1, 64}},
    {{1, 128}},
    {{1, 32}},
    {{1, 16}},
    {{1, 8}}
};

std::vector<std::vector<std::vector<size_t>>> shape2{
    {{1, 72}},
    {{1, 128}},
    {{1, 32}},
    {{1, 16}},
    {{1, 8}}
};

std::vector<std::vector<std::vector<size_t>>> shape3{
    {{1, 80}},
    {{1, 128}},
    {{1, 32}},
    {{1, 16}},
    {{1, 8}}
};

std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32};

std::map<std::string, std::string> additional_config = {
    {"GNA_SCALE_FACTOR_0", "1"},
    {"GNA_SCALE_FACTOR_1", "1"},
    {"GNA_SCALE_FACTOR_2", "1"}
};

INSTANTIATE_TEST_CASE_P(cascade_concat, CascadeConcat,
                        ::testing::Combine(
                                ::testing::ValuesIn(shape1),
                                ::testing::ValuesIn(shape2),
                                ::testing::ValuesIn(shape3),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(false),
                                ::testing::Values(CommonTestUtils::DEVICE_GNA),
                                ::testing::Values(additional_config)),
                        CascadeConcat::getTestCaseName);

INSTANTIATE_TEST_CASE_P(cascade_concat_multioutput, CascadeConcat,
                        ::testing::Combine(
                                ::testing::ValuesIn(shape1),
                                ::testing::ValuesIn(shape2),
                                ::testing::ValuesIn(shape3),
                                ::testing::ValuesIn(netPrecisions),
                                ::testing::Values(true),
                                ::testing::Values(CommonTestUtils::DEVICE_GNA),
                                ::testing::Values(additional_config)),
                        CascadeConcat::getTestCaseName);
}  // namespace
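
Since ValuesIn draws from a five-entry shape list for each of the three inputs and every other parameter is a single value, each INSTANTIATE_TEST_CASE_P above expands to 5 × 5 × 5 = 125 parameterized test cases.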
@@ -0,0 +1,31 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include <tuple>
#include <string>
#include <vector>
#include <map>
#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"

namespace LayerTestsDefinitions {

typedef std::tuple<
    std::vector<std::vector<size_t>>,    // Input shapes 1
    std::vector<std::vector<size_t>>,    // Input shapes 2
    std::vector<std::vector<size_t>>,    // Input shapes 3
    InferenceEngine::Precision,          // Network precision
    bool,                                // true = multioutput, false = single output
    std::string,                         // Device name
    std::map<std::string, std::string>   // Additional configuration
> CascadeConcatTuple;

class CascadeConcat
        : public testing::WithParamInterface<CascadeConcatTuple>,
          public LayerTestsUtils::LayerTestsCommon {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<CascadeConcatTuple> &obj);
protected:
    void SetUp() override;
};
}  // namespace LayerTestsDefinitions
@@ -0,0 +1,65 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <debug.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "subgraph_tests/cascade_concat.hpp"

namespace LayerTestsDefinitions {

std::string CascadeConcat::getTestCaseName(const testing::TestParamInfo<CascadeConcatTuple> &obj) {
    std::vector<std::vector<size_t>> input1, input2, input3;
    InferenceEngine::Precision netPrecision;
    std::string targetName;
    bool multioutput;
    std::map<std::string, std::string> additional_config;
    std::tie(input1, input2, input3, netPrecision, multioutput, targetName, additional_config) = obj.param;
    std::ostringstream results;

    results << "IS=" << CommonTestUtils::vec2str(input1[0]) << "_";
    results << CommonTestUtils::vec2str(input2[0]) << "_";
    results << CommonTestUtils::vec2str(input3[0]) << "_";
    results << "netPRC=" << netPrecision.name() << "_";
    results << "Multioutput=" << multioutput << "_";
    results << "targetDevice=" << targetName << "_";
    return results.str();
}

void CascadeConcat::SetUp() {
    std::vector<std::vector<size_t>> input1, input2, input3;
    InferenceEngine::Precision netPrecision;
    std::map<std::string, std::string> additional_config;
    bool multioutput;
    std::tie(input1, input2, input3, netPrecision, multioutput, targetDevice, additional_config) = this->GetParam();
    configuration.insert(additional_config.begin(), additional_config.end());
    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
    auto input = ngraph::builder::makeParams(ngPrc, {input1[0], input2[0], input3[0]});
    auto relu1 = std::make_shared<ngraph::opset1::Relu>(input[0]);
    auto relu2 = std::make_shared<ngraph::opset1::Relu>(input[1]);
    auto relu3 = std::make_shared<ngraph::opset1::Relu>(input[2]);
    auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0),
                                                                                relu2->output(0)},
                                                           1);
    // Unsqueeze/squeeze pair between the concats: non-functional layers for GNA
    auto reshape = ngraph::builder::makeSqueezeUnsqueeze(concat, ngPrc, {0}, ngraph::helpers::SqueezeOpType::UNSQUEEZE);
    auto reshape2 = ngraph::builder::makeSqueezeUnsqueeze(reshape, ngPrc, {0}, ngraph::helpers::SqueezeOpType::SQUEEZE);
    auto concat2 = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{reshape2->output(0),
                                                                                 relu3->output(0)},
                                                            1);
    ngraph::ResultVector results;
    if (multioutput) {
        auto const_mult = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1, input1[0][1] + input2[0][1]},
                                                        std::vector<float>{1.01f});
        auto mult = std::make_shared<ngraph::op::v0::Multiply>(concat, const_mult);
        results = ngraph::ResultVector{std::make_shared<ngraph::opset1::Result>(concat2),
                                       std::make_shared<ngraph::opset1::Result>(mult)};
    } else {
        results = ngraph::ResultVector{std::make_shared<ngraph::opset1::Result>(concat2)};
    }
    function = std::make_shared<ngraph::Function>(results, input, "concat_reshape_reshape_concat_mul");
}

TEST_P(CascadeConcat, CompareWithRefs) {
    Run();
}
}  // namespace LayerTestsDefinitions
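
For orientation, the subgraph SetUp builds looks roughly like this (multioutput = false); the unsqueeze/squeeze pair between the two concats is exactly the kind of non-functional chain the compiler change above learns to look through:

    Param1 -> Relu --\
                      Concat1 -> Unsqueeze -> Squeeze --\
    Param2 -> Relu --/                                   Concat2 -> Result
    Param3 -> Relu ---------------------------------------/

With multioutput = true, Concat1 additionally feeds a Multiply by 1.01 whose output becomes a second Result, so the first concat has both a functional and a non-functional consumer.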
