[GNA] Fix KW issues (#2)
dorloff committed Jan 16, 2020
1 parent ac4b11e commit 98937d7
Showing 8 changed files with 34 additions and 10 deletions.
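Most of the added lines apply the same defensive pattern, presumably to satisfy the KW (Klocwork) static-analysis findings the commit title refers to: check that a container is non-empty before dereferencing begin(), and lock a weak pointer into a named variable that is asserted before use. A minimal standalone sketch of that pattern, using hypothetical Layer/Data types and plain assert() in place of the plugin's IE_ASSERT macro:

#include <cassert>
#include <memory>
#include <vector>

// Hypothetical stand-ins for the plugin's real types (InferenceEngine::CNNLayer,
// DataPtr, DataWeakPtr); only the shape of the check matters here.
struct Data { int precision = 0; };
struct Layer {
    std::vector<std::weak_ptr<Data>> insData;
    std::vector<std::shared_ptr<Data>> outData;
};

int firstInputPrecision(const Layer& layer) {
    assert(!layer.insData.empty());               // guard before touching begin()
    auto input = layer.insData.begin()->lock();   // lock the weak_ptr once, by name
    assert(input != nullptr);                     // guard before dereferencing
    return input->precision;
}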
1 change: 0 additions & 1 deletion inference-engine/src/gna_plugin/backend/am_intel_dnn.cpp
@@ -1188,7 +1188,6 @@ void GNAPluginNS::backend::AMIntelDNN::InitGNAStruct(intel_nnet_type_t *ptr_nnet
pLayer = ptr_nnet->pLayers;
#endif
for (int i = 0; i < component.size(); i++) {
- auto& comp = component[i];
// std::cout << "Component + " << i <<"=GNA_" << std::distance(ptr_nnet->pLayers, pLayer) << "\n";
switch (component[i].operation) {
case kDnnAffineOp:
@@ -81,6 +81,7 @@ class ModelQuantizer {
if (scaleFactor.size() <= scaleIndex) {
THROW_GNA_EXCEPTION << "Index of scale factor element is incorrect";
}
+ IE_ASSERT(quantData != nullptr);
quantData->_src_quant.scale = scaleFactor[scaleIndex];
scaleIndex++;
}
19 changes: 19 additions & 0 deletions inference-engine/src/gna_plugin/gna_graph_compiler.cpp
@@ -118,6 +118,7 @@ void GNAGraphCompiler::fillSplitConnections(InferenceEngine::CNNLayerPtr layer)
GNAPluginNS::GNASplitLayer layerInfoItem(layer);
size_t split_size = 0;
std::string& id = layer->name;
+ IE_ASSERT(!layer->insData.empty());
auto dataInput = layer->insData.begin()->lock();
if (!dataInput) {
THROW_GNA_EXCEPTION << "Input layer pointer for split/slice is unexpectedly absent";
@@ -199,6 +200,8 @@ void GNAGraphCompiler::ConvolutionPrimitive(InferenceEngine::CNNLayerPtr layer)
auto& convolution = dynamic_cast<ConvolutionLayer&>(*layer.get());
auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(layer);

+ IE_ASSERT(!layer->insData.empty());
+ IE_ASSERT(!layer->outData.empty());
auto inputs = layer->insData.begin()->lock();
auto outputs = *layer->outData.begin();

@@ -414,6 +417,8 @@ void GNAGraphCompiler::PoolingPrimitive(InferenceEngine::CNNLayerPtr layer) {
auto& pooling = dynamic_cast<PoolingLayer&>(*layer.get());
auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(layer);

+ IE_ASSERT(!layer->insData.empty());
+ IE_ASSERT(!layer->outData.empty());
auto inputs = layer->insData.begin()->lock();
auto outputs = *layer->outData.begin();

@@ -477,6 +482,8 @@ void GNAGraphCompiler::PoolingPrimitive(InferenceEngine::CNNLayerPtr layer) {
void GNAGraphCompiler::CopyPrimitive(InferenceEngine::CNNLayerPtr layer) {
auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(layer);

+ IE_ASSERT(!layer->insData.empty());
+ IE_ASSERT(!layer->outData.empty());
auto inputs = layer->insData.begin()->lock();
auto outputs = *layer->outData.begin();

@@ -625,6 +632,8 @@ void GNAGraphCompiler::CropPrimitive(InferenceEngine::CNNLayerPtr layer) {
}
} else {
gnalog() << "Crop " << layer->name << " is being replaced by Affine layer...\n";
+ IE_ASSERT(!layer->outData.empty());
+ IE_ASSERT(!layer->insData.empty());
auto outputs = *layer->outData.begin();
auto inputs = layer->insData.begin()->lock();

@@ -785,6 +794,8 @@ void GNAGraphCompiler::AffinePrimitive(InferenceEngine::CNNLayerPtr layer, bool
auto& weightable = dynamic_cast<WeightableLayer&>(*layer.get());
auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(layer);

+ IE_ASSERT(!layer->insData.empty());
+ IE_ASSERT(!layer->outData.empty());
auto inputs = layer->insData.begin()->lock();
auto outputs = *layer->outData.begin();
auto inputPrecision = quantized ? Precision(Precision::I16) : inputs->getPrecision();
@@ -930,6 +941,8 @@ void GNAGraphCompiler::AffinePrimitive(InferenceEngine::CNNLayerPtr layer, bool
}

void GNAGraphCompiler::FillWeightOfAligningFilter(InferenceEngine::CNNLayerPtr layer, void* ptrWeights, size_t offset, bool isQuantized) {
+ IE_ASSERT(!layer->outData.empty());
+ IE_ASSERT(!layer->insData.empty());
auto outputs = *layer->outData.begin();
auto inputs = layer->insData.begin()->lock();

@@ -970,6 +983,8 @@ void GNAGraphCompiler::ConcatAlignFilterPrimitive(InferenceEngine::CNNLayerPtr l
void* ptr_weights = nullptr;
void* ptr_biases = nullptr;

+ IE_ASSERT(!layer->outData.empty());
+ IE_ASSERT(!layer->insData.empty());
auto outputs = *layer->outData.begin();
auto inputs = layer->insData.begin()->lock();

@@ -1059,6 +1074,8 @@ void GNAGraphCompiler::AffineFilterPrimitive(InferenceEngine::CNNLayerPtr layer)
void* ptr_weights = nullptr;
void* ptr_biases = nullptr;

+ IE_ASSERT(!layer->outData.empty());
+ IE_ASSERT(!layer->insData.empty());
auto outputs = *layer->outData.begin();
auto inputs = layer->insData.begin()->lock();

@@ -1151,6 +1168,8 @@ void GNAGraphCompiler::PWLPrimitive(InferenceEngine::CNNLayerPtr layer) {
}
} while (false);

+ IE_ASSERT(!layer->insData.empty());
+ IE_ASSERT(!layer->outData.empty());
auto inputs = layer->insData.begin()->lock();
auto outputs = *layer->outData.begin();
auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(layer);
6 changes: 4 additions & 2 deletions inference-engine/src/gna_plugin/gna_graph_tools.hpp
@@ -227,8 +227,10 @@ inline void CNNNetSwapLayers(InferenceEngine::CNNLayerPtr lhs,
auto interConnectBackR2L = false;
if (!interConnectBackL2R) {
details::erase_if(rhs->insData, [&interConnectBackR2L, &lhs](DataWeakPtr weakData) {
- interConnectBackR2L |= weakData.lock()->getCreatorLayer().lock() == lhs;
- return weakData.lock()->getCreatorLayer().lock() == lhs;
+ auto data = weakData.lock();
+ IE_ASSERT(data != nullptr);
+ interConnectBackR2L |= data->getCreatorLayer().lock() == lhs;
+ return data->getCreatorLayer().lock() == lhs;
});
}

1 change: 1 addition & 0 deletions inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -1012,6 +1012,7 @@ void GNAPlugin::Infer(const InferenceEngine::Blob &input, InferenceEngine::Blob
THROW_GNA_EXCEPTION << "cannot infer using Infer(Blob&, Blob&)"<< "model accepts " << inputsDataMap.size() << " inputs";
}

+ IE_ASSERT(!inputsDataMap.empty());
bmInput[inputsDataMap.begin()->first] = std::shared_ptr<Blob>(const_cast<Blob*>(&input), [](Blob*){});
bmOutput[outputsDataMap.begin()->first] = std::shared_ptr<Blob>(&output, [](Blob*){});
Infer(bmInput, bmOutput);
2 changes: 1 addition & 1 deletion inference-engine/src/gna_plugin/layers/gna_layer_info.hpp
@@ -175,7 +175,7 @@ class LayerInfo {
try {
size_t cropOffset = cropLayer->offset.back() * cropLayer->precision.size();
return (ALIGN64(cropOffset) != cropOffset);
- } catch (InferenceEngine::details::InferenceEngineException& e) {}
+ } catch (InferenceEngine::details::InferenceEngineException) {}
}
return false;
}
1 change: 1 addition & 0 deletions inference-engine/src/gna_plugin/layers/gna_layer_type.cpp
@@ -24,6 +24,7 @@ bool GNAPluginNS::AreLayersSupported(InferenceEngine::ICNNNetwork& network, std:
std::unordered_set<InferenceEngine::CNNLayer *> allLayers;
auto network_precision = network.getPrecision();
network.getInputsInfo(inputs);
+ IE_ASSERT(!inputs.empty());
auto network_input_precision = inputs.begin()->second->getPrecision();
auto batch_size = network.getBatchSize();

13 changes: 7 additions & 6 deletions inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
@@ -66,6 +66,7 @@ static void insertDiagonalLayerBetween(InferenceEngine::CNNLayerPtr prevLayer,
// TODO: diagonal size
auto dimsIndex = nextLayer->outData[0]->getTensorDesc().getDims().size() - 1;
std::vector<float> weightsValues(nextLayer->outData[0]->getTensorDesc().getDims()[dimsIndex], fillValue);
+ IE_ASSERT(diagLayer != nullptr);
diagLayer->_weights = make_shared_blob<float>(
TensorDesc(
nextLayer->outData[0]->getTensorDesc().getPrecision(),
@@ -186,8 +187,6 @@ static std::vector<CNNLayerPtr> getCandidatesForIdentityInsertion(const CNNLayer
}

void InsertDiagonalLayerPass::run() {
- int numOfDiagLayers = 0;
- auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(pLayers->front());
for (auto & l : *pLayers) {
if (l->insData.empty()) continue;
auto prevLayer = CNNNetPrevLayer(l);
@@ -327,11 +326,12 @@ void SubstitutePReluPass::run() {
// sum
auto sum = getNext(negate);
if (!LayerInfo(sum).isEltwiseSum()) continue;
+ if (sum->insData.size() != 2) continue;

auto inData_0 = sum->insData[0].lock();
+ IE_ASSERT(inData_0 != nullptr);
auto inData_1 = sum->insData[1].lock();
- if (sum->insData.size() != 2
- || inData_0 == nullptr
- || inData_1 == nullptr) continue;
+ IE_ASSERT(inData_1 != nullptr);

auto s1 = inData_0->getCreatorLayer().lock().get();
auto s2 = inData_1->getCreatorLayer().lock().get();
@@ -791,6 +791,8 @@ void InsertSplitAligningFilterPass::run() {
size_t newOutputSize = (currentOffset + ALIGN(outputSize, 8) * bytesPerSplitElement - aligned64_offset)
/ bytesPerSplitElement;

+ IE_ASSERT(filterLayer != nullptr);

// encodes offset to beginning of split layer input
filterLayer->params["offset"] = std::to_string(aligned64_offset / bytesPerSplitElement);

@@ -838,7 +840,6 @@
}

void SubstituteScaleShiftBroadCastPass::run() {
- auto quantized = InferenceEngine::getInjectedData<QuantizedLayerParams>(pLayers->front());
for (auto & l : *pLayers) {
LayerInfo layerInfo(l);

