diff --git a/inference-engine/src/gna_plugin/backend/dnn_types.h b/inference-engine/src/gna_plugin/backend/dnn_types.h
index 223fb224203bcf..38082c2fd5773b 100644
--- a/inference-engine/src/gna_plugin/backend/dnn_types.h
+++ b/inference-engine/src/gna_plugin/backend/dnn_types.h
@@ -71,7 +71,7 @@ struct DnnActivation {
         return type;
     }
     static DnnActivation fromType(DnnActivationType type) {
-        DnnActivation activation;
+        DnnActivation activation{};
         activation.type = type;
         activation.args = {};
         return activation;
diff --git a/inference-engine/src/gna_plugin/layers/gna_fake_quantize_layer.hpp b/inference-engine/src/gna_plugin/layers/gna_fake_quantize_layer.hpp
index a9064292355649..bd0fa55490292c 100644
--- a/inference-engine/src/gna_plugin/layers/gna_fake_quantize_layer.hpp
+++ b/inference-engine/src/gna_plugin/layers/gna_fake_quantize_layer.hpp
@@ -26,7 +26,7 @@ class GNAFakeQuantizeLayer {
      * @brief convert FQ layer directly to gna-pwl activation layer
      */
     DnnActivation parseAsActivation() const {
-        DnnActivation fqActivation;
+        DnnActivation fqActivation{};
 
         fqActivation.fqParams.levels = fqLayer->GetParamAsSizeT("levels");
         auto inputShape = getShapeForRange(fqLayer, 1);
diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
index f12135f66b27db..72467598318c3c 100644
--- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
+++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp
@@ -2083,6 +2083,7 @@ void MoveFakeQuantizeLayerIntoQuantParamsPass :: run() {
     };
 
     auto quantParams = InferenceEngine::getInjectedData<QuantizedLayerParams>(layer);
+    IE_ASSERT(quantParams != nullptr);
 
     // Find all output layers connected to FQ
     auto nextLayers = CNNNetGetAllNextLayersSkipCertain(layer.get(), -1, donotSkip);
@@ -2296,7 +2297,7 @@ void TransposeWeightsFromNCHWToNHWCPass::run() {
             }
         }
         // Find a convolution in next layers to rotate weights columns
-        if (!l->outData.empty() && !getInputTo(l->outData[0]).empty() && !l->outData.empty() && !getInputTo(l->outData[0]).empty()) {
+        if (!l->outData.empty() && !getInputTo(l->outData[0]).empty()) {
             std::vector<TranspositionInfo> transpositionInfo;
             auto nextLayer = getInputTo(l->outData[0]).begin()->second;
             transpositionInfo = FindTranspositionInfoFromNextLayers(nextLayer);
@@ -2337,7 +2338,7 @@ void TransposeWeightsFromNCHWToNHWCPass::run() {
         }
         // Find a convolution in previous or next layers
         auto transpositionInfo = FindTranspositionInfoFromPrevLayers(firstInput);
-        if (!FoundPartToTranspose(transpositionInfo)) {
+        if (!FoundPartToTranspose(transpositionInfo) && !l->outData.empty() && !getInputTo(l->outData[0]).empty()) {
             transpositionInfo = FindTranspositionInfoFromNextLayers(getInputTo(l->outData[0]).begin()->second);
         }
         if (FoundPartToTranspose(transpositionInfo)) {
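
Note on the `{}` change in the first two hunks: `DnnActivation` is a plain struct, so `DnnActivation activation;` default-initializes it and leaves its scalar members indeterminate, while `DnnActivation activation{};` value-initializes it and zeroes every member that is not assigned explicitly afterwards. A minimal standalone sketch of the difference, using a hypothetical stand-in struct rather than the plugin's real definition:

#include <cstdio>

// Hypothetical stand-in for DnnActivation; the real struct lives in dnn_types.h.
struct Activation {
    int type;        // indeterminate after default-initialization
    float args[4];
};

int main() {
    Activation a;    // default-init: members hold indeterminate values;
                     // reading a.type here would be undefined behavior
    Activation b{};  // value-init: every member is zero-initialized
    std::printf("b.type = %d\n", b.type);  // guaranteed to print 0
    return 0;
}

The added IE_ASSERT in the third hunk follows the same defensive pattern: it makes a missing quantization-params injection fail loudly at the point of retrieval rather than on a later dereference of quantParams.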