
Revert "[gna] Fixed export/import precision"
This reverts commit d381a2e.
mryzhov committed Jun 23, 2021
1 parent c11de99 commit 6b5d513
Showing 11 changed files with 109 additions and 39 deletions.
5 changes: 3 additions & 2 deletions inference-engine/src/gna_plugin/gna_model_serial.cpp
@@ -831,7 +831,7 @@ std::vector<HeaderLatest::RuntimeEndPoint> GNAModelSerial::serializeOutputs(cons
}
uint32_t elementsCount = static_cast<uint32_t>(InferenceEngine::details::product(outputDims.begin(), outputDims.end()));
InferenceEngine::Layout outputLayout = output.second->getLayout();
uint8_t outputPrecision = output.second->getPrecision().getPrecVal();
InferenceEngine::Precision::ePrecision outputPrecision = InferenceEngine::Precision::FP32;
HeaderLatest::RuntimeEndPoint endPoint(outputsDesc[outputIndex].scale_factor,
outputsDesc[outputIndex].ptrs[0],
outputsDesc[outputIndex].num_bytes_per_element,
@@ -866,7 +866,7 @@ std::vector<HeaderLatest::RuntimeEndPoint> GNAModelSerial::serializeInputs(const
uint32_t elementsCount = static_cast<uint32_t>(InferenceEngine::details::product(inputDims.begin(), inputDims.end()));
intel_dnn_orientation_t orientation = inputDesc->getOrientation(inputName);
InferenceEngine::Layout inputLayout = input.second->getLayout();
uint8_t inputPrecision = input.second->getPrecision().getPrecVal();
InferenceEngine::Precision::ePrecision inputPrecision = InferenceEngine::Precision::FP32;
HeaderLatest::RuntimeEndPoint endPoint(scaleFactor,
descriptor_ptr[0],
element_size,
@@ -886,6 +886,7 @@ void GNAModelSerial::ImportInputs(std::istream &is,
std::shared_ptr<GNAPluginNS::InputDesc> inputsDesc,
InferenceEngine::InputsDataMap& dataMap) {
dataMap.clear();

for (uint32_t inputIndex = 0; inputIndex < modelHeader.nInputs; inputIndex++) {
const std::string& name = (modelHeader.version.major == 2 && modelHeader.version.minor >= 3)
? inputNames.at(inputIndex) : std::string("input" + std::to_string(inputIndex));
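The two serializer hunks above pin the endpoint precision written to the exported blob to FP32 instead of reading it from the port via getPrecision().getPrecVal(). A hedged consequence check, assuming that imported ports report the serialized precision (consistent with the u8/i16 caching tests re-disabled later in this commit); exported_model.blob is a placeholder path:

    #include <cassert>
    #include <fstream>
    #include <ie_core.hpp>

    int main() {
        InferenceEngine::Core core;
        std::ifstream blob("exported_model.blob", std::ios::binary);  // placeholder path
        auto imported = core.ImportNetwork(blob, "GNA");
        // Assumption: the reverted code serializes every endpoint as FP32, so the
        // imported ports report FP32 regardless of the source network's precision.
        for (const auto& in : imported.GetInputsInfo()) {
            assert(in.second->getPrecision() == InferenceEngine::Precision::FP32);
        }
    }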
7 changes: 4 additions & 3 deletions inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -714,7 +714,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
convertedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(graph, clonedNetwork);
}
IE_SUPPRESS_DEPRECATED_START
network = convertedNetwork ? InferenceEngine::CNNNetwork{convertedNetwork} : _network;
InferenceEngine::CNNNetwork network = convertedNetwork ? InferenceEngine::CNNNetwork{convertedNetwork} : _network;
IE_SUPPRESS_DEPRECATED_END

NetPass::ConvertPrecision(network, Precision::I64, Precision::I32);
@@ -1632,6 +1632,7 @@ void GNAPlugin::Export(std::ostream &outStream) {
THROW_GNA_EXCEPTION << " exporting network with multiple inputs not supported";
}
#endif

// TODO: nnet group parameter looks only used in application - so can we move this line into load network.
IE_ASSERT(!inputsDataMap.empty());
auto inputDims = inputsDataMap.begin()->second->getTensorDesc().getDims();
@@ -1648,8 +1649,8 @@ void GNAPlugin::Export(std::ostream &outStream) {
auto serial = GNAModelSerial(modelToSerial,
inputsDesc,
outputsDesc,
network.getInputsInfo(),
network.getOutputsInfo())
inputsDataMap,
outputsDataMap)
.SetInputRotation(transpose_inputs_info)
.SetOutputRotation(transpose_outputs_info);

1 change: 0 additions & 1 deletion inference-engine/src/gna_plugin/gna_plugin.hpp
@@ -83,7 +83,6 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
*/
uint32_t rwSegmentSize = 0;

InferenceEngine::CNNNetwork network;
InferenceEngine::InputsDataMap inputsDataMap;
InferenceEngine::OutputsDataMap outputsDataMap;
std::vector<InferenceEngine::IVariableStateInternal::Ptr> memoryStates;
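With the network member removed here, Export in gna_plugin.cpp (above) goes back to describing I/O through the cached inputsDataMap/outputsDataMap, handing them to GNAModelSerial's constructor and chaining its fluent setters. A minimal self-contained sketch of that chaining pattern, using a hypothetical Serial type rather than the real GNAModelSerial:

    #include <iostream>

    // Hypothetical type illustrating the fluent-setter chaining used above:
    // each setter returns *this, so calls compose off the constructor expression.
    class Serial {
    public:
        Serial& SetInputRotation(int rotation) { inputRotation = rotation; return *this; }
        Serial& SetOutputRotation(int rotation) { outputRotation = rotation; return *this; }
        void Describe() const { std::cout << inputRotation << ' ' << outputRotation << '\n'; }
    private:
        int inputRotation = 0;
        int outputRotation = 0;
    };

    int main() {
        Serial serial = Serial{}.SetInputRotation(1).SetOutputRotation(2);
        serial.Describe();
    }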
@@ -11,21 +11,113 @@

#include <ie_core.hpp>
#include <ie_layouts.h>
#include "base/import_export_base/import_export_base.hpp"

#include "shared_test_classes/base/layer_test_utils.hpp"
#include "functional_test_utils/blob_utils.hpp"
#include "ngraph_functions/utils/ngraph_helpers.hpp"
#include "ngraph_functions/builders.hpp"

typedef std::tuple<
std::vector<size_t>, // Input shape
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Export Configuration
std::map<std::string, std::string> // Import Configuration
> exportImportNetworkParams;

namespace LayerTestsDefinitions {

class ImportActConvActTest : public FuncTestUtils::ImportNetworkTestBase {
class ImportActConvActTest : public testing::WithParamInterface<exportImportNetworkParams>,
public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration) = obj.param;

std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
result << "targetDevice=" << targetDevice << "_";
for (auto const &configItem : exportConfiguration) {
result << "_exportConfigItem=" << configItem.first << "_" << configItem.second;
}
for (auto const &configItem : importConfiguration) {
result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
}
result << CommonTestUtils::vec2str(inputShape);
return result.str();
}

void Run() override {
SKIP_IF_CURRENT_TEST_IS_DISABLED()

configuration.insert(exportConfiguration.begin(), exportConfiguration.end());
LoadNetwork();
GenerateInputs();
Infer();

executableNetwork.Export("exported_model.blob");
for (auto const &configItem : importConfiguration) {
configuration[configItem.first] = configItem.second;
}
std::fstream inputStream("exported_model.blob", std::ios_base::in | std::ios_base::binary);
if (inputStream.fail()) {
FAIL() << "Cannot open file to import model: exported_model.blob";
}

auto importedNetwork = core->ImportNetwork(inputStream, targetDevice, configuration);

// Generate inputs
std::vector<InferenceEngine::Blob::Ptr> inputs;
auto inputsInfo = importedNetwork.GetInputsInfo();
auto functionParams = function->get_parameters();
for (int i = 0; i < functionParams.size(); ++i) {
const auto& param = functionParams[i];
const auto infoIt = inputsInfo.find(param->get_friendly_name());
GTEST_ASSERT_NE(infoIt, inputsInfo.cend());

const auto& info = infoIt->second;
auto blob = GenerateInput(*info);
inputs.push_back(blob);
}

// Infer imported network
InferenceEngine::InferRequest importInfer = importedNetwork.CreateInferRequest();
inputsInfo = importedNetwork.GetInputsInfo();
functionParams = function->get_parameters();
for (int i = 0; i < functionParams.size(); ++i) {
const auto& param = functionParams[i];
const auto infoIt = inputsInfo.find(param->get_friendly_name());
GTEST_ASSERT_NE(infoIt, inputsInfo.cend());

const auto& info = infoIt->second;
auto blob = inputs[i];
importInfer.SetBlob(info->name(), blob);
}
importInfer.Infer();

// Validate
auto expectedOutputs = CalculateRefs();
auto actualOutputs = std::vector<InferenceEngine::Blob::Ptr>{};
for (const auto &output : importedNetwork.GetOutputsInfo()) {
const auto &name = output.first;
actualOutputs.push_back(importInfer.GetBlob(name));
}
IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
<< "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
Compare(expectedOutputs, actualOutputs);
}

protected:
void SetUp() override {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();

std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto relu1 = std::make_shared<ngraph::opset1::Relu>(params[0]);

@@ -73,18 +165,13 @@ const std::vector<std::map<std::string, std::string>> importConfigs = {
}
};

const std::vector<std::string> appHeaders = {
""
};

INSTANTIATE_TEST_CASE_P(smoke_ImportActConvAct, ImportActConvActTest,
::testing::Combine(
::testing::ValuesIn(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
::testing::ValuesIn(importConfigs),
::testing::ValuesIn(appHeaders)),
::testing::ValuesIn(importConfigs)),
ImportActConvActTest::getTestCaseName);

} // namespace LayerTestsDefinitions
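The Run() override above drives the export/import round-trip through a file on disk. The same round-trip works without touching disk; a minimal sketch using the standard Inference Engine Core API, where model.xml is a placeholder model path:

    #include <ie_core.hpp>
    #include <sstream>

    int main() {
        InferenceEngine::Core core;
        auto network = core.ReadNetwork("model.xml");     // placeholder model path
        auto compiled = core.LoadNetwork(network, "GNA");
        std::stringstream blob;
        compiled.Export(blob);                            // serialize the compiled model
        auto imported = core.ImportNetwork(blob, "GNA");  // rebuild it from the stream
        auto request = imported.CreateInferRequest();     // ready to infer
    }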
@@ -46,10 +46,6 @@ TEST_P(ImportReshapePermuteConvGNA, CompareWithRefImpl) {
Run();
};

const std::vector<std::vector<size_t>> inputShape = {
{}
};

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
@@ -80,7 +76,6 @@ const std::vector<std::string> appHeaders = {

INSTANTIATE_TEST_CASE_P(smoke_ImportNetworkCase, ImportReshapePermuteConvGNA,
::testing::Combine(
::testing::ValuesIn(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
@@ -58,6 +58,8 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)",
R"(.*ConstantResultSubgraphTest.*IS=\(2\.3\.4\.5\).*)",
R"(.*ConstantResultSubgraphTest.*inPrc=(U8|I8|I32|U64|I64|BOOL).*)",
// TODO: Issue 51528
R"(.*CachingSupport.*_(u8|i16)_.*)",
// TODO: Issue 51525
R"(.*CachingSupport.*KSOFunction.*)",
// TODO: Issue 57363 (Param -> Result subgraphs)
@@ -9,7 +9,6 @@
#include <ie_core.hpp>

typedef std::tuple<
std::vector<size_t>, // Input shape
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Export Configuration
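This tuple loses its input-shape element, and its consumers are adjusted to match: the std::tie calls in the sections below and the ::testing::Combine lists in the test files above. The invariant they all maintain is that the tuple, the tie, and the Combine list agree in arity and order. A minimal self-contained gtest sketch of that invariant, with a hypothetical two-element tuple:

    #include <gtest/gtest.h>
    #include <string>
    #include <tuple>

    // Hypothetical parameter tuple: each Combine entry below supplies one element.
    using Params = std::tuple<int, std::string>;

    class TupleArityTest : public ::testing::TestWithParam<Params> {};

    TEST_P(TupleArityTest, TieMatchesTuple) {
        int value;
        std::string device;
        std::tie(value, device) = GetParam();  // arity must match Params exactly
        EXPECT_GT(value, 0);
        EXPECT_FALSE(device.empty());
    }

    INSTANTIATE_TEST_CASE_P(smoke_Arity, TupleArityTest,
        ::testing::Combine(::testing::Values(1, 2),
                           ::testing::Values(std::string("GNA"))));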
@@ -9,13 +9,12 @@
namespace FuncTestUtils {

std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::string appHeader;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, appHeader) = obj.param;
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, appHeader) = obj.param;

std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
@@ -27,8 +26,6 @@ std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<export
result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
}
result << "_appHeader=" << appHeader;
result << CommonTestUtils::vec2str(inputShape);

return result.str();
}

@@ -9,9 +9,8 @@
namespace LayerTestsDefinitions {

void ImportNonZero::SetUp() {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, ngraph::Shape{1000});
@@ -9,9 +9,8 @@
namespace LayerTestsDefinitions {

void ImportReshapePermuteConv::SetUp() {
std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
@@ -269,14 +269,6 @@ void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const
Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::I16:
Compare(reinterpret_cast<const std::int16_t *>(expectedBuffer),
reinterpret_cast<const std::int16_t *>(actualBuffer), size, 0);
break;
case InferenceEngine::Precision::U8:
Compare(reinterpret_cast<const std::uint8_t *>(expectedBuffer),
reinterpret_cast<const std::uint8_t *>(actualBuffer), size, 0);
break;
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
}
@@ -321,7 +313,6 @@ void LayerTestsCommon::LoadNetwork() {
}

void LayerTestsCommon::GenerateInputs() {
inputs.clear();
const auto& inputsInfo = executableNetwork.GetInputsInfo();
const auto& functionParams = function->get_parameters();
for (int i = 0; i < functionParams.size(); ++i) {
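The Compare switch above dispatches on blob precision to a typed element-wise comparator; with the I16 and U8 cases removed, blobs in those precisions now fall through to the default FAIL(). A self-contained sketch of such a typed helper, its signature assumed from the call sites rather than taken from LayerTestsCommon:

    #include <gtest/gtest.h>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    // Assumed shape of the typed comparator the switch dispatches to.
    template <class T>
    void CompareBuffers(const T* expected, const T* actual, std::size_t size, T threshold) {
        for (std::size_t i = 0; i < size; ++i) {
            const double diff = std::fabs(static_cast<double>(expected[i]) -
                                          static_cast<double>(actual[i]));
            ASSERT_LE(diff, static_cast<double>(threshold)) << "difference at index " << i;
        }
    }

    TEST(CompareBuffersSketch, ExactMatch) {
        const std::int32_t expected[] = {1, 2, 3};
        const std::int32_t actual[] = {1, 2, 3};
        CompareBuffers<std::int32_t>(expected, actual, 3, 0);
    }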