Revert "[gna] Fixed export/import precision"
This reverts commit d381a2e.
mryzhov committed Jul 1, 2021
1 parent 2b26035 commit 7906a79
Showing 10 changed files with 12 additions and 29 deletions.
5 changes: 3 additions & 2 deletions inference-engine/src/gna_plugin/gna_model_serial.cpp
@@ -831,7 +831,7 @@ std::vector<HeaderLatest::RuntimeEndPoint> GNAModelSerial::serializeOutputs(cons
}
uint32_t elementsCount = static_cast<uint32_t>(InferenceEngine::details::product(outputDims.begin(), outputDims.end()));
InferenceEngine::Layout outputLayout = output.second->getLayout();
- uint8_t outputPrecision = output.second->getPrecision().getPrecVal();
+ InferenceEngine::Precision::ePrecision outputPrecision = InferenceEngine::Precision::FP32;
HeaderLatest::RuntimeEndPoint endPoint(outputsDesc[outputIndex].scale_factor,
outputsDesc[outputIndex].ptrs[0],
outputsDesc[outputIndex].num_bytes_per_element,
@@ -866,7 +866,7 @@ std::vector<HeaderLatest::RuntimeEndPoint> GNAModelSerial::serializeInputs(const
uint32_t elementsCount = static_cast<uint32_t>(InferenceEngine::details::product(inputDims.begin(), inputDims.end()));
intel_dnn_orientation_t orientation = inputDesc->getOrientation(inputName);
InferenceEngine::Layout inputLayout = input.second->getLayout();
- uint8_t inputPrecision = input.second->getPrecision().getPrecVal();
+ InferenceEngine::Precision::ePrecision inputPrecision = InferenceEngine::Precision::FP32;
HeaderLatest::RuntimeEndPoint endPoint(scaleFactor,
descriptor_ptr[0],
element_size,
@@ -886,6 +886,7 @@ void GNAModelSerial::ImportInputs(std::istream &is,
std::shared_ptr<GNAPluginNS::InputDesc> inputsDesc,
InferenceEngine::InputsDataMap& dataMap) {
dataMap.clear();

for (uint32_t inputIndex = 0; inputIndex < modelHeader.nInputs; inputIndex++) {
const std::string& name = (modelHeader.version.major == 2 && modelHeader.version.minor >= 3)
? inputNames.at(inputIndex) : std::string("input" + std::to_string(inputIndex));
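Net effect of the two hunks above: the precision recorded for each serialized input/output endpoint goes back to a hard-coded FP32 instead of the precision the blob actually reports. A minimal sketch of the two variants, using only the calls visible in the diff; the helper names are illustrative and not part of the plugin:

#include <ie_precision.hpp>
#include <cstdint>

// Illustration only, not plugin code.
uint8_t precisionBeforeRevert(const InferenceEngine::Precision& blobPrecision) {
    // The fix being reverted recorded the precision the blob really uses.
    return blobPrecision.getPrecVal();
}

InferenceEngine::Precision::ePrecision precisionAfterRevert() {
    // The restored behaviour always writes FP32 into the exported model header.
    return InferenceEngine::Precision::FP32;
}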
7 changes: 4 additions & 3 deletions inference-engine/src/gna_plugin/gna_plugin.cpp
@@ -714,7 +714,7 @@ void GNAPlugin::LoadNetwork(CNNNetwork & _network) {
convertedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(graph, clonedNetwork);
}
IE_SUPPRESS_DEPRECATED_START
- network = convertedNetwork ? InferenceEngine::CNNNetwork{convertedNetwork} : _network;
+ InferenceEngine::CNNNetwork network = convertedNetwork ? InferenceEngine::CNNNetwork{convertedNetwork} : _network;
IE_SUPPRESS_DEPRECATED_END

NetPass::ConvertPrecision(network, Precision::I64, Precision::I32);
@@ -1623,6 +1623,7 @@ void GNAPlugin::Export(std::ostream &outStream) {
THROW_GNA_EXCEPTION << " exporting network with multiple inputs not supported";
}
#endif

// TODO: nnet group parameter looks only used in application - so can we move this line into load network.
IE_ASSERT(!inputsDataMap.empty());
auto inputDims = inputsDataMap.begin()->second->getTensorDesc().getDims();
@@ -1639,8 +1640,8 @@
auto serial = GNAModelSerial(modelToSerial,
inputsDesc,
outputsDesc,
- network.getInputsInfo(),
- network.getOutputsInfo())
+ inputsDataMap,
+ outputsDataMap)
.SetInputRotation(transpose_inputs_info)
.SetOutputRotation(transpose_outputs_info);

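With the stored CNNNetwork gone (see the header change below), Export() again hands the plugin's own inputsDataMap and outputsDataMap to GNAModelSerial rather than the maps obtained from network.getInputsInfo()/getOutputsInfo(). Both call sites ultimately walk an InferenceEngine::InputsDataMap; the helper below only illustrates how the precisions recorded in such a map can be inspected, and is not plugin code:

#include <ie_core.hpp>
#include <iostream>

// Illustrative only: print the precision each input of a data map reports.
void dumpInputPrecisions(const InferenceEngine::InputsDataMap& inputs) {
    for (const auto& in : inputs) {
        std::cout << in.first << ": " << in.second->getPrecision().name() << std::endl;
    }
}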
1 change: 0 additions & 1 deletion inference-engine/src/gna_plugin/gna_plugin.hpp
@@ -80,7 +80,6 @@ class GNAPlugin : public InferenceEngine::IInferencePlugin {
*/
uint32_t rwSegmentSize = 0;

- InferenceEngine::CNNNetwork network;
InferenceEngine::InputsDataMap inputsDataMap;
InferenceEngine::OutputsDataMap outputsDataMap;
std::vector<InferenceEngine::IVariableStateInternal::Ptr> memoryStates;
(diff for another changed file; file path not shown)
@@ -46,10 +46,6 @@ TEST_P(ImportReshapePermuteConvGNA, CompareWithRefImpl) {
Run();
};

- const std::vector<std::vector<size_t>> inputShape = {
- {}
- };

const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32,
InferenceEngine::Precision::FP16
@@ -80,7 +76,6 @@ const std::vector<std::string> appHeaders = {

INSTANTIATE_TEST_SUITE_P(smoke_ImportNetworkCase, ImportReshapePermuteConvGNA,
::testing::Combine(
- ::testing::ValuesIn(inputShape),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_GNA),
::testing::ValuesIn(exportConfigs),
(diff for another changed file; file path not shown)
@@ -37,6 +37,8 @@ std::vector<std::string> disabledTestPatterns() {
R"(.*ConvolutionLayerTest.CompareWithRefs.*D=\(3.1\).*)",
R"(.*ConstantResultSubgraphTest.*IS=\(2\.3\.4\.5\).*)",
R"(.*ConstantResultSubgraphTest.*inPrc=(U8|I8|I32|U64|I64|BOOL).*)",
+ // TODO: Issue 51528
+ R"(.*CachingSupport.*_(u8|i16)_.*)",
// TODO: Issue 51525
R"(.*CachingSupport.*KSOFunction.*)",
// TODO: Issue 57363 (Param -> Result subgraphs)
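The revert re-disables the import/export caching tests that use u8 and i16 precisions (Issue 51528). Entries returned by disabledTestPatterns() are regular expressions matched against full test names; the sketch below shows that kind of filtering under the assumption of plain std::regex semantics, and is not the test framework's actual skip logic:

#include <regex>
#include <string>
#include <vector>

// Illustrative filter: a test is skipped if its full name matches any pattern.
bool isDisabled(const std::string& testName, const std::vector<std::string>& patterns) {
    for (const auto& p : patterns) {
        if (std::regex_match(testName, std::regex(p))) {
            return true;
        }
    }
    return false;
}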
(diff for another changed file; file path not shown)
@@ -9,7 +9,6 @@
#include <ie_core.hpp>

typedef std::tuple<
- std::vector<size_t>, // Input shape
InferenceEngine::Precision, // Network Precision
std::string, // Target Device
std::map<std::string, std::string>, // Export Configuration
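After the revert the exportImportNetworkParams tuple no longer carries an input shape. Reconstructed from the fields visible above and the std::tie calls further down, it presumably reduces to something like the following sketch (not the verbatim header):

#include <ie_precision.hpp>
#include <map>
#include <string>
#include <tuple>

// Sketch of the reverted parameter tuple; the trailing fields are inferred
// from the std::tie(netPrecision, targetDevice, exportConfiguration,
// importConfiguration, appHeader) unpacking shown below.
typedef std::tuple<
    InferenceEngine::Precision,          // Network Precision
    std::string,                         // Target Device
    std::map<std::string, std::string>,  // Export Configuration
    std::map<std::string, std::string>,  // Import Configuration
    std::string                          // Application Header
> exportImportNetworkParams;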
(diff for another changed file; file path not shown)
@@ -9,13 +9,12 @@
namespace FuncTestUtils {

std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<exportImportNetworkParams> obj) {
- std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
std::string targetDevice;
std::map<std::string, std::string> exportConfiguration;
std::map<std::string, std::string> importConfiguration;
std::string appHeader;
- std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, appHeader) = obj.param;
+ std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, appHeader) = obj.param;

std::ostringstream result;
result << "netPRC=" << netPrecision.name() << "_";
@@ -27,8 +26,6 @@ std::string ImportNetworkTestBase::getTestCaseName(testing::TestParamInfo<export
result << "_importConfigItem=" << configItem.first << "_" << configItem.second;
}
result << "_appHeader=" << appHeader;
- result << CommonTestUtils::vec2str(inputShape);

return result.str();
}

(diff for another changed file; file path not shown)
@@ -9,9 +9,8 @@
namespace LayerTestsDefinitions {

void ImportNonZero::SetUp() {
- std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
- std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
+ std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

const auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngPrc, ngraph::Shape{1000});
(diff for another changed file; file path not shown)
@@ -9,9 +9,8 @@
namespace LayerTestsDefinitions {

void ImportReshapePermuteConv::SetUp() {
- std::vector<size_t> inputShape;
InferenceEngine::Precision netPrecision;
- std::tie(inputShape, netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
+ std::tie(netPrecision, targetDevice, exportConfiguration, importConfiguration, applicationHeader) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

auto params = ngraph::builder::makeParams(ngPrc, { {1, 336} });
(diff for another changed file; file path not shown)
@@ -269,14 +269,6 @@ void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const
Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
break;
- case InferenceEngine::Precision::I16:
- Compare(reinterpret_cast<const std::int16_t *>(expectedBuffer),
- reinterpret_cast<const std::int16_t *>(actualBuffer), size, 0);
- break;
- case InferenceEngine::Precision::U8:
- Compare(reinterpret_cast<const std::uint8_t *>(expectedBuffer),
- reinterpret_cast<const std::uint8_t *>(actualBuffer), size, 0);
- break;
default:
FAIL() << "Comparator for " << precision << " precision isn't supported";
}
@@ -321,7 +313,6 @@ void LayerTestsCommon::LoadNetwork() {
}

void LayerTestsCommon::GenerateInputs() {
- inputs.clear();
const auto& inputsInfo = executableNetwork.GetInputsInfo();
const auto& functionParams = function->get_parameters();
for (int i = 0; i < functionParams.size(); ++i) {
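The last two hunks remove the I16 and U8 branches from LayerTestsCommon::Compare, so blobs with those precisions now hit the default case and fail with "Comparator ... isn't supported", which is presumably why the u8/i16 caching tests are re-disabled above; GenerateInputs also stops clearing the previous inputs. For reference, the removed branches dispatched to a templated per-element comparison along these lines (a sketch using a simple absolute threshold, not the framework's actual comparator):

#include <gtest/gtest.h>
#include <cmath>
#include <cstddef>

// Sketch only, in the spirit of the Compare<T>(expected, actual, size, 0)
// calls removed above; the real helper may use a different error metric.
template <typename T>
void CompareBuffers(const T* expected, const T* actual, std::size_t size, T threshold) {
    for (std::size_t i = 0; i < size; ++i) {
        const double diff = std::fabs(static_cast<double>(expected[i]) - static_cast<double>(actual[i]));
        ASSERT_LE(diff, static_cast<double>(threshold)) << "difference at element " << i;
    }
}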
