Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Single layer tests for Assign/ReadValue ops #5735

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/memory.h"

using namespace LayerTestsDefinitions;

namespace {

// Input tensor shapes exercised by the memory (Assign/ReadValue) test.
const std::vector<InferenceEngine::SizeVector> inputShapes = {
        {3},
        {100, 100},
};

// Network precisions under test.
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
};

// Number of consecutive infer iterations accumulating into the memory state.
const std::vector<int64_t> iterationCounts = {
        1,
        3,
        10,
};

INSTANTIATE_TEST_CASE_P(smoke_MemoryTest, MemoryTest,
        ::testing::Combine(
                ::testing::ValuesIn(iterationCounts),
                ::testing::ValuesIn(inputShapes),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
        MemoryTest::getTestCaseName);

} // namespace

Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>

#include "single_layer_tests/memory.h"

using namespace LayerTestsDefinitions;

namespace {

// Input tensor shapes exercised by the memory (Assign/ReadValue) test.
const std::vector<InferenceEngine::SizeVector> inputShapes = {
        {1, 1},
        {1, 2},
};

// Network precisions under test.
const std::vector<InferenceEngine::Precision> netPrecisions = {
        InferenceEngine::Precision::FP32,
};

// Number of consecutive infer iterations accumulating into the memory state.
const std::vector<int64_t> iterationCounts = {
        1,
        3,
        10,
};

INSTANTIATE_TEST_CASE_P(smoke_MemoryTest, MemoryTest,
        ::testing::Combine(
                ::testing::ValuesIn(iterationCounts),
                ::testing::ValuesIn(inputShapes),
                ::testing::ValuesIn(netPrecisions),
                ::testing::Values(CommonTestUtils::DEVICE_GNA)),
        MemoryTest::getTestCaseName);

} // namespace
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "shared_test_classes/single_layer/memory.hpp"

namespace LayerTestsDefinitions {

// Parameterized entry point: runs the shared Memory (Assign/ReadValue) test
// flow and compares plugin output against the ngraph reference evaluation.
TEST_P(MemoryTest, CompareWithRefs) {
    Run();
}
// NOTE: no trailing ';' after the TEST_P body — the macro expands to a class
// and a method definition, so an extra semicolon is a spurious empty
// declaration (warns under -Wpedantic/-Wextra-semi).

} // namespace LayerTestsDefinitions
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "shared_test_classes/base/layer_test_utils.hpp"

namespace LayerTestsDefinitions {

// Parameter tuple for the Memory (Assign/ReadValue) single-layer test.
using MemoryTestParams = std::tuple<
int64_t, // iterationCount — number of consecutive Infer/Validate rounds
InferenceEngine::SizeVector, // inputShape
InferenceEngine::Precision, // netPrecision
std::string // targetDevice
>;

// Single-layer test for the Assign/ReadValue (memory state) ops. Builds a
// ReadValue -> Add -> Assign loop over one variable and checks that repeated
// inference accumulates state identically on the plugin and in the ngraph
// reference evaluation (see CalculateRefs).
class MemoryTest : public testing::WithParamInterface<MemoryTestParams>, virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<MemoryTestParams> &obj);
// Overridden to drive multiple Infer/Validate iterations instead of one.
void Run() override;
protected:
// Evaluates the ngraph function directly, threading eval_context through so
// the reference sees the same persistent variable state as the plugin.
std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> CalculateRefs() override;
void SetUp() override;
private:
InferenceEngine::Precision netPrecision;
// Holds the VariableContext shared across reference evaluations.
ngraph::EvaluationContext eval_context;
int64_t iteration_count;
};

} // namespace LayerTestsDefinitions
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <signal.h>
#include "ngraph/opsets/opset7.hpp"
#include "ngraph_functions/builders.hpp"
#include "shared_test_classes/single_layer/memory.hpp"

namespace LayerTestsDefinitions {

// Builds a human-readable, filter-friendly test-case name from the parameter
// tuple, e.g. "iteration_count=3_IS=(100.100)_netPRC=FP32_trgDev=CPU".
std::string MemoryTest::getTestCaseName(const testing::TestParamInfo<MemoryTestParams> &obj) {
    int64_t iteration_count;
    InferenceEngine::Precision netPrecision;
    InferenceEngine::SizeVector inputShape;
    std::string targetDevice;
    std::tie(iteration_count, inputShape, netPrecision, targetDevice) = obj.param;

    std::ostringstream result;
    result << "iteration_count=" << iteration_count << "_";
    result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
    result << "netPRC=" << netPrecision.name() << "_";
    result << "trgDev=" << targetDevice;
    // Fix: the original appended a stray ")" here with no matching "(",
    // leaving an unbalanced parenthesis in every test name.
    return result.str();
}

// Builds the test graph: Parameter -> ReadValue("v0") -> Add(ReadValue, Parameter)
// -> {Result, Assign("v0")}. Each inference therefore adds the fresh input to
// the state accumulated in variable "v0", and the Assign sink writes it back.
// Also pre-populates eval_context so CalculateRefs() evaluates against the
// same persistent variable.
void MemoryTest::SetUp() {
using namespace ngraph;
InferenceEngine::SizeVector inputShape;
std::tie(iteration_count, inputShape, netPrecision, targetDevice) = this->GetParam();
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

auto param = ngraph::builder::makeParams(ngPrc, {inputShape});
// The variable is declared fully dynamic (shape and type); the concrete
// shape/type are pinned by the ReadValue/Assign pair bound to it below.
auto variable = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "v0"});
auto read_value = std::make_shared<opset7::ReadValue>(param.at(0), variable);
auto add = std::make_shared<opset7::Add>(read_value, param.at(0));
auto assign = std::make_shared<opset7::Assign>(add, variable);
auto res = std::make_shared<opset7::Result>(add);
// Assign is registered as a sink so the function keeps it alive even though
// no Result consumes it.
function = std::make_shared<Function>(ResultVector{res}, SinkVector{assign}, param, "TestMemory");

// Seed the reference-evaluation context with an initial value for "v0".
// NOTE(review): the HostTensor is created but not explicitly zero-filled
// here — presumably the first ReadValue/evaluate initializes it; confirm.
auto hostTensor = std::make_shared<ngraph::HostTensor>(ngPrc, inputShape);
auto variable_context = std::make_shared<VariantWrapper<VariableContext>>(VariableContext());
auto variable_value = std::make_shared<VariableValue>(hostTensor);
variable_context->get().set_variable_value(function->get_variable_by_id("v0"), variable_value);
eval_context["VariableContext"] = variable_context;
}


// Custom Run(): unlike the base-class single-shot flow, performs
// iteration_count consecutive Infer()+Validate() rounds so the memory state
// is exercised across inferences. Also mirrors the base class's
// conformance-report bookkeeping (Summary) and crash handling.
void MemoryTest::Run() {
using namespace LayerTestsUtils;
// Flush the conformance report before dying so a SIGSEGV in the plugin still
// leaves a record. NOTE(review): the handler is installed per-Run and never
// restored to SIG_DFL afterwards — it stays active for later tests; confirm
// this matches the base LayerTestsCommon behavior.
auto crashHandler = [](int errCode) {
auto &s = Summary::getInstance();
s.saveReport();
std::cout << "Unexpected application crash!" << std::endl;
std::abort();
};
signal(SIGSEGV, crashHandler);

auto &s = LayerTestsUtils::Summary::getInstance();
s.setDeviceName(targetDevice);

if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
s.updateOPsStats(function, PassRate::Statuses::SKIPPED);
GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
} else {
// Pre-mark as CRASHED; overwritten with PASSED/FAILED below. If the process
// dies mid-test, the report keeps the CRASHED status.
s.updateOPsStats(function, PassRate::Statuses::CRASHED);
}

try {
LoadNetwork();
GenerateInputs();
// Each iteration feeds the same inputs; the accumulated memory state makes
// the expected output differ per iteration (reference tracks it via
// eval_context in CalculateRefs).
for (int64_t i = 0; i < iteration_count; ++i) {
Infer();
Validate();
}
s.updateOPsStats(function, PassRate::Statuses::PASSED);
}
catch (const std::runtime_error &re) {
s.updateOPsStats(function, PassRate::Statuses::FAILED);
GTEST_FATAL_FAILURE_(re.what());
} catch (const std::exception &ex) {
s.updateOPsStats(function, PassRate::Statuses::FAILED);
GTEST_FATAL_FAILURE_(ex.what());
} catch (...) {
s.updateOPsStats(function, PassRate::Statuses::FAILED);
GTEST_FATAL_FAILURE_("Unknown failure occurred.");
}
}

std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> MemoryTest::CalculateRefs() {
using namespace ngraph;
function->validate_nodes_and_infer_types();

auto referenceInputs = std::vector<std::vector<uint8_t>>(inputs.size());
auto refInputsTypes = std::vector<ngraph::element::Type>(inputs.size());
HostTensorVector inputTensors;
for (auto & input : inputs) {
const auto &dataSize = input->byteSize();
const auto &tensorDesc = input->getTensorDesc();

auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
IE_ASSERT(memory);
const auto lockedMemory = memory->wmap();
const auto buffer = lockedMemory.as<const std::uint8_t *>();

auto hostTensor = std::make_shared<ngraph::HostTensor>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(tensorDesc.getPrecision()),
tensorDesc.getDims());
hostTensor->write(buffer, dataSize);
inputTensors.push_back(hostTensor);
}

const auto &outInfo = executableNetwork.GetOutputsInfo();
HostTensorVector outputTensors(outInfo.size(), std::make_shared<ngraph::HostTensor>());
function->evaluate(outputTensors, inputTensors, eval_context);

std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> outputs(outInfo.size());
for (size_t idx = 0; idx < outInfo.size(); ++idx) {
outputs[idx].first = outputTensors[idx]->get_element_type();
outputs[idx].second.resize(outputTensors[idx]->get_size_in_bytes());
outputTensors[idx]->read(outputs[idx].second.data(), outputTensors[idx]->get_size_in_bytes());
}
return outputs;
}

} // namespace LayerTestsDefinitions

4 changes: 4 additions & 0 deletions ngraph/core/src/function.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -528,6 +528,10 @@ bool Function::evaluate(const HostTensorVector& output_tensors,
output_tensor_map[result] = output_tensors.at(i);
outputs.push_back(result);
}
for (const auto& m_sink : m_sinks)
{
outputs.push_back(m_sink);
}
evaluate_nodes(value_map, output_tensor_map, outputs, evaluation_context);
return true;
}
Expand Down