diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/memory.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/memory.cpp
new file mode 100644
index 00000000000000..750a0e4af5e430
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/memory.cpp
@@ -0,0 +1,37 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/memory.h"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::SizeVector> inShapes = {
+        {3},
+        {100, 100},
+};
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+        InferenceEngine::Precision::FP32,
+};
+
+const std::vector<int64_t> iterationCount {
+        1,
+        3,
+        10
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_MemoryTest, MemoryTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(iterationCount),
+                ::testing::ValuesIn(inShapes),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+        MemoryTest::getTestCaseName);
+
+} // namespace
+
diff --git a/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/memory.cpp b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/memory.cpp
new file mode 100644
index 00000000000000..2ab1357f6748e4
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/memory.cpp
@@ -0,0 +1,36 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/memory.h"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::SizeVector> inShapes = {
+        {1, 1},
+        {1, 2}
+};
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+        InferenceEngine::Precision::FP32
+};
+
+const std::vector<int64_t> iterationCount {
+        1,
+        3,
+        10
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_MemoryTest, MemoryTest,
+        ::testing::Combine(
+                ::testing::ValuesIn(iterationCount),
+                ::testing::ValuesIn(inShapes),
+                ::testing::ValuesIn(inputPrecisions),
+                ::testing::Values(CommonTestUtils::DEVICE_GNA)),
+        MemoryTest::getTestCaseName);
+
+} // namespace
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/memory.h b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/memory.h
new file mode 100644
index 00000000000000..e284ef86cb59ef
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/memory.h
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/single_layer/memory.hpp"
+
+namespace LayerTestsDefinitions {
+
+TEST_P(MemoryTest, CompareWithRefs) {
+    Run();
+};
+
+} // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp
new file mode 100644
index 00000000000000..ca16e30148e68f
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp
@@ -0,0 +1,36 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "shared_test_classes/base/layer_test_utils.hpp"
+
+namespace LayerTestsDefinitions {
+
+using MemoryTestParams = std::tuple<
+        int64_t,                     // iterationCount
+        InferenceEngine::SizeVector, // inputShape
+        InferenceEngine::Precision,  // netPrecision
+        std::string                  // targetDevice
+>;
+
+class MemoryTest : public testing::WithParamInterface<MemoryTestParams>, virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<MemoryTestParams> &obj);
+    void Run() override;
+protected:
+    std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> CalculateRefs() override;
+    void SetUp() override;
+private:
+    InferenceEngine::Precision netPrecision;
+    ngraph::EvaluationContext eval_context;
+    int64_t iteration_count;
+};
+
+} // namespace LayerTestsDefinitions
diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/memory.cpp
new file mode 100644
index 00000000000000..0984a4dbeab07f
--- /dev/null
+++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/memory.cpp
@@ -0,0 +1,127 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <signal.h>
+#include "ngraph/opsets/opset7.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "shared_test_classes/single_layer/memory.hpp"
+
+namespace LayerTestsDefinitions {
+
+    std::string MemoryTest::getTestCaseName(const testing::TestParamInfo<MemoryTestParams> &obj) {
+        int64_t iteration_count;
+        InferenceEngine::Precision netPrecision;
+        InferenceEngine::SizeVector inputShape;
+        std::string targetDevice;
+        std::tie(iteration_count, inputShape, netPrecision, targetDevice) = obj.param;
+
+        std::ostringstream result;
+        result << "iteration_count=" << iteration_count << "_";
+        result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+        result << "netPRC=" << netPrecision.name() << "_";
+        result << "trgDev=" << targetDevice;
+        result << ")";
+        return result.str();
+    }
+
+    void MemoryTest::SetUp() {
+        using namespace ngraph;
+        InferenceEngine::SizeVector inputShape;
+        std::tie(iteration_count, inputShape, netPrecision, targetDevice) = this->GetParam();
+        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+        auto param = ngraph::builder::makeParams(ngPrc, {inputShape});
+        auto variable = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "v0"});
+        auto read_value = std::make_shared<opset7::ReadValue>(param.at(0), variable);
+        auto add = std::make_shared<opset7::Add>(read_value, param.at(0));
+        auto assign = std::make_shared<opset7::Assign>(add, variable);
+        auto res = std::make_shared<opset7::Result>(add);
+        function = std::make_shared<Function>(ResultVector{res}, SinkVector{assign}, param, "TestMemory");
+
+        auto hostTensor = std::make_shared<HostTensor>(ngPrc, inputShape);
+        auto variable_context = std::make_shared<VariantWrapper<VariableContext>>(VariableContext());
+        auto variable_value = std::make_shared<VariableValue>(hostTensor);
+        variable_context->get().set_variable_value(function->get_variable_by_id("v0"), variable_value);
+        eval_context["VariableContext"] = variable_context;
+    }
+
+
+    void MemoryTest::Run() {
+        using namespace LayerTestsUtils;
+        auto crashHandler = [](int errCode) {
+            auto &s = Summary::getInstance();
+            s.saveReport();
+            std::cout << "Unexpected application crash!" << std::endl;
+            std::abort();
+        };
+        signal(SIGSEGV, crashHandler);
+
+        auto &s = LayerTestsUtils::Summary::getInstance();
+        s.setDeviceName(targetDevice);
+
+        if (FuncTestUtils::SkipTestsConfig::currentTestIsDisabled()) {
+            s.updateOPsStats(function, PassRate::Statuses::SKIPPED);
+            GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
+        } else {
+            s.updateOPsStats(function, PassRate::Statuses::CRASHED);
+        }
+
+        try {
+            LoadNetwork();
+            GenerateInputs();
+            for (int64_t i = 0; i < iteration_count; ++i) {
+                Infer();
+                Validate();
+            }
+            s.updateOPsStats(function, PassRate::Statuses::PASSED);
+        }
+        catch (const std::runtime_error &re) {
+            s.updateOPsStats(function, PassRate::Statuses::FAILED);
+            GTEST_FATAL_FAILURE_(re.what());
+        } catch (const std::exception &ex) {
+            s.updateOPsStats(function, PassRate::Statuses::FAILED);
+            GTEST_FATAL_FAILURE_(ex.what());
+        } catch (...) {
+            s.updateOPsStats(function, PassRate::Statuses::FAILED);
+            GTEST_FATAL_FAILURE_("Unknown failure occurred.");
+        }
+    }
+
+    std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> MemoryTest::CalculateRefs() {
+        using namespace ngraph;
+        function->validate_nodes_and_infer_types();
+
+        auto referenceInputs = std::vector<std::vector<uint8_t>>(inputs.size());
+        auto refInputsTypes = std::vector<ngraph::element::Type>(inputs.size());
+        HostTensorVector inputTensors;
+        for (auto & input : inputs) {
+            const auto &dataSize = input->byteSize();
+            const auto &tensorDesc = input->getTensorDesc();
+
+            auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
+            IE_ASSERT(memory);
+            const auto lockedMemory = memory->wmap();
+            const auto buffer = lockedMemory.as<const std::uint8_t *>();
+
+            auto hostTensor = std::make_shared<HostTensor>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(tensorDesc.getPrecision()),
+                                                           tensorDesc.getDims());
+            hostTensor->write(buffer, dataSize);
+            inputTensors.push_back(hostTensor);
+        }
+
+        const auto &outInfo = executableNetwork.GetOutputsInfo();
+        HostTensorVector outputTensors(outInfo.size(), std::make_shared<HostTensor>());
+        function->evaluate(outputTensors, inputTensors, eval_context);
+
+        std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> outputs(outInfo.size());
+        for (size_t idx = 0; idx < outInfo.size(); ++idx) {
+            outputs[idx].first = outputTensors[idx]->get_element_type();
+            outputs[idx].second.resize(outputTensors[idx]->get_size_in_bytes());
+            outputTensors[idx]->read(outputs[idx].second.data(), outputTensors[idx]->get_size_in_bytes());
+        }
+        return outputs;
+    }
+
+} // namespace LayerTestsDefinitions
+
diff --git a/ngraph/core/src/function.cpp b/ngraph/core/src/function.cpp
index 7576a21c7dd9f7..439b000f3f88d7 100644
--- a/ngraph/core/src/function.cpp
+++ b/ngraph/core/src/function.cpp
@@ -528,6 +528,10 @@ bool Function::evaluate(const HostTensorVector& output_tensors,
         output_tensor_map[result] = output_tensors.at(i);
         outputs.push_back(result);
     }
+    for (const auto& m_sink : m_sinks)
+    {
+        outputs.push_back(m_sink);
+    }
     evaluate_nodes(value_map, output_tensor_map, outputs, evaluation_context);
     return true;
 }