[CPU tests] migrate single layer test cases to be API 2.0 - part 1
riverlijunjie committed Dec 1, 2023
1 parent 7ff5174 commit a3754d7
Showing 46 changed files with 748 additions and 782 deletions.
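The same substitutions repeat across all 46 files: Inference Engine 1.0 and ngraph types give way to their OpenVINO 2.0 equivalents. The sketch below is not part of the commit; it condenses the pattern visible in the two files shown, and assumes the common_test_utils builder keeps its defaulted trailing shape/constants arguments.

// Minimal sketch of the migrated API 2.0 pattern, condensed from the diff below.
// Assumption: make_activation's trailing shape/constants parameters are defaulted.
#include <memory>
#include "common_test_utils/node_builders/activation.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Model> build_activation_model(const ov::PartialShape& shape) {
    // ov::element::Type replaces InferenceEngine::Precision (FP32 -> f32),
    // so no convertIE2nGraphPrc() round-trip is needed anymore.
    const ov::element::Type prc = ov::element::f32;
    auto param = std::make_shared<ov::op::v0::Parameter>(prc, shape);
    // ov::test::utils::make_activation replaces ngraph::builder::makeActivation.
    auto act = ov::test::utils::make_activation(param, prc, ov::test::utils::ActivationTypes::Relu);
    // ov::Model replaces ngraph::Function.
    return std::make_shared<ov::Model>(ov::NodeVector{act}, ov::ParameterVector{param}, "Activation");
}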
File 1: Activation single-layer test class, implementation (.cpp)
@@ -5,19 +5,18 @@
 #include "activation.hpp"
 #include "gtest/gtest.h"
 #include "test_utils/cpu_test_utils.hpp"
+#include "common_test_utils/node_builders/activation.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
-
-namespace CPULayerTestsDefinitions {
-
+using namespace ov::test::utils;
+
+namespace ov {
+namespace test {
 std::string ActivationLayerCPUTest::getTestCaseName(const testing::TestParamInfo<ActivationLayerCPUTestParamSet> &obj) {
     std::vector<ov::test::InputShape> inputShapes;
     std::vector<size_t> activationShapes;
-    std::pair<ngraph::helpers::ActivationTypes, std::vector<float>> activationTypeAndConstValue;
-    InferenceEngine::Precision netPrecision, inPrecision, outPrecision;
+    std::pair<utils::ActivationTypes, std::vector<float>> activationTypeAndConstValue;
+    ov::element::Type netPrecision, inPrecision, outPrecision;
     CPUTestUtils::CPUSpecificParams cpuParams;
     std::tie(inputShapes, activationShapes, activationTypeAndConstValue, netPrecision, inPrecision, outPrecision, cpuParams) = obj.param;
 
@@ -39,30 +38,30 @@ std::string ActivationLayerCPUTest::getTestCaseName(const testing::TestParamInfo
     }
     result << "AS=" << ov::test::utils::vec2str(activationShapes) << "_";
     result << "ConstantsValue=" << ov::test::utils::vec2str(activationTypeAndConstValue.second) << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "inPRC=" << inPrecision.name() << "_";
-    result << "outPRC=" << outPrecision.name() << "_";
+    result << "netPRC=" << netPrecision.to_string() << "_";
+    result << "inPRC=" << inPrecision.to_string() << "_";
+    result << "outPRC=" << outPrecision.to_string() << "_";
     result << CPUTestUtils::CPUTestsBase::getTestCaseName(cpuParams);
 
     return result.str();
 }
 
-void ActivationLayerCPUTest::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) {
+void ActivationLayerCPUTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
     int32_t startFrom = 0;
     uint32_t range = 0;
     int32_t resolution = 0;
 
-    if (activationType == ActivationTypes::Exp && netPrecision == Precision::BF16) {
+    if (activationType == utils::ActivationTypes::Exp && netPrecision == ov::element::bf16) {
         startFrom = 0;
         range = 2;
         resolution = 32768;
-    } else if (activationType == ActivationTypes::Acosh) {
+    } else if (activationType == utils::ActivationTypes::Acosh) {
         startFrom = 2;
         range = 2;
         resolution = 128;
-    } else if (activationType == ActivationTypes::Acos ||
-               activationType == ActivationTypes::Asin ||
-               activationType == ActivationTypes::Atanh) {
+    } else if (activationType == utils::ActivationTypes::Acos ||
+               activationType == utils::ActivationTypes::Asin ||
+               activationType == utils::ActivationTypes::Atanh) {
         // range [-1, 1] is required
         startFrom = -1;
         range = 2;
@@ -92,39 +91,38 @@ void ActivationLayerCPUTest::SetUp() {
 
     std::vector<ov::test::InputShape> inputShapes;
     std::vector<size_t> activationShapes;
-    std::pair<ngraph::helpers::ActivationTypes, std::vector<float>> activationTypeAndConstValue;
-    InferenceEngine::Precision inPrecision, outPrecision;
+    std::pair<utils::ActivationTypes, std::vector<float>> activationTypeAndConstValue;
+    ov::element::Type inPrecision, outPrecision;
     CPUTestUtils::CPUSpecificParams cpuParams;
     std::tie(inputShapes, activationShapes, activationTypeAndConstValue, netPrecision, inPrecision, outPrecision, cpuParams) = this->GetParam();
     std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
     activationType = activationTypeAndConstValue.first;
     auto constantsValue = activationTypeAndConstValue.second;
 
-    inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrecision);
-    outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrecision);
-    selectedType = getPrimitiveType() + "_" + netPrecision.name();
+    inType = inPrecision;
+    outType = outPrecision;
+    selectedType = getPrimitiveType() + "_" + netPrecision.to_string();
 
 #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)
 # if defined(OPENVINO_ARCH_ARM)
-    if (activationType == ngraph::helpers::ActivationTypes::GeluErf) // @todo tmp fallback to ref, gelu erf is disabled for 32bit ARM
-        selectedType = std::string("ref_") + netPrecision.name();
+    if (activationType == utils::ActivationTypes::GeluErf) // @todo tmp fallback to ref, gelu erf is disabled for 32bit ARM
+        selectedType = std::string("ref_") + netPrecision.to_string();
 # endif
-    if (activationType == ngraph::helpers::ActivationTypes::GeluTanh || // @todo not supported by ACL, can be decomposed with ngraph transformation
-        activationType == ngraph::helpers::ActivationTypes::SoftSign || // @todo not supported by ACL, can be decomposed with ngraph transformation
+    if (activationType == utils::ActivationTypes::GeluTanh || // @todo not supported by ACL, can be decomposed with transformation
+        activationType == utils::ActivationTypes::SoftSign || // @todo not supported by ACL, can be decomposed with transformation
         inputShapes.front().first.rank().get_length() > 5) // @todo tmp fallback to ref, remove after 6D+ ranks are properly supported
-        selectedType = std::string("ref_") + netPrecision.name();
+        selectedType = std::string("ref_") + netPrecision.to_string();
 #else
-    if (activationType == ngraph::helpers::ActivationTypes::Log) // @todo tmp fallback to ref, remove after Log is supported in emitters
-        selectedType = std::string("ref_") + netPrecision.name();
+    if (activationType == utils::ActivationTypes::Log) // @todo tmp fallback to ref, remove after Log is supported in emitters
+        selectedType = std::string("ref_") + netPrecision.to_string();
 #endif
 
     init_input_shapes(inputShapes);
 
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto params = std::make_shared<ov::op::v0::Parameter>(ngPrc, inputDynamicShapes.front());
-    auto activation = ngraph::builder::makeActivation(params, ngPrc, activationType, activationShapes, constantsValue);
+    auto params = std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes.front());
+    auto activation = utils::make_activation(params, netPrecision, activationType, activationShapes, constantsValue);
     activation->get_rt_info() = getCPUInfo();
-    function = std::make_shared<ngraph::Function>(ngraph::NodeVector{activation}, ov::ParameterVector{params}, "Activation");
+    function = std::make_shared<ov::Model>(ov::NodeVector{activation}, ov::ParameterVector{params}, "Activation");
 }
 
 TEST_P(ActivationLayerCPUTest, CompareWithRefs) {
@@ -138,8 +136,8 @@ const std::vector<size_t> activationShapes() {
     return {};
 }
 
-const std::map<ActivationTypes, std::vector<std::vector<float>>>& activationTypes() {
-    static const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes {
+const std::map<utils::ActivationTypes, std::vector<std::vector<float>>>& activationTypes() {
+    static const std::map<utils::ActivationTypes, std::vector<std::vector<float>>> activationTypes {
         {Sqrt, {{}}},
         {Sigmoid, {{}}},
         {Tanh, {{}}},
@@ -160,8 +158,8 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>>& activationTypes() {
     return activationTypes;
 }
 
-const std::vector<Precision>& netPrc() {
-    static const std::vector<Precision> netPrc{Precision::FP32};
+const std::vector<ov::element::Type>& netPrc() {
+    static const std::vector<ov::element::Type> netPrc{ov::element::f32};
 
     return netPrc;
 }
@@ -223,8 +221,8 @@ const std::vector<std::vector<ov::Shape>>& basic5D() {
     return basic5D;
 }
 
-const std::map<ActivationTypes, std::vector<std::vector<float>>>& activationTypesDynamicMath() {
-    static const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypesDynamicMath {
+const std::map<utils::ActivationTypes, std::vector<std::vector<float>>>& activationTypesDynamicMath() {
+    static const std::map<utils::ActivationTypes, std::vector<std::vector<float>>> activationTypesDynamicMath {
         {Log, {{}}},
         {Sign, {{}}},
         {Acos, {{}}},
@@ -245,9 +243,9 @@ const std::map<ActivationTypes, std::vector<std::vector<float>>>& activationTypesDynamicMath() {
     return activationTypesDynamicMath;
 }
 
-const std::vector<Precision>& netPrecisions() {
-    static const std::vector<Precision> netPrecisions {
-        InferenceEngine::Precision::FP32
+const std::vector<ov::element::Type>& netPrecisions() {
+    static const std::vector<ov::element::Type> netPrecisions {
+        ov::element::f32
     };
 
     return netPrecisions;
@@ -269,5 +267,6 @@ const std::vector<std::vector<InputShape>>& dynamicMathBasic() {
     return dynamicMathBasic;
 }
 
-} // namespace Activation
-} // namespace CPULayerTestsDefinitions
+}  // namespace Activation
+}  // namespace test
+}  // namespace ov
File 2: Activation single-layer test class, declarations (.hpp)
@@ -7,43 +7,44 @@
 #include "shared_test_classes/single_layer/activation.hpp"
 
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include <common_test_utils/ov_tensor_utils.hpp>
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 #include "gtest/gtest.h"
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 
 using ActivationLayerCPUTestParamSet =
-    std::tuple<std::vector<ov::test::InputShape>,                     // Input shapes
-               std::vector<size_t>,                                   // Activation shapes
-               std::pair<ngraph::helpers::ActivationTypes, std::vector<float>>, // Activation type and constant value
-               InferenceEngine::Precision,                            // Net precision
-               InferenceEngine::Precision,                            // Input precision
-               InferenceEngine::Precision,                            // Output precision
+    std::tuple<std::vector<ov::test::InputShape>,                    // Input shapes
+               std::vector<size_t>,                                  // Activation shapes
+               std::pair<utils::ActivationTypes, std::vector<float>>, // Activation type and constant value
+               ov::element::Type,                                    // Net precision
+               ov::element::Type,                                    // Input precision
+               ov::element::Type,                                    // Output precision
               CPUTestUtils::CPUSpecificParams>;
 
 class ActivationLayerCPUTest : public testing::WithParamInterface<ActivationLayerCPUTestParamSet>,
                                virtual public ov::test::SubgraphBaseTest,
                                public CPUTestUtils::CPUTestsBase {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<ActivationLayerCPUTestParamSet> &obj);
-    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override;
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override;
 
 protected:
     void SetUp() override;
 
 private:
-    InferenceEngine::Precision netPrecision = InferenceEngine::Precision::UNSPECIFIED;
-    ngraph::helpers::ActivationTypes activationType = ngraph::helpers::None;
+    ov::element::Type netPrecision = ov::element::undefined;
+    utils::ActivationTypes activationType = utils::ActivationTypes::None;
 };
 
 namespace Activation {
 
 const std::vector<size_t> activationShapes();
 
-const std::map<ngraph::helpers::ActivationTypes, std::vector<std::vector<float>>>& activationTypes();
+const std::map<utils::ActivationTypes, std::vector<std::vector<float>>>& activationTypes();
 
-const std::vector<InferenceEngine::Precision>& netPrc();
+const std::vector<ov::element::Type>& netPrc();
 
 /* ============= Activation (1D) ============= */
 const std::vector<CPUTestUtils::CPUSpecificParams>& cpuParams3D();
@@ -60,13 +61,14 @@ const std::vector<CPUTestUtils::CPUSpecificParams>& cpuParams5D();
 
 const std::vector<std::vector<ov::Shape>>& basic5D();
 
-const std::map<ngraph::helpers::ActivationTypes, std::vector<std::vector<float>>>& activationTypesDynamicMath();
+const std::map<utils::ActivationTypes, std::vector<std::vector<float>>>& activationTypesDynamicMath();
 
-const std::vector<InferenceEngine::Precision>& netPrecisions();
+const std::vector<ov::element::Type>& netPrecisions();
 
 const std::vector<CPUTestUtils::CPUSpecificParams>& cpuParamsDynamicMath();
 
 const std::vector<std::vector<ov::test::InputShape>>& dynamicMathBasic();
 
-} // namespace Activation
-} // namespace CPULayerTestsDefinitions
+}  // namespace Activation
+}  // namespace test
+}  // namespace ov
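The free functions declared in the Activation namespace above feed the per-platform instantiation files that this header is shared with. A hedged sketch of such an instantiation follows; it is not part of this commit, and static_shapes_to_test_representation and combineParams are helper names from the OpenVINO test utilities that should be treated as assumptions here.

// Hypothetical instantiation sketch: how the getters above are typically
// combined into ActivationLayerCPUTestParamSet tuples (order must match the tuple).
const auto basicCases5D = ::testing::Combine(
    ::testing::ValuesIn(static_shapes_to_test_representation(Activation::basic5D())),   // Input shapes
    ::testing::Values(Activation::activationShapes()),                                  // Activation shapes
    ::testing::ValuesIn(ov::test::utils::combineParams(Activation::activationTypes())), // Type + constants
    ::testing::ValuesIn(Activation::netPrc()),                                          // Net precision
    ::testing::Values(ov::element::f32),                                                // Input precision
    ::testing::Values(ov::element::f32),                                                // Output precision
    ::testing::ValuesIn(Activation::cpuParams5D()));

INSTANTIATE_TEST_SUITE_P(smoke_Activation_5D, ActivationLayerCPUTest, basicCases5D,
                         ActivationLayerCPUTest::getTestCaseName);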