Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/master' into plugin-api-headers
Browse files Browse the repository at this point in the history
  • Loading branch information
ilya-lavrenov committed Aug 6, 2020
2 parents e6f7f2d + 21c4312 commit ce579fb
Show file tree
Hide file tree
Showing 14 changed files with 107 additions and 64 deletions.
3 changes: 2 additions & 1 deletion inference-engine/src/mkldnn_plugin/nodes/reduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,8 @@ class ReduceImpl: public ExtLayerBase {
int32_t *idx_data = inputs[REDUCE_INDEXES]->cbuffer().as<int32_t *>() +
inputs[REDUCE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
SizeVector axes;
for (size_t i = 0; i < idx_dims[0]; i++) {
const size_t axesIter = idx_dims.empty() ? 1 : idx_dims[0];
for (size_t i = 0; i < axesIter; i++) {
int32_t axis = idx_data[i];
if (axis < 0)
axis += data_dims.size();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;

namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
Expand All @@ -34,9 +33,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};

std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};

std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,20 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
};

const std::vector<std::vector<size_t>> inputShapes = {
std::vector<size_t>{10, 20, 30, 40},
std::vector<size_t>{10, 20, 40},
std::vector<size_t>{5, 6, 10, 11},
};

const std::vector<std::vector<int>> axes = {
{0},
{0, 3},
{1, -1},
{0, 2},
{1, -1}
};

// Forms of the reduction-axes input exercised by the tests:
// a single scalar value or a full vector of axes.
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};

const std::vector<ngraph::helpers::ReductionType> reductionTypes = {
ngraph::helpers::ReductionType::Mean,
ngraph::helpers::ReductionType::Min,
Expand All @@ -34,20 +40,38 @@ const std::vector<ngraph::helpers::ReductionType> reductionTypes = {
ngraph::helpers::ReductionType::LogicalAnd,
};

// Single-axis case: reduces over axis 0 only, exercising both the SCALAR and
// VECTOR forms of the axes input. Tuple order matches the ReduceOpsLayerTest
// parameter tuple: axes, opType, keepDims, reductionType, netPrecision,
// inputShape, targetDevice.
const auto paramsOneAxis = testing::Combine(
testing::Values(std::vector<int>{0}),            // axes to reduce
testing::ValuesIn(opTypes),                      // scalar or vector axes input
testing::Values(true, false),                    // keepDims on/off
testing::ValuesIn(reductionTypes),               // reduction kind (Mean/Min/...)
testing::ValuesIn(netPrecisions),                // network precision
testing::ValuesIn(inputShapes),                  // input tensor shapes
testing::Values(CommonTestUtils::DEVICE_CPU)     // target device
);

// Registers the single-axis sweep under the "ReduceOneAxis" prefix.
INSTANTIATE_TEST_CASE_P(
ReduceOneAxis,
ReduceOpsLayerTest,
paramsOneAxis,
ReduceOpsLayerTest::getTestCaseName
);

// Multi-axis case: sweeps several axis sets. The axes input is always passed
// in vector form (opTypes[1] == VECTOR), since multiple axes cannot be a scalar.
const auto params = testing::Combine(
testing::ValuesIn(axes),                         // axis sets to reduce
testing::Values(opTypes[1]),                     // vector axes input only
testing::Values(true, false),                    // keepDims on/off
testing::ValuesIn(reductionTypes),               // reduction kind
testing::ValuesIn(netPrecisions),                // network precision
testing::ValuesIn(inputShapes),                  // input tensor shapes
testing::Values(CommonTestUtils::DEVICE_CPU)     // target device
);


// Registers the multi-axis sweep under the "Reduce" prefix.
INSTANTIATE_TEST_CASE_P(
Reduce,
ReduceOpsLayerTest,
params,
ReduceOpsLayerTest::getTestCaseName
);

} // namespace
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;

namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
Expand All @@ -31,9 +30,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};

std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};

std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
#include "common_test_utils/test_constants.hpp"

using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;

namespace {
std::vector<std::vector<std::vector<size_t>>> inShapes = {
Expand Down Expand Up @@ -35,9 +34,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};

std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};

std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
#include <vector>

using namespace LayerTestsDefinitions;
using namespace LayerTestsDefinitions::EltwiseParams;

namespace {

Expand All @@ -36,9 +35,9 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};

std::vector<OpType> opTypes = {
OpType::SCALAR,
OpType::VECTOR,
std::vector<CommonTestUtils::OpType> opTypes = {
CommonTestUtils::OpType::SCALAR,
CommonTestUtils::OpType::VECTOR,
};

std::vector<ngraph::helpers::EltwiseTypes> eltwiseOpTypes = {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,21 +12,16 @@
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/test_constants.hpp"
#include "common_test_utils/common_layers_params.hpp"
#include "ie_core.hpp"

namespace LayerTestsDefinitions {
namespace EltwiseParams {
enum class OpType {
SCALAR,
VECTOR
};
} // namespace EltwiseParams

typedef std::tuple<
std::vector<std::vector<size_t>>, // input shapes
ngraph::helpers::EltwiseTypes, // eltwise op type
ngraph::helpers::InputLayerType, // secondary input type
EltwiseParams::OpType, // op type
CommonTestUtils::OpType, // op type
InferenceEngine::Precision, // Net precision
std::string, // Device name
std::map<std::string, std::string> // Additional network configuration
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,13 @@

#include "functional_test_utils/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
#include "common_test_utils/common_layers_params.hpp"

namespace LayerTestsDefinitions {

typedef std::tuple<
std::vector<int>, // Axis to reduce order
CommonTestUtils::OpType, // Scalar or vector type axis
bool, // Keep dims
ngraph::helpers::ReductionType, // Reduce operation type
InferenceEngine::Precision, // Net precision
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,25 +14,12 @@
#include "single_layer_tests/eltwise.hpp"

namespace LayerTestsDefinitions {
std::ostream& operator<<(std::ostream & os, EltwiseParams::OpType type) {
switch (type) {
case EltwiseParams::OpType::SCALAR:
os << "SCALAR";
break;
case EltwiseParams::OpType::VECTOR:
os << "VECTOR";
break;
default:
THROW_IE_EXCEPTION << "NOT_SUPPORTED_OP_TYPE";
}
return os;
}

std::string EltwiseLayerTest::getTestCaseName(testing::TestParamInfo<EltwiseTestParams> obj) {
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
ngraph::helpers::InputLayerType secondaryInputType;
EltwiseParams::OpType opType;
CommonTestUtils::OpType opType;
ngraph::helpers::EltwiseTypes eltwiseOpType;
std::string targetName;
std::map<std::string, std::string> additional_config;
Expand All @@ -52,7 +39,7 @@ void EltwiseLayerTest::SetUp() {
std::vector<std::vector<size_t>> inputShapes;
InferenceEngine::Precision netPrecision;
ngraph::helpers::InputLayerType secondaryInputType;
EltwiseParams::OpType opType;
CommonTestUtils::OpType opType;
ngraph::helpers::EltwiseTypes eltwiseType;
std::map<std::string, std::string> additional_config;
std::tie(inputShapes, eltwiseType, secondaryInputType, opType, netPrecision, targetDevice, additional_config) = this->GetParam();
Expand All @@ -73,11 +60,11 @@ void EltwiseLayerTest::SetUp() {

std::vector<size_t> shape_input_secondary;
switch (opType) {
case EltwiseParams::OpType::SCALAR: {
case CommonTestUtils::OpType::SCALAR: {
shape_input_secondary = std::vector<size_t>({1});
break;
}
case EltwiseParams::OpType::VECTOR:
case CommonTestUtils::OpType::VECTOR:
shape_input_secondary = inputShape2;
break;
default:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,13 @@ std::string ReduceOpsLayerTest::getTestCaseName(testing::TestParamInfo<reduceMea
ngraph::helpers::ReductionType reductionType;
std::vector<size_t> inputShape;
std::vector<int> axes;
CommonTestUtils::OpType opType;
std::string targetDevice;
std::tie(axes, keepDims, reductionType, netPrecision, inputShape, targetDevice) = obj.param;
std::tie(axes, opType, keepDims, reductionType, netPrecision, inputShape, targetDevice) = obj.param;
std::ostringstream result;
result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
result << "axes=" << CommonTestUtils::vec2str(axes) << "_";
result << "opType=" << opType << "_";
result << "type=" << reductionType << "_";
if (keepDims) result << "KeepDims_";
result << "netPRC=" << netPrecision.name() << "_";
Expand All @@ -46,13 +48,32 @@ void ReduceOpsLayerTest::SetUp() {
ngraph::helpers::ReductionType reductionType;
std::vector<size_t> inputShape;
std::vector<int> axes;
std::tie(axes, keepDims, reductionType, netPrecision, inputShape, targetDevice) = GetParam();
CommonTestUtils::OpType opType;
std::tie(axes, opType, keepDims, reductionType, netPrecision, inputShape, targetDevice) = GetParam();

auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
const auto reduce = ngraph::builder::makeReduce(paramOuts, axes, keepDims, reductionType);

std::vector<size_t> shapeAxes;
switch (opType) {
case CommonTestUtils::OpType::SCALAR: {
if (axes.size() > 1)
FAIL() << "In reduce op if op type is scalar, 'axis' input's must contain 1 element";
break;
}
case CommonTestUtils::OpType::VECTOR: {
shapeAxes.push_back(axes.size());
break;
}
default:
FAIL() << "Reduce op doesn't support operation type: " << opType;
}
auto reductionAxesNode = std::dynamic_pointer_cast<ngraph::Node>(
std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes));

const auto reduce = ngraph::builder::makeReduce(paramOuts[0], reductionAxesNode, keepDims, reductionType);
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(reduce)};
function = std::make_shared<ngraph::Function>(results, params, "Reduce");
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -209,4 +209,18 @@ void get_common_dims(const InferenceEngine::Blob &blob,
}
}

// Writes the textual name of an OpType ("SCALAR" or "VECTOR") to the stream,
// letting test-case-name builders embed the value directly; any other
// enumerator is rejected with an Inference Engine exception.
std::ostream& operator<<(std::ostream& os, OpType type) {
    if (type == OpType::SCALAR) {
        os << "SCALAR";
    } else if (type == OpType::VECTOR) {
        os << "VECTOR";
    } else {
        THROW_IE_EXCEPTION << "NOT_SUPPORTED_OP_TYPE";
    }
    return os;
}

} // namespace CommonTestUtils
Original file line number Diff line number Diff line change
Expand Up @@ -96,4 +96,10 @@ void get_common_dims(const InferenceEngine::Blob &blob,
int32_t &dimz,
int32_t &dimn);

// Shape form of an operation's secondary/axes input used by the shared layer
// tests: a single scalar value or a full vector.
enum class OpType {
SCALAR,
VECTOR
};
// Stream output of the enumerator name ("SCALAR"/"VECTOR"); used when
// composing generated test-case names.
std::ostream& operator<<(std::ostream & os, OpType type);

} // namespace CommonTestUtils
Original file line number Diff line number Diff line change
Expand Up @@ -271,8 +271,8 @@ std::shared_ptr<Node> makeMatMul(const Output<Node> &A,
bool transpose_a = false,
bool transpose_b = false);

std::shared_ptr<ngraph::Node> makeReduce(std::vector<ngraph::Output<Node>> &in,
const std::vector<int> &reductionAxes,
std::shared_ptr<ngraph::Node> makeReduce(const ngraph::Output<Node>& data,
const ngraph::Output<Node>& axes,
bool keepDims,
ngraph::helpers::ReductionType reductionType);

Expand Down
23 changes: 10 additions & 13 deletions inference-engine/tests/ngraph_functions/src/reduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,30 +9,27 @@

namespace ngraph {
namespace builder {
std::shared_ptr<ngraph::Node> makeReduce(std::vector<ngraph::Output<Node>> &in,
const std::vector<int> &reductionAxes,
std::shared_ptr<ngraph::Node> makeReduce(const ngraph::Output<Node>& data,
const ngraph::Output<Node>& axes,
bool keepDims,
ngraph::helpers::ReductionType reductionType) {
auto reductionAxesNode = std::make_shared<ngraph::opset3::Constant>(ngraph::element::Type_t::i64,
ngraph::Shape({reductionAxes.size()}),
reductionAxes);
switch (reductionType) {
case helpers::Mean:
return std::make_shared<ngraph::opset3::ReduceMean>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceMean>(data, axes, keepDims);
case helpers::Max:
return std::make_shared<ngraph::opset3::ReduceMax>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceMax>(data, axes, keepDims);
case helpers::Min:
return std::make_shared<ngraph::opset3::ReduceMin>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceMin>(data, axes, keepDims);
case helpers::Prod:
return std::make_shared<ngraph::opset3::ReduceProd>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceProd>(data, axes, keepDims);
case helpers::Sum:
return std::make_shared<ngraph::opset3::ReduceSum>(in.at(0), reductionAxesNode, keepDims);
return std::make_shared<ngraph::opset3::ReduceSum>(data, axes, keepDims);
case helpers::LogicalOr:
return std::make_shared<ngraph::opset3::LogicalOr>(in.at(0), reductionAxesNode);
return std::make_shared<ngraph::opset3::LogicalOr>(data, axes);
case helpers::LogicalAnd:
return std::make_shared<ngraph::opset3::LogicalAnd>(in.at(0), reductionAxesNode);
return std::make_shared<ngraph::opset3::LogicalAnd>(data, axes);
case helpers::LogicalXor:
return std::make_shared<ngraph::opset3::LogicalXor>(in.at(0), reductionAxesNode);
return std::make_shared<ngraph::opset3::LogicalXor>(data, axes);
default:
throw std::runtime_error("Can't create layer for this reduction type");
}
Expand Down

0 comments on commit ce579fb

Please sign in to comment.