Skip to content

Commit

Permalink
[CPU] Add MaxPool-14 and AvgPool-14 (openvinotoolkit#23582)
Browse files Browse the repository at this point in the history
### Details:
 - Added `AvgPool-14` and `MaxPool-14` to CPU plugin
 - Added shape inference tests
 - Added layer tests for `MaxPool-14` and `AvgPool-14`
 - Refactored shape inference tests to avoid code duplication

### Tickets:
 - CVS-136259

### Related PRs
 - openvinotoolkit#22930
 - openvinotoolkit#22796
 - openvinotoolkit#23381
 - openvinotoolkit#22966

---------

Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
  • Loading branch information
2 people authored and spran180 committed Jul 27, 2024
1 parent 92f1868 commit df4e971
Show file tree
Hide file tree
Showing 18 changed files with 870 additions and 138 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -441,6 +441,7 @@ bool ov::pass::ConvertPrecision::run_on_model(const std::shared_ptr<ov::Model>&
{ov::op::v3::TopK::get_type_info_static(), fuse_type_to_topk},
{ov::op::v11::TopK::get_type_info_static(), fuse_type_to_topk},
{ov::op::v8::MaxPool::get_type_info_static(), fuse_type_to_maxpool},
{ov::op::v14::MaxPool::get_type_info_static(), fuse_type_to_maxpool},
{ov::op::v3::NonZero::get_type_info_static(), fuse_type_to_nonzero},
{ov::op::v3::Bucketize::get_type_info_static(), fuse_type_to_bucketize},
{ov::op::v1::Equal::get_type_info_static(), fuse_type_to_binary_comparision<ov::op::v1::Equal>},
Expand Down Expand Up @@ -924,9 +925,15 @@ bool fuse_type_to_topk(const std::shared_ptr<ov::Node>& node, const precisions_m
}

bool fuse_type_to_maxpool(const std::shared_ptr<ov::Node>& node, const precisions_map& precisions) {
if (auto maxpool = ov::as_type_ptr<ov::op::v8::MaxPool>(node)) {
auto maxpool_v8 = ov::as_type_ptr<ov::op::v8::MaxPool>(node);
auto maxpool_v14 = ov::as_type_ptr<ov::op::v14::MaxPool>(node);
if (maxpool_v14) {
return update_type(1, node, precisions, [&](const element::Type& to) {
maxpool->set_index_element_type(to);
maxpool_v14->set_index_element_type(to);
});
} else if (maxpool_v8) {
return update_type(1, node, precisions, [&](const element::Type& to) {
maxpool_v8->set_index_element_type(to);
});
}
return false;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,11 @@ ov::pass::ConvertAvgPool14ToAvgPool1::ConvertAvgPool14ToAvgPool1() {

const auto avg_pool_v14_pattern = pattern::wrap_type<ov::op::v14::AvgPool>();

const matcher_pass_callback callback = [](pattern::Matcher& m) {
const matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) {
const auto avg_pool_v14 = std::dynamic_pointer_cast<ov::op::v14::AvgPool>(m.get_match_root());
if (!avg_pool_v14 || transformation_callback(avg_pool_v14)) {
return false;
}
const auto rounding_type_v14 = avg_pool_v14->get_rounding_type();
const auto rounding_type_v1 =
rounding_type_v14 == ov::op::RoundingType::CEIL_TORCH ? ov::op::RoundingType::CEIL : rounding_type_v14;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ ov::pass::ConvertMaxPool14ToMaxPool8::ConvertMaxPool14ToMaxPool8() {
const auto selected_pads = node_registry.make<Select>(in_gt_out, padding_end_node, zero);

// apply the padding to the input and clear the pads attribute
const auto pb = node_registry.make<Concat>(OutputVector{pads_remaining->output(0), padding_end_node}, 0);
const auto pb = node_registry.make<Concat>(OutputVector{pads_remaining->output(0), padding_begin_node}, 0);
const auto pe = node_registry.make<Concat>(OutputVector{pads_remaining, selected_pads}, 0);
auto minus_inf =
node_registry.make<Constant>(element::f32, Shape{}, -std::numeric_limits<float>::infinity());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ std::shared_ptr<ov::Model> create_ceil_torch_workaround_model(const ov::op::Roun
const auto selected_pads = std::make_shared<Select>(in_gt_out, padding_end_node, zero);

// apply the padding to the input and clear the pads attribute
const auto pb = std::make_shared<Concat>(ov::OutputVector{pads_remaining, padding_end_node}, 0);
const auto pb = std::make_shared<Concat>(ov::OutputVector{pads_remaining, padding_begin_node}, 0);
const auto pe = std::make_shared<Concat>(ov::OutputVector{pads_remaining, selected_pads}, 0);
auto minus_inf = Constant::create(ov::element::f32, ov::Shape{}, {-std::numeric_limits<float>::infinity()});
std::shared_ptr<ov::Node> convert_like_node = std::make_shared<ConvertLike>(minus_inf, input);
Expand Down
4 changes: 2 additions & 2 deletions src/core/src/pass/serialize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -873,9 +873,9 @@ class PaddingsFixer {
if (pad_agnostic_types.count(op->get_auto_pad())) {
clone_op_and_fix_paddings<ov::opset1::BinaryConvolution, ov::CoordinateDiff>(op);
}
} else if (auto op = ov::as_type<ov::opset1::AvgPool>(node)) {
} else if (auto op = ov::as_type<ov::op::util::AvgPoolBase>(node)) {
if (pad_agnostic_types.count(op->get_auto_pad())) {
clone_op_and_fix_paddings<ov::opset1::AvgPool, ov::Shape>(op);
clone_op_and_fix_paddings<ov::op::util::AvgPoolBase, ov::Shape>(op);
}
} else if (auto op = ov::as_type<ov::op::util::MaxPoolBase>(node)) {
if (pad_agnostic_types.count(op->get_auto_pad())) {
Expand Down
1 change: 1 addition & 0 deletions src/plugins/intel_cpu/src/extension.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ class TypeRelaxedExtension : public ov::OpExtension<ov::op::TypeRelaxed<Op>> {
#define TYPE_RELAXED_EXTENSIONS \
TYPE_RELAXED_OP_EXTENSION(ov::op::v1::Add) \
TYPE_RELAXED_OP_EXTENSION(ov::op::v1::AvgPool) \
TYPE_RELAXED_OP_EXTENSION(ov::op::v14::AvgPool) \
TYPE_RELAXED_OP_EXTENSION(ov::op::v0::Clamp) \
TYPE_RELAXED_OP_EXTENSION(ov::op::v0::Concat) \
TYPE_RELAXED_OP_EXTENSION(ov::op::v1::Convolution) \
Expand Down
66 changes: 29 additions & 37 deletions src/plugins/intel_cpu/src/nodes/pooling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -146,13 +146,15 @@ dnnl::pooling_forward::primitive_desc createDescriptorHelper(const dnnl::engine&

bool Pooling::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
if (ov::is_type<const ov::op::v8::MaxPool>(op)) {
if (ov::is_type<const ov::op::v8::MaxPool>(op) || ov::is_type<const ov::op::v14::MaxPool>(op)) {
if (!op->get_output_target_inputs(1).empty()) {
errorMessage = "MaxPool from opset8 is supported only with one output";
errorMessage = "MaxPool from opset8 and opset14 is supported only with one output";
return false;
}
} else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op)) {
errorMessage = "MaxPool and AvgPool from opset1 and MaxPool from opset8 are supported";
} else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v8::MaxPool>(op) &&
!ov::is_type<const ov::op::v14::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op) &&
!ov::is_type<const ov::op::v14::AvgPool>(op)) {
errorMessage = "Supported ops are MaxPool-1, MaxPool-8, MaxPool-14, AvgPool-1 and AvgPool-14";
return false;
}
} catch (...) {
Expand All @@ -174,47 +176,37 @@ Pooling::Pooling(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr c
}
};

if (auto maxPoolOp_v8 = ov::as_type_ptr<const ov::op::v8::MaxPool>(op)) {
isMaxPool8 = true;
if (auto maxPoolOpBase = ov::as_type_ptr<const ov::op::util::MaxPoolBase>(op)) {
algorithm = Algorithm::PoolingMax;
poolingAttrs.exclude_pad = false;
poolingAttrs.rounding = maxPoolOp_v8->get_rounding_type();
poolingAttrs.pad_type = maxPoolOp_v8->get_auto_pad();
poolingAttrs.rounding = maxPoolOpBase->get_rounding_type();
poolingAttrs.pad_type = maxPoolOpBase->get_auto_pad();
get_attributes(poolingAttrs.stride, maxPoolOpBase->get_strides());
get_attributes(poolingAttrs.kernel, maxPoolOpBase->get_kernel());
get_attributes(poolingAttrs.data_pad_begin, maxPoolOpBase->get_pads_begin());
get_attributes(poolingAttrs.data_pad_end, maxPoolOpBase->get_pads_end());
poolingAttrs.auto_pad = (poolingAttrs.pad_type == ov::op::PadType::SAME_LOWER || poolingAttrs.pad_type == ov::op::PadType::SAME_UPPER);
}

if (auto maxPoolOp_v14 = ov::as_type_ptr<const ov::op::v14::MaxPool>(op)) {
isNotMaxPool1 = true;
get_attributes(poolingAttrs.dilation, maxPoolOp_v14->get_dilations());
} else if (auto maxPoolOp_v8 = ov::as_type_ptr<const ov::op::v8::MaxPool>(op)) {
isNotMaxPool1 = true;
get_attributes(poolingAttrs.dilation, maxPoolOp_v8->get_dilations());
get_attributes(poolingAttrs.stride, maxPoolOp_v8->get_strides());
get_attributes(poolingAttrs.kernel, maxPoolOp_v8->get_kernel());
get_attributes(poolingAttrs.data_pad_begin, maxPoolOp_v8->get_pads_begin());
get_attributes(poolingAttrs.data_pad_end, maxPoolOp_v8->get_pads_end());

poolingAttrs.auto_pad = (maxPoolOp_v8->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp_v8->get_auto_pad() == ov::op::PadType::SAME_UPPER);
} else if (auto maxPoolOp_v1 = ov::as_type_ptr<const ov::op::v1::MaxPool>(op)) {
algorithm = Algorithm::PoolingMax;
poolingAttrs.exclude_pad = false;
poolingAttrs.pad_type = maxPoolOp_v1->get_auto_pad();
poolingAttrs.rounding = maxPoolOp_v1->get_rounding_type();

get_attributes(poolingAttrs.stride, maxPoolOp_v1->get_strides());
get_attributes(poolingAttrs.kernel, maxPoolOp_v1->get_kernel());
get_attributes(poolingAttrs.data_pad_begin, maxPoolOp_v1->get_pads_begin());
get_attributes(poolingAttrs.data_pad_end, maxPoolOp_v1->get_pads_end());
poolingAttrs.dilation.resize(poolingAttrs.kernel.size(), 1);

poolingAttrs.auto_pad = (maxPoolOp_v1->get_auto_pad() == ov::op::PadType::SAME_LOWER || maxPoolOp_v1->get_auto_pad() == ov::op::PadType::SAME_UPPER);
} else if (auto avgPoolOp = ov::as_type_ptr<const ov::op::v1::AvgPool>(op)) {
} else if (auto avgPoolOpBase = ov::as_type_ptr<const ov::op::util::AvgPoolBase>(op)) {
algorithm = Algorithm::PoolingAvg;
poolingAttrs.exclude_pad = avgPoolOp->get_exclude_pad();
poolingAttrs.rounding = avgPoolOp->get_rounding_type();

get_attributes(poolingAttrs.stride, avgPoolOp->get_strides());
get_attributes(poolingAttrs.kernel, avgPoolOp->get_kernel());
get_attributes(poolingAttrs.data_pad_begin, avgPoolOp->get_pads_begin());
get_attributes(poolingAttrs.data_pad_end, avgPoolOp->get_pads_end());
poolingAttrs.exclude_pad = avgPoolOpBase->get_exclude_pad();
poolingAttrs.rounding = avgPoolOpBase->get_rounding_type();
get_attributes(poolingAttrs.stride, avgPoolOpBase->get_strides());
get_attributes(poolingAttrs.kernel, avgPoolOpBase->get_kernel());
get_attributes(poolingAttrs.data_pad_begin, avgPoolOpBase->get_pads_begin());
get_attributes(poolingAttrs.data_pad_end, avgPoolOpBase->get_pads_end());
poolingAttrs.dilation.resize(poolingAttrs.kernel.size(), 1);

poolingAttrs.auto_pad = (avgPoolOp->get_auto_pad() == ov::op::PadType::SAME_LOWER || avgPoolOp->get_auto_pad() == ov::op::PadType::SAME_UPPER);
poolingAttrs.auto_pad = (avgPoolOpBase->get_auto_pad() == ov::op::PadType::SAME_LOWER || avgPoolOpBase->get_auto_pad() == ov::op::PadType::SAME_UPPER);
}

poolingAttrs.algorithm = algorithm;
}

Expand Down Expand Up @@ -642,7 +634,7 @@ void Pooling::initSupportedPrimitiveDescriptors() {
}

// CPU plugin doesn't support the second output of MaxPool-8/MaxPool-14, but we should still provide an output config for the second port as a stub
if (isMaxPool8) {
if (isNotMaxPool1) {
const auto& creatorsMap = BlockedDescCreator::getCommonCreators();
const auto outputPrecision = outConfs.front().getMemDesc()->getPrecision();
auto desc = creatorsMap.at(LayoutType::ncsp)->createSharedDesc(outputPrecision, getOutputShapeAtPort(1));
Expand Down
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/pooling.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class Pooling : public Node {

Shape inShape;

bool isMaxPool8 = false;
bool isNotMaxPool1 = false;
bool useACL = false;
};

Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -407,6 +407,8 @@ const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{
_OV_OP_SHAPE_INFER_MASK_REG(op::v15::Col2Im, ShapeInferTA, util::bit::mask(1, 2)),
// opset14
_OV_OP_SHAPE_INFER_MASK_REG(opset14::Inverse, ShapeInferTA, util::bit::mask()),
_OV_OP_SHAPE_INFER_MASK_REG(opset14::MaxPool, ShapeInferPaddingTA, util::bit::mask()),
_OV_OP_SHAPE_INFER_MASK_REG(opset14::AvgPool, ShapeInferPaddingTA, util::bit::mask()),
// opset13
_OV_OP_SHAPE_INFER_MASK_REG(opset13::Multinomial, ShapeInferTA, util::bit::mask(1)),
_OV_OP_SHAPE_INFER_MASK_REG(opset13::ScaledDotProductAttention, ShapeInferTA, util::bit::mask(3, 5)),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,8 @@ bool isSuitableMiscParent(const std::shared_ptr<const Node> &node) {
ov::is_type<ov::opset1::ConvolutionBackpropData>(node) ||
ov::is_type<ov::op::util::ArithmeticReductionKeepDims>(node) ||
ov::is_type<ov::opset1::GroupConvolutionBackpropData>(node) ||
ov::is_type<ov::opset1::AvgPool>(node);
ov::is_type<ov::opset1::AvgPool>(node) ||
ov::is_type<ov::op::v14::AvgPool>(node);
// has a single output, connected to a single child
const auto out = node->outputs();
const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
#include "transformations/common_optimizations/move_eltwise_up_data_movement.hpp"
#include "transformations/control_flow/unroll_tensor_iterator.hpp"
#include "transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp"
#include "transformations/op_conversions/convert_avgpool_downgrade.hpp"
#include "transformations/op_conversions/convert_batch_to_space.hpp"
#include "transformations/op_conversions/convert_bitwise_to_logical_bool.hpp"
#include "transformations/op_conversions/convert_broadcast_to_tiles.hpp"
Expand Down Expand Up @@ -468,10 +469,18 @@ void Transformations::PreLpt(const std::vector<ov::element::Type>& defaultPrecis
manager,
[](const_node_ptr& node) -> bool {
const auto maxpool = std::dynamic_pointer_cast<const ov::op::v14::MaxPool>(node);
return !maxpool || maxpool->get_rounding_type() == ov::op::RoundingType::CEIL_TORCH;
return !maxpool || maxpool->get_rounding_type() == ov::op::RoundingType::CEIL_TORCH;
},
ov::pass::ConvertMaxPool14ToMaxPool8);

CPU_SET_CALLBACK_COMMON(
manager,
[](const_node_ptr& node) -> bool {
const auto avgpool = std::dynamic_pointer_cast<const ov::op::v14::AvgPool>(node);
return !avgpool || avgpool->get_rounding_type() == ov::op::RoundingType::CEIL_TORCH;
},
ov::pass::ConvertAvgPool14ToAvgPool1);

CPU_SET_CALLBACK_COMMON(manager,
[](const_node_ptr &node) -> bool {
std::string msg;
Expand Down
Loading

0 comments on commit df4e971

Please sign in to comment.