From 257cb9dc49e4a86eacaae96b317a0fe570a422a6 Mon Sep 17 00:00:00 2001
From: Vladislav Golubev
Date: Thu, 14 Dec 2023 16:34:54 +0100
Subject: [PATCH] Softmax decomposition moved to data flow pipeline

---
 .../lowered/pass/reduce_decomposition.hpp     |  33 +++++
 .../snippets/include/snippets/op/reduce.hpp   |  59 ++++++++
 .../snippets/pass/softmax_decomposition.hpp   |  27 ++++
 .../shape_inference/shape_infer_instances.hpp |   7 +
 .../include/snippets/snippets_isa.hpp         |   1 +
 .../include/snippets/snippets_isa_tbl.hpp     |   2 +
 .../src/lowered/pass/reduce_decomposition.cpp | 135 ++++++++++++++++++
 src/common/snippets/src/op/reduce.cpp         |  43 ++++++
 src/common/snippets/src/op/subgraph.cpp       |  10 +-
 .../src/pass/softmax_decomposition.cpp        |  75 ++++++++++
 .../shape_inference/shape_infer_instances.cpp |  13 ++
 .../src/shape_inference/shape_inference.cpp   |   2 +
 .../src/emitters/x64/cpu_generator.cpp        |  11 ++
 13 files changed, 417 insertions(+), 1 deletion(-)
 create mode 100644 src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp
 create mode 100644 src/common/snippets/include/snippets/op/reduce.hpp
 create mode 100644 src/common/snippets/include/snippets/pass/softmax_decomposition.hpp
 create mode 100644 src/common/snippets/src/lowered/pass/reduce_decomposition.cpp
 create mode 100644 src/common/snippets/src/op/reduce.cpp
 create mode 100644 src/common/snippets/src/pass/softmax_decomposition.cpp

diff --git a/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp b/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp
new file mode 100644
index 00000000000000..d6c3d6ea5c82af
--- /dev/null
+++ b/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "pass.hpp"
+
+namespace ov {
+namespace snippets {
+namespace lowered {
+namespace pass {
+
+/**
+ * @interface ReduceDecomposition
+ * @brief Decomposes snippets::Reduce operations to a range of low-level operations on linear IR
+ * @attention Only Reduce by last dimension is supported
+ * @ingroup snippets
+ */
+class ReduceDecomposition : public Pass {
+public:
+    OPENVINO_RTTI("ReduceDecomposition", "Pass")
+    explicit ReduceDecomposition(size_t vector_size);
+    bool run(LinearIR& linear_ir) override;
+
+private:
+    size_t m_vector_size;
+};
+
+} // namespace pass
+} // namespace lowered
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/include/snippets/op/reduce.hpp b/src/common/snippets/include/snippets/op/reduce.hpp
new file mode 100644
index 00000000000000..16b4e57f9268e4
--- /dev/null
+++ b/src/common/snippets/include/snippets/op/reduce.hpp
@@ -0,0 +1,59 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/op.hpp"
+#include "snippets/shape_inference/shape_infer_instances.hpp"
+
+namespace ov {
+namespace snippets {
+namespace op {
+
+/**
+ * @interface ReduceBase
+ * @brief Base class for reduce operations.
+ * @arg m_axis reduce axis.
+ * @ingroup snippets
+ */
+class ReduceBase : public ov::op::Op {
+public:
+    OPENVINO_OP("ReduceBase", "SnippetsOpset");
+
+    ReduceBase(const Output<Node>& x, size_t axis);
+    ReduceBase() = default;
+
+    bool visit_attributes(AttributeVisitor& visitor) override;
+    void validate_and_infer_types() override;
+    size_t get_axis() const { return m_axis; }
+
+protected:
+    size_t m_axis;
+};
+
+class ReduceSum : public ReduceBase {
+public:
+    OPENVINO_OP("ReduceSum", "SnippetsOpset", ReduceBase);
+    ReduceSum(const Output<Node>& x, size_t axis) : ReduceBase(x, axis) {}
+    ReduceSum() = default;
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
+        return {{ov::element::f32}};
+    }
+};
+
+class ReduceMax : public ReduceBase {
+public:
+    OPENVINO_OP("ReduceMax", "SnippetsOpset", ReduceBase);
+    ReduceMax(const Output<Node>& x, size_t axis) : ReduceBase(x, axis) {}
+    ReduceMax() = default;
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    static std::set<std::vector<element::Type>> get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
+        return {{ov::element::f32}};
+    }
+};
+
+} // namespace op
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp b/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp
new file mode 100644
index 00000000000000..51d80520d4991f
--- /dev/null
+++ b/src/common/snippets/include/snippets/pass/softmax_decomposition.hpp
@@ -0,0 +1,27 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/pass/graph_rewrite.hpp"
+#include "openvino/pass/pattern/matcher.hpp"
+
+namespace ov {
+namespace snippets {
+namespace pass {
+
+/**
+ * @interface SoftmaxDecomposition
+ * @brief Decomposes Softmax to a range of low-level operations
+ * @ingroup snippets
+ */
+class SoftmaxDecomposition: public ov::pass::MatcherPass {
+public:
+    OPENVINO_RTTI("SoftmaxDecomposition", "0");
+    SoftmaxDecomposition();
+};
+
+} // namespace pass
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp
index 43ad0aa3d5ac97..f6cd6f0626f798 100644
--- a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp
+++ b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp
@@ -68,5 +68,12 @@ class BrgemmShapeInfer : public IShapeInferSnippets {
     Result infer(const std::vector<VectorDimsRef>& input_shapes) override;
 };
 
+class ReduceShapeInfer : public IShapeInferSnippets {
+    size_t m_axis;
+public:
+    explicit ReduceShapeInfer(const std::shared_ptr<Node>& n);
+    Result infer(const std::vector<VectorDimsRef>& input_shapes) override;
+};
+
 } // namespace snippets
 } // namespace ov
diff --git a/src/common/snippets/include/snippets/snippets_isa.hpp b/src/common/snippets/include/snippets/snippets_isa.hpp
index b2c6d46b722b0b..cfb191a080cd69 100644
--- a/src/common/snippets/include/snippets/snippets_isa.hpp
+++ b/src/common/snippets/include/snippets/snippets_isa.hpp
@@ -26,6 +26,7 @@
 #include "op/vector_buffer.hpp"
 #include "op/rank_normalization.hpp"
 #include "op/perf_count.hpp"
+#include "op/reduce.hpp"
 
 namespace ov {
 namespace snippets {
diff --git a/src/common/snippets/include/snippets/snippets_isa_tbl.hpp b/src/common/snippets/include/snippets/snippets_isa_tbl.hpp
index 06a8d942e258c2..8de082781f131b 100644
--- a/src/common/snippets/include/snippets/snippets_isa_tbl.hpp
+++ b/src/common/snippets/include/snippets/snippets_isa_tbl.hpp
@@ -23,6 +23,8 @@ OV_OP(BroadcastMove, ov::snippets::op)
 OV_OP(Scalar, ov::snippets::op)
 OV_OP(Nop, ov::snippets::op)
 OV_OP(RankNormalization, ov::snippets::op)
+OV_OP(ReduceMax, ov::snippets::op)
+OV_OP(ReduceSum, ov::snippets::op)
 
 #ifdef SNIPPETS_DEBUG_CAPS
 OV_OP(PerfCountBegin, ov::snippets::op)
diff --git a/src/common/snippets/src/lowered/pass/reduce_decomposition.cpp b/src/common/snippets/src/lowered/pass/reduce_decomposition.cpp
new file mode 100644
index 00000000000000..4acec504225335
--- /dev/null
+++ b/src/common/snippets/src/lowered/pass/reduce_decomposition.cpp
@@ -0,0 +1,135 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "snippets/lowered/pass/reduce_decomposition.hpp"
+
+#include "snippets/lowered/linear_ir.hpp"
+#include "snippets/lowered/loop_manager.hpp"
+#include "snippets/lowered/pass/mark_loops.hpp"
+#include "snippets/lowered/pass/iter_handler.hpp"
+#include "snippets/snippets_isa.hpp"
+#include "snippets/itt.hpp"
+
+#include "openvino/pass/pattern/op/wrap_type.hpp"
+#include "openvino/pass/pattern/matcher.hpp"
+
+
+namespace ov {
+namespace snippets {
+namespace lowered {
+namespace pass {
+
+namespace {
+uint32_t get_initial_value(const ov::DiscreteTypeInfo& type_info) {
+    static const std::map<ov::DiscreteTypeInfo, uint32_t> reduce_initial_values {
+        {op::ReduceMax::get_type_info_static(), uint32_t(0xff7fffff)},
+        {op::ReduceSum::get_type_info_static(), uint32_t(0x00000000)},
+    };
+    OPENVINO_ASSERT(reduce_initial_values.count(type_info), "Unexpected ReduceType");
+    return reduce_initial_values.at(type_info);
+}
+
+std::shared_ptr<ov::Node> get_accumulation_node(const ov::Output<ov::Node>& input0,
+                                                const ov::Output<ov::Node>& input1,
+                                                const ov::DiscreteTypeInfo& type_info) {
+    if (type_info == op::ReduceMax::get_type_info_static()) {
+        return std::make_shared<ov::op::v1::Maximum>(input0, input1);
+    } else if (type_info == op::ReduceSum::get_type_info_static()) {
+        return std::make_shared<ov::op::v1::Add>(input0, input1);
+    } else {
+        OPENVINO_THROW("Unsupported reduce type: ", type_info);
+    }
+}
+
+std::shared_ptr<ov::Node> get_horizon_node(const ov::Output<ov::Node>& input, const ov::DiscreteTypeInfo& type_info) {
+    if (type_info == op::ReduceMax::get_type_info_static()) {
+        return std::make_shared<op::HorizonMax>(input);
+    } else if (type_info == op::ReduceSum::get_type_info_static()) {
+        return std::make_shared<op::HorizonSum>(input);
+    } else {
+        OPENVINO_THROW("Unsupported reduce type: ", type_info);
+    }
+}
+} // namespace
+
+using LoopInfo = LinearIR::LoopManager::LoopInfo;
+
+ReduceDecomposition::ReduceDecomposition(size_t vector_size) : m_vector_size{vector_size} {}
+
+bool ReduceDecomposition::run(LinearIR& linear_ir) {
+    OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::ReduceDecompositionLowered")
+    const auto& loop_manager = linear_ir.get_loop_manager();
+    bool modified = false;
+    for (auto expr_it = linear_ir.begin(); expr_it != linear_ir.end(); expr_it++) {
+        const auto& reduce_expr = *expr_it;
+        const auto& reduce = ov::as_type_ptr<op::ReduceBase>(reduce_expr->get_node());
+        if (!reduce)
+            continue;
+
+        const auto& reduce_type_info = reduce->get_type_info();
+        const auto& input_shape = reduce_expr->get_input_port_descriptor(0)->get_shape();
+        const auto work_amount = *(input_shape.rbegin());
+        const auto increment = m_vector_size <= work_amount ? m_vector_size : work_amount;
+        const bool is_dynamic = reduce->is_dynamic();
+        OPENVINO_ASSERT(reduce->get_axis() == input_shape.size() - 1, "ReduceDecomposition supports only Reduce by last dimension.");
+
+        // We need an iterator to the inserted element
+        auto push_node = [&](const std::shared_ptr<Node>& n) {
+            const auto expr = linear_ir.insert(expr_it, n);
+            if (is_dynamic)
+                expr->get()->updateShapes();
+            return std::make_pair(expr, n);
+        };
+        // Float constant values in byte representation
+        const auto fill_value = get_initial_value(reduce_type_info);
+        // Note: VectorBuffer is a special case, since it should go before the initial Load.
+        // The buffer must be initialized with fill_value before reduction
+        const auto vector_buffer = push_node(std::make_shared<op::VectorBuffer>());
+        const auto initial_fill = push_node(std::make_shared<op::Fill>(vector_buffer.second, 0, fill_value));
+
+        // Reduce loop
+        const auto fill = push_node(std::make_shared<op::Fill>(reduce->get_input_source_output(0), increment, fill_value));
+        const auto accumulation = push_node(get_accumulation_node(fill.second, initial_fill.second, reduce_type_info));
+
+        const auto reduce_loop_id = loop_manager->mark_loop(
+            fill.first,
+            expr_it,
+            work_amount,
+            increment,
+            0,
+            std::vector<ExpressionPort>{(*fill.first)->get_input_port(0), (*accumulation.first)->get_input_port(1)},
+            std::vector<ExpressionPort>{(*accumulation.first)->get_output_port(0)});
+        const auto loop_info = loop_manager->get_loop_info(reduce_loop_id);
+        const auto tail_size = work_amount % increment;
+        if (tail_size != 0) {
+            loop_info->handlers[LoopInfo::LAST_ITER].register_pass<SetFillOffset>(tail_size);
+        }
+        const auto horizon = push_node(get_horizon_node(accumulation.second, reduce_type_info));
+
+        // Transfer original ExpressionPorts
+        linear_ir.replace_input((*fill.first)->get_input_port(0), reduce_expr->get_input_port_connector(0));
+        linear_ir.replace_input(reduce_expr->get_output_port_connector(0)->get_consumers(), (*horizon.first)->get_output_port_connector(0));
+
+        // Update Loop info for outer loops
+        const std::vector<ExpressionPort> entry_points{(*fill.first)->get_input_port(0)};
+        const std::vector<ExpressionPort> exit_points{(*horizon.first)->get_output_port(0)};
+        for (auto loop_id : reduce_expr->get_loop_ids()) {
+            loop_manager->expression_replacement(vector_buffer.first,
+                                                 expr_it,
+                                                 reduce_expr,
+                                                 loop_id,
+                                                 entry_points,
+                                                 exit_points);
+        }
+
+        expr_it = linear_ir.erase(expr_it);
+        modified = true;
+    }
+    return modified;
+}
+
+} // namespace pass
+} // namespace lowered
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/src/op/reduce.cpp b/src/common/snippets/src/op/reduce.cpp
new file mode 100644
index 00000000000000..724d4733207aef
--- /dev/null
+++ b/src/common/snippets/src/op/reduce.cpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "snippets/itt.hpp"
+
+#include "snippets/op/reduce.hpp"
+
+
+namespace ov {
+namespace snippets {
+namespace op {
+
+ReduceBase::ReduceBase(const Output<Node>& x, size_t axis) : Op({x}), m_axis(axis) {
+    constructor_validate_and_infer_types();
+}
+
+bool ReduceBase::visit_attributes(AttributeVisitor& visitor) {
+    visitor.on_attribute("axis", m_axis);
+    return true;
+}
+
+void ReduceBase::validate_and_infer_types() {
+    auto result_shape = get_input_partial_shape(0);
+    result_shape[m_axis] = 1;
+    set_output_type(0, get_input_element_type(0), result_shape);
+}
+
+std::shared_ptr<Node> ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const {
+    INTERNAL_OP_SCOPE(ReduceSum);
+    check_new_args_count(this, new_args);
+    return std::make_shared<ReduceSum>(new_args.at(0), m_axis);
+}
+
+std::shared_ptr<Node> ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const {
+    INTERNAL_OP_SCOPE(ReduceMax);
+    check_new_args_count(this, new_args);
+    return std::make_shared<ReduceMax>(new_args.at(0), m_axis);
+}
+
+} // namespace op
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp
index 5e4ec30a618156..b3529d93c209df 100644
--- a/src/common/snippets/src/op/subgraph.cpp
+++ b/src/common/snippets/src/op/subgraph.cpp
@@ -12,6 +12,7 @@
 #include "snippets/pass/convert_constants.hpp"
 #include "snippets/pass/convert_power_to_powerstatic.hpp"
 #include "snippets/pass/transpose_decomposition.hpp"
+#include "snippets/pass/softmax_decomposition.hpp"
 #include "snippets/pass/matmul_to_brgemm.hpp"
 #include "snippets/pass/fuse_transpose_brgemm.hpp"
 #include "snippets/pass/set_softmax_ports.hpp"
@@ -43,6 +44,7 @@
 #include "snippets/lowered/pass/insert_perf_count.hpp"
 #include "snippets/lowered/pass/validate_shapes.hpp"
 #include "snippets/lowered/pass/pass_config.hpp"
+#include "snippets/lowered/pass/reduce_decomposition.hpp"
 
 #include "transformations/utils/utils.hpp"
 
@@ -404,7 +406,11 @@ void Subgraph::data_flow_transformations(const BlockedShapeVector& blocked_input
         manager.register_pass<snippets::pass::MatMulToBrgemm>();
         manager.register_pass<snippets::pass::FuseTransposeBrgemm>();
         manager.register_pass<snippets::pass::TransposeDecomposition>();
-        manager.register_pass<snippets::pass::SetSoftmaxPorts>();
+        if (getenv("DISABLE_DATA_FLOW_DECOMPOSITION")) {
+            manager.register_pass<snippets::pass::SetSoftmaxPorts>();
+        } else {
+            manager.register_pass<snippets::pass::SoftmaxDecomposition>();
+        }
     }
     manager.register_pass<snippets::pass::BroadcastToMoveBroadcast>();
     manager.register_pass<snippets::pass::ConvertConstantsToScalars>();
@@ -435,7 +441,9 @@ void Subgraph::control_flow_transformations(lowered::LinearIR& linear_ir,
 
     lowered::pass::PassPipeline pipeline(lowered_pass_config);
     pipeline.register_pass<lowered::pass::MarkLoops>(vector_size);
+    // TODO: remove the lowered SoftmaxDecomposition pass when ReduceDecomposition fully replaces it
     pipeline.register_pass<lowered::pass::SoftmaxDecomposition>(vector_size);
+    pipeline.register_pass<lowered::pass::ReduceDecomposition>(vector_size);
     pipeline.register_pass<lowered::pass::FuseLoops>();
     pipeline.register_pass<lowered::pass::SplitLoops>();
     pipeline.register_pass<lowered::pass::MoveResultOutOfLoop>();
diff --git a/src/common/snippets/src/pass/softmax_decomposition.cpp b/src/common/snippets/src/pass/softmax_decomposition.cpp
new file mode 100644
index 00000000000000..7f0ec70fbbed62
--- /dev/null
+++ b/src/common/snippets/src/pass/softmax_decomposition.cpp
@@ -0,0 +1,75 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "snippets/pass/softmax_decomposition.hpp"
+
+#include "openvino/op/softmax.hpp"
+#include "openvino/pass/pattern/op/or.hpp"
+#include "openvino/pass/pattern/op/wrap_type.hpp"
+#include "snippets/itt.hpp"
+#include "snippets/lowered/port_descriptor.hpp"
+#include "snippets/op/reduce.hpp"
+#include "snippets/snippets_isa.hpp"
+
+namespace ov {
+namespace snippets {
+namespace pass {
+using namespace lowered;
+
+SoftmaxDecomposition::SoftmaxDecomposition() {
+    MATCHER_SCOPE(SoftmaxDecomposition);
+    auto softmax_v1_m = ov::pass::pattern::wrap_type<ov::op::v1::Softmax>();
+    auto softmax_v8_m = ov::pass::pattern::wrap_type<ov::op::v8::Softmax>();
+    auto softmax_m = std::make_shared<ov::pass::pattern::op::Or>(ov::OutputVector{softmax_v1_m, softmax_v8_m});
+
+    ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
+        OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::op::SoftmaxDecomposition")
+        auto softmax = m.get_match_root();
+
+        const auto& pshape = softmax->get_input_partial_shape(0);
+        OPENVINO_ASSERT(!pshape.rank().is_dynamic(), "SoftmaxDecomposition doesn't support dynamic ranks");
+        const auto rank = pshape.size();
+
+        size_t axis;
+        if (const auto softmax_v8 = ov::as_type_ptr<ov::op::v8::Softmax>(softmax)) {
+            OPENVINO_SUPPRESS_DEPRECATED_START
+            axis = ov::normalize_axis(softmax->get_friendly_name(), softmax_v8->get_axis(), rank);
+            OPENVINO_SUPPRESS_DEPRECATED_END
+        } else if (const auto softmax_v1 = ov::as_type_ptr<ov::op::v1::Softmax>(softmax)) {
+            axis = softmax_v1->get_axis();
+        } else {
+            OPENVINO_THROW("Unexpected node matched");
+        }
+
+        const auto& softmax_input = softmax->input_value(0);
+        const auto reduce_max = std::make_shared<ov::snippets::op::ReduceMax>(softmax_input, axis);
+        const auto subtract = std::make_shared<ov::op::v1::Subtract>(softmax_input, reduce_max);
+        const auto exp = std::make_shared<ov::op::v0::Exp>(subtract);
+
+        const auto reduce_sum = std::make_shared<ov::snippets::op::ReduceSum>(exp, axis);
+        const auto power = std::make_shared<ov::snippets::op::PowerStatic>(reduce_sum, -1.f);
+        const auto multiply = std::make_shared<ov::op::v1::Multiply>(exp, power);
+
+        OPENVINO_ASSERT(axis < rank, "Softmax has incorrect axis");
+        std::vector<size_t> subtensor(rank, 1);
+        for (size_t i = axis; i < rank; ++i)
+            subtensor[i] = PortDescriptor::ServiceDimensions::FULL_DIM;
+
+        PortDescriptorUtils::set_port_descriptor_ptr(reduce_max->input(0), std::make_shared<PortDescriptor>(reduce_max->input(0), subtensor));
+        PortDescriptorUtils::set_port_descriptor_ptr(reduce_max->output(0), std::make_shared<PortDescriptor>(reduce_max->output(0), subtensor));
+        PortDescriptorUtils::set_port_descriptor_ptr(reduce_sum->input(0), std::make_shared<PortDescriptor>(reduce_sum->input(0), subtensor));
+        PortDescriptorUtils::set_port_descriptor_ptr(reduce_sum->output(0), std::make_shared<PortDescriptor>(reduce_sum->output(0), subtensor));
+        PortDescriptorUtils::set_port_descriptor_ptr(power->input(0), std::make_shared<PortDescriptor>(power->input(0), subtensor));
+        PortDescriptorUtils::set_port_descriptor_ptr(power->output(0), std::make_shared<PortDescriptor>(power->output(0), subtensor));
+
+        return ov::replace_node_update_name(softmax, multiply);
+    };
+
+    auto m = std::make_shared<ov::pass::pattern::Matcher>(softmax_m, matcher_name);
+    register_matcher(m, callback);
+}
+
+} // namespace pass
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp
index 44c1065d8260a7..b3fa14e4116c12 100644
--- a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp
+++ b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp
@@ -233,5 +233,18 @@ Result BrgemmShapeInfer::infer(const std::vector<VectorDimsRef>& input_shapes) {
     return {{output_shape}, snippets::ShapeInferStatus::success};
 }
 
+ReduceShapeInfer::ReduceShapeInfer(const std::shared_ptr<Node>& n) {
+    const auto& reduce = as_type_ptr<op::ReduceBase>(n);
+    OPENVINO_ASSERT(reduce, "Invalid node passed to ReduceShapeInfer.");
+    m_axis = reduce->get_axis();
+}
+
+Result ReduceShapeInfer::infer(const std::vector<VectorDimsRef>& input_shapes) {
+    OPENVINO_ASSERT(input_shapes.size() == 1, "Invalid number of shapes passed to ReduceShapeInfer");
+    VectorDims result_shape = input_shapes[0].get();
+    result_shape[m_axis] = 1;
+    return {{result_shape}, ShapeInferStatus::success};
+}
+
 } // namespace snippets
 } // namespace ov
diff --git a/src/common/snippets/src/shape_inference/shape_inference.cpp b/src/common/snippets/src/shape_inference/shape_inference.cpp
index 5864972b94b9c7..fd9eb6f2e03307 100644
--- a/src/common/snippets/src/shape_inference/shape_inference.cpp
+++ b/src/common/snippets/src/shape_inference/shape_inference.cpp
@@ -88,6 +88,8 @@ std::shared_ptr make_shape_inference(const std::shared_ptr<
                ov::is_type(op) ||
                ov::is_type(op)) {
         return std::make_shared();
+    } else if (ov::is_type<op::ReduceBase>(op)) {
+        return std::make_shared<ReduceShapeInfer>(op);
     } else {
         OPENVINO_THROW("Operation type " + std::string(op->get_type_info().name) + " is not supported in Snippets shape inference pipeline");
     }
diff --git a/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp
index 532055099a5271..ba8812b00c8650 100644
--- a/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp
+++ b/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp
@@ -52,6 +52,15 @@ namespace ov {
     } \
 }
 
+#define CREATE_UNDEFINED_EMITTER(node_type) { \
+    [](const snippets::lowered::ExpressionPtr& expr) -> std::shared_ptr<jit_emitter> { \
+        return nullptr; \
+    }, \
+    [](const std::shared_ptr<ov::Node>& n) -> std::set<std::vector<element::Type>> { \
+        return node_type::get_supported_precisions(n); \
+    } \
+}
+
 class jit_snippet : public dnnl::impl::cpu::x64::jit_generator {
 public:
     DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_snippet)
@@ -165,6 +174,8 @@ intel_cpu::CPUTargetMachine::CPUTargetMachine(dnnl::impl::cpu::x64::cpu_isa_t ho
     jitters[snippets::op::LoopEnd::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(LoopEndEmitter);
     jitters[intel_cpu::BrgemmCPU::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(BrgemmEmitter);
     jitters[intel_cpu::BrgemmCopyB::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(BrgemmCopyBEmitter);
+    jitters[snippets::op::ReduceMax::get_type_info_static()] = CREATE_UNDEFINED_EMITTER(snippets::op::ReduceMax);
+    jitters[snippets::op::ReduceSum::get_type_info_static()] = CREATE_UNDEFINED_EMITTER(snippets::op::ReduceSum);
 
 #ifdef SNIPPETS_DEBUG_CAPS
     jitters[snippets::op::PerfCountBegin::get_type_info_static()] = CREATE_CPU_EMITTER(ov::intel_cpu::jit_perf_count_chrono_start_emitter);