diff --git a/src/common/snippets/include/snippets/op/broadcastload.hpp b/src/common/snippets/include/snippets/op/broadcastload.hpp index 0d90fb15a84b97..25ffa209da58c4 100644 --- a/src/common/snippets/include/snippets/op/broadcastload.hpp +++ b/src/common/snippets/include/snippets/op/broadcastload.hpp @@ -24,22 +24,9 @@ class BroadcastLoad : public BroadcastMove { BroadcastLoad(const Output& x, Shape output_shape); BroadcastLoad() = default; - bool visit_attributes(AttributeVisitor& visitor) override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; void validate_and_infer_types() override; - - void set_broadcast_info(const Shape& bct) { - broadcast_info = bct; - } - - bool is_broadcast(size_t idx) { - return broadcast_info[idx] == 1; - } - -private: - Shape broadcast_info; }; } // namespace op diff --git a/src/common/snippets/include/snippets/op/powerstatic.hpp b/src/common/snippets/include/snippets/op/powerstatic.hpp index 3ca930836d02c6..d3a5d3d200f750 100644 --- a/src/common/snippets/include/snippets/op/powerstatic.hpp +++ b/src/common/snippets/include/snippets/op/powerstatic.hpp @@ -20,6 +20,7 @@ namespace op { class PowerStatic : public ov::op::util::UnaryElementwiseArithmetic { public: OPENVINO_OP("PowerStatic", "SnippetsOpset", ov::op::util::UnaryElementwiseArithmetic); + BWDCMP_RTTI_DECLARATION; PowerStatic() = default; PowerStatic(const Output &arg, float power) : UnaryElementwiseArithmetic(arg), power(power) { diff --git a/src/common/snippets/include/snippets/op/scalar.hpp b/src/common/snippets/include/snippets/op/scalar.hpp index a8de072be50f10..5a5e6cd5871538 100644 --- a/src/common/snippets/include/snippets/op/scalar.hpp +++ b/src/common/snippets/include/snippets/op/scalar.hpp @@ -19,6 +19,9 @@ namespace op { class Scalar : public ov::op::v0::Constant { public: OPENVINO_OP("Scalar", "SnippetsOpset", ov::op::v0::Constant); + BWDCMP_RTTI_DECLARATION; + + Scalar() = default; template ::value>::type> Scalar(const element::Type& type, Shape shape, T value) : Constant(type, shape, value) { diff --git a/src/common/snippets/include/snippets/op/subgraph.hpp b/src/common/snippets/include/snippets/op/subgraph.hpp index 43c6376ad607c1..22f057e3f2a338 100644 --- a/src/common/snippets/include/snippets/op/subgraph.hpp +++ b/src/common/snippets/include/snippets/op/subgraph.hpp @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -22,9 +23,10 @@ namespace op { * @brief An operation that is implemented by a model * @ingroup snippets */ -class Subgraph : public ngraph::op::Op { +class Subgraph : public ov::op::util::SubGraphOp { public: - OPENVINO_OP("Subgraph", "SnippetsOpset"); + OPENVINO_OP("Subgraph", "SnippetsOpset", ov::op::util::SubGraphOp); + BWDCMP_RTTI_DECLARATION; // < 1, 42, 17, 15, 16> < 0, 1, 2, 3, 1> // should be: @@ -70,6 +72,8 @@ class Subgraph : public ngraph::op::Op { using BlockedShape = std::tuple; using BlockedShapeVector = std::vector; + Subgraph() = default; + Subgraph(const OutputVector& args, std::shared_ptr body); Subgraph(const NodeVector& args, std::shared_ptr body); @@ -80,11 +84,29 @@ class Subgraph : public ngraph::op::Op { std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; - std::shared_ptr get_body() const { - return m_body; + // we introduce this method instead of using SubGraphOp::get_function() + // to align naming with other methods + const std::shared_ptr & body_ptr() const { + return m_bodies[0]; + } + + std::shared_ptr & body_ptr() { + return m_bodies[0]; } - 
std::shared_ptr get_generator() const { + const ov::Model & body() const { + return *m_bodies[0]; + } + + ov::Model & body() { + return *m_bodies[0]; + } + + const std::shared_ptr & get_generator() const { + return m_generator; + } + + std::shared_ptr & get_generator() { return m_generator; } @@ -123,13 +145,13 @@ class Subgraph : public ngraph::op::Op { private: void align_element_types(const BlockedShapeVector& outputShapes, const BlockedShapeVector& inputShapes); void convert_to_snippet_dialect(); + // Count of potentional non-scalar Consants that will be created after some tranformations // At the moment it's relevant only for FakeQuantize decomposition // NOTE: To avoid overheads in each calcution of this count (for example, in validate_and_type_infer()), // we should MANUALLY calculate it where it needed. size_t m_non_scalar_constants_count = 0; Shape exec_domain = {}; - std::shared_ptr m_body = nullptr; std::shared_ptr m_generator = nullptr; // TODO: Change logic of insert Converts. This exec element type can be different for plugins diff --git a/src/common/snippets/src/op/broadcastload.cpp b/src/common/snippets/src/op/broadcastload.cpp index 893cae32831c51..99e77fe7259ff0 100644 --- a/src/common/snippets/src/op/broadcastload.cpp +++ b/src/common/snippets/src/op/broadcastload.cpp @@ -12,20 +12,14 @@ using namespace std; using namespace ngraph; snippets::op::BroadcastLoad::BroadcastLoad(const Output& x, Shape shape) -: BroadcastMove(x, shape), broadcast_info(x.get_shape().size(), 0) { +: BroadcastMove(x, shape) { constructor_validate_and_infer_types(); } -bool snippets::op::BroadcastLoad::visit_attributes(AttributeVisitor& visitor) { - return true; -} - std::shared_ptr snippets::op::BroadcastLoad::clone_with_new_inputs(const OutputVector& new_args) const { INTERNAL_OP_SCOPE(BroadcastLoad); check_new_args_count(this, new_args); - auto other = std::make_shared(new_args.at(0), output_shape); - other->set_broadcast_info(this->broadcast_info); - return other; + return std::make_shared(new_args.at(0), output_shape); } void snippets::op::BroadcastLoad::validate_and_infer_types() { diff --git a/src/common/snippets/src/op/broadcastmove.cpp b/src/common/snippets/src/op/broadcastmove.cpp index 089cd8f2abd70b..4a821f427480d4 100644 --- a/src/common/snippets/src/op/broadcastmove.cpp +++ b/src/common/snippets/src/op/broadcastmove.cpp @@ -17,6 +17,7 @@ snippets::op::BroadcastMove::BroadcastMove(const Output& x, Shape shape) : } bool snippets::op::BroadcastMove::visit_attributes(AttributeVisitor& visitor) { + visitor.on_attribute("output_shape", output_shape); return true; } diff --git a/src/common/snippets/src/op/powerstatic.cpp b/src/common/snippets/src/op/powerstatic.cpp new file mode 100644 index 00000000000000..57a55289020d7f --- /dev/null +++ b/src/common/snippets/src/op/powerstatic.cpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/op/powerstatic.hpp" + +namespace ngraph { +namespace snippets { +namespace op { + +BWDCMP_RTTI_DEFINITION(PowerStatic); + +} // namespace op +} // namespace snippets +} // namespace ngraph diff --git a/src/common/snippets/src/op/scalar.cpp b/src/common/snippets/src/op/scalar.cpp index c788c341a3e02f..8c1df4486c9381 100644 --- a/src/common/snippets/src/op/scalar.cpp +++ b/src/common/snippets/src/op/scalar.cpp @@ -6,6 +6,8 @@ using namespace ngraph; +BWDCMP_RTTI_DEFINITION(snippets::op::Scalar); + std::shared_ptr snippets::op::Scalar::clone_with_new_inputs(const OutputVector& new_args) 
const { check_new_args_count(this, new_args); return std::make_shared(*this); diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index 72573f5519a089..a841a04838908a 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -32,6 +32,9 @@ using namespace std; using namespace ngraph; +using namespace ov::op::util; + +BWDCMP_RTTI_DEFINITION(snippets::op::Subgraph); void snippets::op::Subgraph::set_generator(std::shared_ptr generator) { m_generator = generator; @@ -42,8 +45,9 @@ void snippets::op::Subgraph::set_non_scalar_constants_count(const size_t count) } snippets::op::Subgraph::Subgraph(const OutputVector& args, std::shared_ptr body) - : Op(args), m_body(body), m_generator(nullptr) { - const auto ops = m_body->get_ops(); + : SubGraphOp(args) { + set_function(body); + const auto ops = body_ptr()->get_ops(); for (const auto& op : ops) { config.m_is_quantized = config.m_is_quantized || ov::is_type(op); config.m_has_type_relaxed_ops = config.m_has_type_relaxed_ops || std::dynamic_pointer_cast(op); @@ -52,6 +56,11 @@ snippets::op::Subgraph::Subgraph(const OutputVector& args, std::shared_ptrget_parameters().size(); ++i) + m_input_descriptions[0].push_back(std::make_shared(i, i)); + for (size_t i = 0; i < body->get_output_size(); ++i) + m_output_descriptions[0].push_back(std::make_shared(i, i)); + m_transformations_allowed = false; } snippets::op::Subgraph::Subgraph(const NodeVector& args, std::shared_ptr body) @@ -59,34 +68,37 @@ snippets::op::Subgraph::Subgraph(const NodeVector& args, std::shared_ptr snippets::op::Subgraph::clone_with_new_inputs(const OutputVector& inputs) const { INTERNAL_OP_SCOPE(Subgraph); - return make_shared(inputs, ov::clone_model(*m_body.get())); + return make_shared(inputs, ov::clone_model(body())); } void snippets::op::Subgraph::validate_and_infer_types() { INTERNAL_OP_SCOPE(Subgraph); OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::validate_and_infer_types") ngraph::ParameterVector old_parameters; - for (auto op : m_body->get_parameters()) { + for (auto op : body_ptr()->get_parameters()) { old_parameters.push_back(op); } for (size_t i = 0; i < get_input_size(); ++i) { - m_body->replace_parameter(i, std::make_shared(get_input_element_type(i), get_input_partial_shape(i))); + body_ptr()->replace_parameter(i, std::make_shared(get_input_element_type(i), get_input_partial_shape(i))); } - m_body->validate_nodes_and_infer_types(); + body_ptr()->validate_nodes_and_infer_types(); - for (size_t i = 0; i < m_body->get_parameters().size(); i++) { - m_body->get_parameters()[i]->set_friendly_name(old_parameters[i]->get_friendly_name()); + for (size_t i = 0; i < body_ptr()->get_parameters().size(); i++) { + body_ptr()->get_parameters()[i]->set_friendly_name(old_parameters[i]->get_friendly_name()); } - set_output_size(m_body->get_output_size()); + set_output_size(body_ptr()->get_output_size()); for (size_t i = 0; i < get_output_size(); ++i) { - set_output_type(i, m_body->get_output_element_type(i), m_body->get_output_partial_shape(i)); + set_output_type(i, body_ptr()->get_output_element_type(i), body_ptr()->get_output_partial_shape(i)); } } bool snippets::op::Subgraph::visit_attributes(AttributeVisitor& visitor) { + visitor.on_attribute("body", body_ptr()); + visitor.on_attribute("input_descriptions", m_input_descriptions[0]); + visitor.on_attribute("output_descriptions", m_output_descriptions[0]); return true; } @@ -172,11 +184,11 @@ void 
snippets::op::Subgraph::fill_empty_output_names(const Output& target_ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShapes, const BlockedShapeVector& inputShapes) { INTERNAL_OP_SCOPE(Subgraph); OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::canonicalize") - NODE_VALIDATION_CHECK(this, inputShapes.size() == m_body->get_parameters().size(), - "Number of parameters for snippet doesn't match passed to generate method: ", inputShapes.size(), " vs ", m_body->get_parameters().size(), "."); + NODE_VALIDATION_CHECK(this, inputShapes.size() == body_ptr()->get_parameters().size(), + "Number of parameters for snippet doesn't match passed to generate method: ", inputShapes.size(), " vs ", body_ptr()->get_parameters().size(), "."); - NODE_VALIDATION_CHECK(this, outputShapes.size() == m_body->get_results().size(), - "number of results for snippet doesn't match passed to generate method: ", outputShapes.size(), " vs ", m_body->get_results().size(), "."); + NODE_VALIDATION_CHECK(this, outputShapes.size() == body_ptr()->get_results().size(), + "number of results for snippet doesn't match passed to generate method: ", outputShapes.size(), " vs ", body_ptr()->get_results().size(), "."); auto getMaxRankBlockedShape = [](const BlockedShapeVector& blockedShapes) -> const BlockedShape& { return *std::max_element(blockedShapes.begin(), blockedShapes.end(), @@ -219,13 +231,13 @@ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShape NODE_VALIDATION_CHECK(this, PartialShape::broadcast_merge_into(tmpPShape, inShape, ::ngraph::op::AutoBroadcastType::NUMPY), "Failed to create broadcastable shapes in snippets canonicalization"); - const auto paramShape = m_body->get_parameters()[i]->get_shape(); - const auto paramType = m_body->get_parameters()[i]->get_element_type(); + const auto paramShape = body_ptr()->get_parameters()[i]->get_shape(); + const auto paramType = body_ptr()->get_parameters()[i]->get_element_type(); if (paramShape.size() != inShape.size() || !equal(paramShape.begin(), paramShape.end(), inShape.begin())) - m_body->replace_parameter(i, std::make_shared(paramType, inShape)); + body_ptr()->replace_parameter(i, std::make_shared(paramType, inShape)); } - m_body->validate_nodes_and_infer_types(); + body_ptr()->validate_nodes_and_infer_types(); auto skipStartEndOnes = [](const Shape& shape) { auto begin = shape.begin(); auto end = shape.end(); @@ -239,7 +251,7 @@ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShape }; // Check that output shapes are broadcastable => can be scheduled - const auto& body_results = m_body->get_results(); + const auto& body_results = body_ptr()->get_results(); PartialShape outPShape = body_results[0]->get_shape(); for (size_t i = 0; i < body_results.size(); i++) { auto shape_i = body_results[i]->get_shape(); @@ -270,7 +282,7 @@ Shape snippets::op::Subgraph::canonicalize(const BlockedShapeVector& outputShape void snippets::op::Subgraph::align_element_types(const BlockedShapeVector& outputShapes, const BlockedShapeVector& inputShapes) { // We should insert Convert before Results to set original output element type if needed - const auto& body_results = m_body->get_results(); + const auto& body_results = body_ptr()->get_results(); for (size_t i = 0; i < outputShapes.size(); i++) { const auto needed_out_type = std::get<2>(outputShapes[i]); if (body_results[i]->get_input_element_type(0) != needed_out_type) { @@ -281,7 +293,7 @@ void 
snippets::op::Subgraph::align_element_types(const BlockedShapeVector& outpu } // We should change existing element type to original for Parameters if needed - const auto& body_parameters = m_body->get_parameters(); + const auto& body_parameters = body_ptr()->get_parameters(); for (size_t i = 0; i < inputShapes.size(); ++i) { const auto needed_in_type = std::get<2>(inputShapes[i]); if (body_parameters[i]->get_element_type() != needed_in_type) { @@ -300,7 +312,7 @@ void snippets::op::Subgraph::align_element_types(const BlockedShapeVector& outpu manager.register_pass(execution_element_type); manager.register_pass(); } - manager.run_passes(m_body); + manager.run_passes(body_ptr()); } void snippets::op::Subgraph::convert_to_snippet_dialect() { @@ -344,7 +356,7 @@ void snippets::op::Subgraph::convert_to_snippet_dialect() { manager.get_pass_config()-> set_callback(skip_matching_domain); } - manager.run_passes(m_body); + manager.run_passes(body_ptr()); } snippets::Schedule snippets::op::Subgraph::generate(const BlockedShapeVector& output_shapes, @@ -372,19 +384,19 @@ snippets::Schedule snippets::op::Subgraph::generate(ngraph::pass::Manager& opt, OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::op::generate") NGRAPH_CHECK(m_generator != nullptr, "generate is called while generator is not set"); convert_to_snippet_dialect(); - opt.run_passes(m_body); + opt.run_passes(body_ptr()); // generation flow - snippets::pass::AssignRegisters().run_on_model(m_body); + snippets::pass::AssignRegisters().run_on_model(body_ptr()); // schedule generation should go here and be target agnostic // actual code emission - ngraph::snippets::code ptr = m_generator->generate(m_body, compile_params); + ngraph::snippets::code ptr = m_generator->generate(body_ptr(), compile_params); // check that body doesn't have constants for scheduling std::vector> constants; - for (auto op : m_body->get_ordered_ops()) { + for (auto op : body_ptr()->get_ordered_ops()) { if (auto constant = ov::as_type_ptr(op)) { if (ngraph::shape_size(constant->get_shape()) != 1 && constant->get_shape() != Shape()) { constants.push_back(constant); @@ -400,10 +412,10 @@ void snippets::op::Subgraph::print() const { INTERNAL_OP_SCOPE(Subgraph); remark(13) << "subgraph " << this->get_friendly_name() << " " << this->get_type_name() - << " which contains " << this->get_body()->get_ops().size() << " nodes" << std::endl; + << " which contains " << body_ptr()->get_ops().size() << " nodes" << std::endl; int qqq = 0; - for (auto op : this->get_body()->get_ordered_ops()) { + for (auto op : body_ptr()->get_ordered_ops()) { remark(13) << "op " << qqq++ << " " << op->get_friendly_name() << " (" << op->get_type_name() << ") " << op << std::endl; } @@ -434,7 +446,7 @@ void snippets::op::Subgraph::print_statistics(bool verbose) { } if (auto subgraph = ngraph::as_type_ptr(n)) { - for (auto op : subgraph->get_body()->get_ordered_ops()) { + for (auto op : subgraph->body_ptr()->get_ordered_ops()) { if (ngraph::as_type_ptr(op)) { total += op->output(0).get_tensor().size(); } @@ -444,9 +456,9 @@ void snippets::op::Subgraph::print_statistics(bool verbose) { return total; }; - auto getModelInventory = [getNodeInventory](std::shared_ptr f) -> size_t { + auto getModelInventory = [getNodeInventory](const ov::Model & f) -> size_t { size_t total = 0; - for (auto op : f->get_ordered_ops()) { + for (auto op : f.get_ordered_ops()) { // Results and parameters are artificially introduced, // while Constants are already considered if they are inputs of other 
operation // this should lead to 1:1 inventory for single node operations @@ -459,24 +471,22 @@ void snippets::op::Subgraph::print_statistics(bool verbose) { return total; }; - auto countConstants = [](std::shared_ptr f) -> size_t { + auto countConstants = [](const ov::Model & f) -> size_t { size_t count = 0; - for (auto op : f->get_ordered_ops()) { + for (auto op : f.get_ordered_ops()) { count += !!ngraph::as_type_ptr(op) ? 1 : 0; } return count; }; - auto body = this->get_body(); - - std::cout << this->get_friendly_name() + std::cout << get_friendly_name() << ";" << this - << ";" << body->get_ops().size() - << ";" << body->get_parameters().size() - << ";" << body->get_results().size() - << ";" << countConstants(body) - << ";" << getModelInventory(body) - << ";" << getNodeInventory(this->shared_from_this()) << std::endl; + << ";" << body_ptr()->get_ops().size() + << ";" << body_ptr()->get_parameters().size() + << ";" << body_ptr()->get_results().size() + << ";" << countConstants(body()) + << ";" << getModelInventory(body()) + << ";" << getNodeInventory(shared_from_this()) << std::endl; if (verbose) { this->print(); @@ -486,7 +496,7 @@ void snippets::op::Subgraph::print_statistics(bool verbose) { void snippets::op::Subgraph::serialize() const { std::stringstream xmlFile, binFile; ov::pass::Serialize serializer(xmlFile, xmlFile, ov::pass::Serialize::Version::IR_V10); - serializer.run_on_model(get_body()); + serializer.run_on_model(body_ptr()); auto m_constants = binFile.str(); auto m_model = xmlFile.str(); std::cout << m_model << std::endl; diff --git a/src/common/snippets/src/pass/collapse_subgraph.cpp b/src/common/snippets/src/pass/collapse_subgraph.cpp index 57c737f992b89e..0f3dc5e8d808cd 100644 --- a/src/common/snippets/src/pass/collapse_subgraph.cpp +++ b/src/common/snippets/src/pass/collapse_subgraph.cpp @@ -162,7 +162,7 @@ auto update_out_tensor_name(std::shared_ptr &sub for (unsigned int i = 0; i < subgraph->get_output_size() && not_set; i++) { for (const auto &in : subgraph->get_output_target_inputs(i)) { if (ov::is_type(in.get_node())) { - const auto& body_result = subgraph->get_body()->get_output_op(i); + const auto& body_result = subgraph->body_ptr()->get_output_op(i); const auto& body_result_input = body_result->get_input_source_output(0); op::Subgraph::fill_empty_output_names(subgraph->output(i), body_result_input); not_set = false; @@ -318,8 +318,8 @@ TokenizeSnippets::TokenizeSnippets() { for (const auto &input_node : ngraph::as_node_vector(input_values)) { if (auto subgraph = ov::as_type_ptr(input_node)) { if (!clones.count(input_node)) { - auto f = ov::clone_model(*subgraph->get_body().get()); - f->set_friendly_name(subgraph->get_body()->get_friendly_name()); + auto f = ov::clone_model(subgraph->body()); + f->set_friendly_name(subgraph->body_ptr()->get_friendly_name()); clones[input_node] = f; } } @@ -332,6 +332,7 @@ TokenizeSnippets::TokenizeSnippets() { << " outputs" << std::endl; return true; } + std::string subgraph_name = node->get_friendly_name(); std::string fusedNames{}; size_t num_result_children = 0; std::pair currentTopoBounds {-1, LONG_MAX}; @@ -347,7 +348,12 @@ TokenizeSnippets::TokenizeSnippets() { fusedNames += getFusedNames(subgraph); - num_result_children += has_result_child(subgraph); + if (has_result_child(subgraph)) { + // we set input subgraph name to the current subgraph + // in order to save node friendly name before result + subgraph_name = subgraph->get_friendly_name(); + num_result_children += 1; + } auto f = clones[input_node]; const auto& 
input_body_parameters = f->get_parameters(); // Todo: @@ -546,10 +552,10 @@ TokenizeSnippets::TokenizeSnippets() { for (size_t i = 0; i < body->get_parameters().size(); i++) { body->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); } - auto subgraph = op::build_subgraph(node, external_inputs, body); - auto act_body = subgraph->get_body(); - for (size_t i = 0; i < act_body->get_parameters().size(); i++) { - act_body->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); + auto subgraph = op::build_subgraph(node, external_inputs, body, subgraph_name); + const auto & act_body = subgraph->body(); + for (size_t i = 0; i < act_body.get_parameters().size(); i++) { + act_body.get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); } if (subgraph->get_output_size() != subgraph_result_inputs.size()) { @@ -568,9 +574,9 @@ TokenizeSnippets::TokenizeSnippets() { subgraph->validate_and_infer_types(); - auto act_body1 = subgraph->get_body(); - for (size_t i = 0; i < act_body1->get_parameters().size(); i++) { - act_body1->get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); + const auto & act_body1 = subgraph->body(); + for (size_t i = 0; i < act_body1.get_parameters().size(); i++) { + act_body1.get_parameters()[i]->set_friendly_name(body_parameters[i]->get_friendly_name()); } subgraph->get_rt_info()["originalLayersNames"] = fusedNames; subgraph->set_non_scalar_constants_count(hidden_non_scalar_constant_count); @@ -579,7 +585,7 @@ TokenizeSnippets::TokenizeSnippets() { << subgraph->get_friendly_name() << " with " << subgraph->inputs().size() << " inputs and " << subgraph->outputs().size() - << " outputs and " << subgraph->get_body()->get_ops().size() << " ops total\n"; + << " outputs and " << subgraph->body_ptr()->get_ops().size() << " ops total\n"; return true; }; diff --git a/src/common/snippets/src/pass/common_optimizations.cpp b/src/common/snippets/src/pass/common_optimizations.cpp index c81ec235bb7ea8..795778e802aa7c 100644 --- a/src/common/snippets/src/pass/common_optimizations.cpp +++ b/src/common/snippets/src/pass/common_optimizations.cpp @@ -23,7 +23,7 @@ namespace pass { // Move up Constants which aren't scalars from body to Subgraph and replace them with Parameters inside body void ConvertConstantsToParameters(const std::shared_ptr& subgraph) { OV_ITT_SCOPED_TASK(ngraph::pass::itt::domains::SnippetsTransform, "Snippets::ConvertConstantsToParameters"); - auto body = subgraph->get_body(); + auto body = subgraph->body_ptr(); ParameterVector new_parameters; OutputVector new_external_inputs = subgraph->input_values(); @@ -59,7 +59,7 @@ CommonOptimizations::CommonOptimizations() { return false; } - auto body = subgraph->get_body(); + auto body = subgraph->body_ptr(); const auto is_quantized = subgraph->is_quantized(); // Firsly we should transform all original Converts inside body to ConvertTruncation to save original behavior. 
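Editor's note: the subgraph.hpp and subgraph.cpp hunks above re-base snippets::op::Subgraph onto ov::op::util::SubGraphOp — the body is stored in m_bodies[0], the constructor registers one InvariantInputDescription per body Parameter and one BodyOutputDescription per body output, and get_body() is replaced by the body()/body_ptr() accessors. A minimal usage sketch of the resulting API follows; it is not part of the patch, and the helper name and the tiny Add body are illustrative only.

    // Illustrative sketch: wrap a small Add body into a snippets Subgraph and
    // read it back through the accessors introduced by this patch.
    #include <memory>
    #include "openvino/opsets/opset9.hpp"
    #include "snippets/op/subgraph.hpp"

    std::shared_ptr<ngraph::snippets::op::Subgraph> make_demo_subgraph() {
        using namespace ov::opset9;
        const ov::Shape shape{2, 2};
        // External inputs that feed the Subgraph op itself.
        auto in0 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto in1 = std::make_shared<Parameter>(ov::element::f32, shape);
        // Body model with its own Parameters; Results are created implicitly.
        auto body_p0 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto body_p1 = std::make_shared<Parameter>(ov::element::f32, shape);
        auto add = std::make_shared<Add>(body_p0, body_p1);
        auto body = std::make_shared<ov::Model>(ov::NodeVector{add},
                                                ov::ParameterVector{body_p0, body_p1});
        // The patched constructor stores the body in m_bodies[0] and fills the
        // input/output descriptions, so visit_attributes() and serialization can reach it.
        auto subgraph = std::make_shared<ngraph::snippets::op::Subgraph>(
            ov::NodeVector{in0, in1}, body);
        // get_body() is gone: body() returns a reference, body_ptr() the shared_ptr.
        const ov::Model& body_ref = subgraph->body();
        size_t op_count = body_ref.get_ops().size();
        (void)op_count;
        return subgraph;
    }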
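Because Subgraph::visit_attributes() now exposes "body", "input_descriptions" and "output_descriptions", a model that contains a snippets Subgraph can be handed to the stock IR serializer, which is what the CPU plugin's export path relies on. A short sketch of that call, assuming string streams in the same way as the patched Subgraph::serialize(); the helper name is illustrative.

    // Illustrative sketch: serialize a model that contains a snippets Subgraph
    // through the regular ov::pass::Serialize pass.
    #include <memory>
    #include <sstream>
    #include <string>
    #include "openvino/core/model.hpp"
    #include "openvino/pass/serialize.hpp"

    std::string serialize_to_xml(const std::shared_ptr<ov::Model>& model_with_subgraph) {
        std::stringstream xml_stream, bin_stream;
        // IR_V10 matches the version used in Subgraph::serialize() above.
        ov::pass::Serialize serializer(xml_stream, bin_stream,
                                       ov::pass::Serialize::Version::IR_V10);
        serializer.run_on_model(model_with_subgraph);
        return xml_stream.str();
    }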
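The constructor above also sets m_transformations_allowed = false; later hunks in this patch add MultiSubGraphOp::get_transformations_allowed() and teach GraphRewrite to honor it, so generic matcher passes no longer descend into an already tokenized snippet body. A standalone sketch of that guard under those assumptions; the helper name is illustrative.

    // Illustrative sketch: run a model pass over a node's internal bodies,
    // but only when the sub-graph op still allows transformations.
    #include <memory>
    #include "openvino/op/util/multi_subgraph_base.hpp"
    #include "openvino/pass/pass.hpp"

    void run_on_internal_bodies(const std::shared_ptr<ov::Node>& node, ov::pass::ModelPass& pass) {
        auto sub_graph_node = std::dynamic_pointer_cast<ov::op::util::MultiSubGraphOp>(node);
        if (!sub_graph_node || !sub_graph_node->get_transformations_allowed()) {
            // snippets::op::Subgraph sets m_transformations_allowed = false,
            // so its body is deliberately skipped here.
            return;
        }
        for (size_t i = 0; i < sub_graph_node->get_internal_subgraphs_size(); ++i) {
            pass.run_on_model(sub_graph_node->get_function(static_cast<int>(i)));
        }
    }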
diff --git a/src/common/snippets/tests/include/lowering_utils.hpp b/src/common/snippets/tests/include/lowering_utils.hpp index 5af4af2a32b099..8e86d321e7e733 100644 --- a/src/common/snippets/tests/include/lowering_utils.hpp +++ b/src/common/snippets/tests/include/lowering_utils.hpp @@ -38,6 +38,8 @@ class DummyGenerator : public ngraph::snippets::Generator { }; class LoweringTests : public TransformationTestsF { +public: + LoweringTests(); protected: static std::shared_ptr getSubgraph(const std::shared_ptr& f); static std::shared_ptr getLoweredSubgraph(const std::shared_ptr& f); diff --git a/src/common/snippets/tests/src/lowering_utils.cpp b/src/common/snippets/tests/src/lowering_utils.cpp index 4aab86d5d7c07c..de46de861cae52 100644 --- a/src/common/snippets/tests/src/lowering_utils.cpp +++ b/src/common/snippets/tests/src/lowering_utils.cpp @@ -34,6 +34,13 @@ DummyTargetMachine::DummyTargetMachine() { jitters[ngraph::snippets::op::TileScheduler::get_type_info_static()] = dummy_functor; } +LoweringTests::LoweringTests() : TransformationTestsF() { + // external subgraph input shape and internal parameters shapes + // might differ due to the blocked layout + // so input & output descriptors shouldn't be checked + comparator.disable(FunctionsComparator::CmpValues::SUBGRAPH_DESCRIPTORS); +} + std::shared_ptr LoweringTests::getSubgraph(const std::shared_ptr& f) { std::shared_ptr subgraph; for (const auto &op : f->get_ops()) { diff --git a/src/common/snippets/tests/src/pass/collapse_subgraph.cpp b/src/common/snippets/tests/src/pass/collapse_subgraph.cpp index 3e578119b25d19..f5be10838d6c6c 100644 --- a/src/common/snippets/tests/src/pass/collapse_subgraph.cpp +++ b/src/common/snippets/tests/src/pass/collapse_subgraph.cpp @@ -77,6 +77,14 @@ TEST_F(CollapseSubgraphTests, smoke_Snippets_ConvertPartialInputsAndResults) { run(); } +TEST_F(CollapseSubgraphTests, smoke_Snippets_EltwiseTwoResultsFunction) { + const auto &f = EltwiseTwoResultsFunction(std::vector{{2, 5}, {2, 1}}); + function = f.getOriginal(); + function_ref = f.getReference(); + comparator.enable(FunctionsComparator::CmpValues::NAMES); + run(); +} + } // namespace snippets } // namespace test } // namespace ov \ No newline at end of file diff --git a/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp b/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp index 9c12987f5eef12..885e5c2304a7b6 100644 --- a/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp +++ b/src/common/snippets/tests/src/pass/fake_quantize_decomposition_test.cpp @@ -24,10 +24,10 @@ class FakeQuantizeDecompositionTest : public TransformationTestsF { TransformationTestsF::TearDown(); auto subgraph = FunctionHelper::getSubgraph(function); - auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph)->get_body(); + auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph)->body_ptr(); auto subgraph_ref = FunctionHelper::getSubgraph(function_ref); - auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph_ref)->get_body(); + auto body_ref = subgraph_ref == nullptr ? 
nullptr : std::dynamic_pointer_cast(subgraph_ref)->body_ptr(); auto res = comparator.compare(body, body_ref); ASSERT_TRUE(res.valid) << res.message; diff --git a/src/common/snippets/tests/src/pass/insert_load_store.cpp b/src/common/snippets/tests/src/pass/insert_load_store.cpp index 9913225763b729..1a2fa5a75fc652 100644 --- a/src/common/snippets/tests/src/pass/insert_load_store.cpp +++ b/src/common/snippets/tests/src/pass/insert_load_store.cpp @@ -35,7 +35,7 @@ void InsertLoadStoreTests::SetUp() { TEST_P(InsertLoadStoreTests, ThreeInputsEltwise) { auto subgraph = getLoweredSubgraph(snippets_function->getOriginal()); - function = subgraph->get_body(); + function = subgraph->body_ptr(); function_ref = snippets_function->getLowered(); } diff --git a/src/common/snippets/tests/src/pass/insert_movebroadcast.cpp b/src/common/snippets/tests/src/pass/insert_movebroadcast.cpp index 9be1c569a81b26..f97b8019239fcc 100644 --- a/src/common/snippets/tests/src/pass/insert_movebroadcast.cpp +++ b/src/common/snippets/tests/src/pass/insert_movebroadcast.cpp @@ -33,7 +33,7 @@ void InsertMoveBroadcastTests::SetUp() { TEST_P(InsertMoveBroadcastTests, AddBroadcast) { auto subgraph = getLoweredSubgraph(snippets_function->getOriginal()); - function = subgraph->get_body(); + function = subgraph->body_ptr(); function_ref = snippets_function->getLowered(); } diff --git a/src/core/include/openvino/op/util/multi_subgraph_base.hpp b/src/core/include/openvino/op/util/multi_subgraph_base.hpp index 10f21cec0cf27e..6724f7576805f4 100644 --- a/src/core/include/openvino/op/util/multi_subgraph_base.hpp +++ b/src/core/include/openvino/op/util/multi_subgraph_base.hpp @@ -295,6 +295,10 @@ class OPENVINO_API MultiSubGraphOp : public Op { return m_output_descriptions.size(); } + bool get_transformations_allowed() const { + return m_transformations_allowed; + } + MultiSubGraphOp(const MultiSubGraphOp&) = delete; MultiSubGraphOp(MultiSubGraphOp&&) = default; @@ -313,6 +317,7 @@ class OPENVINO_API MultiSubGraphOp : public Op { std::vector> m_bodies; std::vector m_input_descriptions; std::vector m_output_descriptions; + bool m_transformations_allowed = true; }; } // namespace util } // namespace op diff --git a/src/core/src/pass/graph_rewrite.cpp b/src/core/src/pass/graph_rewrite.cpp index 011990f0cbadee..1bad852ac660d4 100644 --- a/src/core/src/pass/graph_rewrite.cpp +++ b/src/core/src/pass/graph_rewrite.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -175,10 +176,12 @@ bool ov::pass::GraphRewrite::apply_matcher_passes(std::shared_ptr f, // Recursive apply Matchers for sub-graph based nodes if (auto sub_graph_node = std::dynamic_pointer_cast(node)) { - size_t sub_graphs_num = sub_graph_node->get_internal_subgraphs_size(); - for (size_t sub_graph_ind = 0; sub_graph_ind < sub_graphs_num; ++sub_graph_ind) { - auto sub_graph = sub_graph_node->get_function(static_cast(sub_graph_ind)); - run_on_model(sub_graph); + if (sub_graph_node->get_transformations_allowed()) { + size_t sub_graphs_num = sub_graph_node->get_internal_subgraphs_size(); + for (size_t sub_graph_ind = 0; sub_graph_ind < sub_graphs_num; ++sub_graph_ind) { + auto sub_graph = sub_graph_node->get_function(sub_graph_ind); + run_on_model(sub_graph); + } } } // Temporary keep this GraphRewrite property for backward compatibility diff --git a/src/inference/include/ie/ie_input_info.hpp b/src/inference/include/ie/ie_input_info.hpp index 8cee7bfc5925cc..7da44d105bc6cc 100644 --- a/src/inference/include/ie/ie_input_info.hpp +++ 
b/src/inference/include/ie/ie_input_info.hpp @@ -107,6 +107,18 @@ class InputInfo { return _inputData->getName(); } + /** + * @brief Changes the name of the input data provided by the user. + * + * @param newName A new name of the input data to set + */ + void setName(const std::string& newName) { + if (!_inputData) { + IE_THROW() << "Data is empty!"; + } + _inputData->setName(newName); + } + /** * @brief Gets the input data * diff --git a/src/plugins/intel_cpu/src/extension.cpp b/src/plugins/intel_cpu/src/extension.cpp index fcdda93ddbdb41..5dd012ce137082 100644 --- a/src/plugins/intel_cpu/src/extension.cpp +++ b/src/plugins/intel_cpu/src/extension.cpp @@ -20,6 +20,8 @@ #include #include +#include + #include namespace ov { @@ -125,10 +127,33 @@ std::map Extension::getOpSets() { return opset; }; + auto snippets_opset = []() { + ngraph::OpSet opset; + +#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); + NGRAPH_OP(BroadcastLoad, ngraph::snippets::op) + NGRAPH_OP(BroadcastMove, ngraph::snippets::op) + NGRAPH_OP(ConvertSaturation, ngraph::snippets::op) + NGRAPH_OP(ConvertTruncation, ngraph::snippets::op) + NGRAPH_OP(Kernel, ngraph::snippets::op) + NGRAPH_OP(Load, ngraph::snippets::op) + NGRAPH_OP(Nop, ngraph::snippets::op) + NGRAPH_OP(PowerStatic, ngraph::snippets::op) + NGRAPH_OP(Scalar, ngraph::snippets::op) + NGRAPH_OP(Store, ngraph::snippets::op) + NGRAPH_OP(Subgraph, ngraph::snippets::op) + NGRAPH_OP(Tile, ngraph::snippets::op) + NGRAPH_OP(TileScheduler, ngraph::snippets::op) +#undef NGRAPH_OP + + return opset; + }; + static std::map opsets = { { "cpu_plugin_opset", cpu_plugin_opset() }, { "type_relaxed_opset", type_relaxed_opset() }, { "ie_internal_opset", ie_internal_opset() }, + { "SnippetsOpset", snippets_opset() }, }; return opsets; diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index 503d989492f5e3..a9fb77b27ce117 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -58,10 +58,10 @@ void Snippet::copy_snippet() { if (!sharedMutex) { IE_THROW() << "Subgraph doesn't have shared mutex"; } - std::lock_guard lock(*sharedMutex.get()); - new_body = ov::clone_model(*original_snippet->get_body().get()); + std::lock_guard lock(*sharedMutex); + new_body = ov::clone_model(*original_snippet->body_ptr()); } else { - new_body = ov::clone_model(*original_snippet->get_body().get()); + new_body = ov::clone_model(*original_snippet->body_ptr()); } snippet = std::make_shared(subgraph_node_inputs, new_body); ngraph::copy_runtime_info(original_snippet, snippet); @@ -320,13 +320,13 @@ void Snippet::define_schedule() { // Canonicalization broadcasts inputs and outputs to max input rank, which can be smaller than tensorRank // prepend to enable 6D scheduler exec_domain = prependWithOnes(exec_domain); - const auto &body = snippet->get_body(); - for (const auto& p : body->get_parameters()) { + const auto &body = snippet->body(); + for (const auto& p : body.get_parameters()) { dims_in.emplace_back(prependWithOnes(p->get_shape())); } - for (size_t i = 0; i < body->get_output_size(); i++) { - dims_out.push_back(prependWithOnes(body->get_output_shape(i))); + for (size_t i = 0; i < body.get_output_size(); i++) { + dims_out.push_back(prependWithOnes(body.get_output_shape(i))); } const auto config = getSelectedPrimitiveDescriptor()->getConfig(); diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index cc438cf34ae3d2..b68a8c77ae3984 100644 --- 
a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -867,13 +867,10 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std } else { enableBF16 = engConfig.enforceBF16 && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core); } - const auto& modelCacheProp = config.find(InferenceEngine::PluginConfigParams::KEY_CACHE_DIR); - const bool enableModelCache = (modelCacheProp != config.end() && !modelCacheProp->second.empty()) - || !engConfig.cache_dir.empty(); const auto& dynamicBatchProp = config.find(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED); const bool enableDynamicBatch = (dynamicBatchProp != config.end() && dynamicBatchProp->second == PluginConfigParams::YES) || engConfig.enableDynamicBatch; - const bool enableSnippets = !(enableModelCache || enableDynamicBatch); + const bool enableSnippets = !enableDynamicBatch; auto nGraphFunc = clonedNetwork.getFunction(); DEBUG_LOG(PrintableModel(*nGraphFunc, "org_")); @@ -1122,7 +1119,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE); const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/ || Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled */; - const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch); + const bool enableSnippets = !conf.enableDynamicBatch; auto model = network.getFunction(); if (model == nullptr) { diff --git a/src/plugins/intel_cpu/src/serialize.cpp b/src/plugins/intel_cpu/src/serialize.cpp index 27e4e9a5451740..f9013b90750c5c 100644 --- a/src/plugins/intel_cpu/src/serialize.cpp +++ b/src/plugins/intel_cpu/src/serialize.cpp @@ -41,28 +41,22 @@ namespace { IE_THROW(NetworkNotRead) << "Unknown layout with name '" << name << "'"; } - template - void setPrecisionsAndLayouts( - pugi::xml_object_range && nodes, - T && info) { - for (auto n : nodes) { - auto name_attr = n.attribute("name"); - auto precision_attr = n.attribute("precision"); - auto layout_attr = n.attribute("layout"); - - if (!name_attr - || !precision_attr - || !layout_attr) { + template + void setInfo(pugi::xml_object_range&& nodes, T&& info) { + auto nodes_it = nodes.begin(); + auto info_iter = info.begin(); + for (; nodes_it != nodes.end(); ++nodes_it, ++info_iter) { + auto name_attr = nodes_it->attribute("name"); + auto precision_attr = nodes_it->attribute("precision"); + auto layout_attr = nodes_it->attribute("layout"); + + if (!name_attr || !precision_attr || !layout_attr || info_iter == info.end()) { IE_THROW(NetworkNotRead) << "The inputs/outputs information is invalid."; } - auto it = info.find(name_attr.value()); - if (it == info.end()) { - IE_THROW(NetworkNotRead) << "The input/output with name '" << name_attr.value() << "' not found"; - } - - it->second->setPrecision(Precision::FromStr(precision_attr.value())); - it->second->setLayout(layout_from_string(layout_attr.value())); + info_iter->second->setName(name_attr.value()); + info_iter->second->setPrecision(Precision::FromStr(precision_attr.value())); + info_iter->second->setLayout(layout_from_string(layout_attr.value())); } } }; // namespace @@ -170,8 +164,8 @@ void CNNNetworkDeserializer::operator >> (InferenceEngine::CNNNetwork & network) pugi::xml_node inputs = root.child("inputs"); pugi::xml_node outputs = root.child("outputs"); - 
setPrecisionsAndLayouts(inputs.children("in"), network.getInputsInfo()); - setPrecisionsAndLayouts(outputs.children("out"), network.getOutputsInfo()); + setInfo(inputs.children("in"), network.getInputsInfo()); + setInfo(outputs.children("out"), network.getOutputsInfo()); } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt index ee175baa10deaa..de6587bde99b1e 100644 --- a/src/plugins/intel_cpu/tests/functional/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/functional/CMakeLists.txt @@ -16,7 +16,7 @@ target_link_libraries(cpuSpecificRtInfo PRIVATE openvino::runtime) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR} $/src) set(DEPENDENCIES openvino_intel_cpu_plugin) -set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo) +set(LINK_LIBRARIES funcSharedTests cpuSpecificRtInfo inference_engine_snippets) if (ENABLE_OV_ONNX_FRONTEND) list(APPEND DEFINES TEST_MODELS="${TEST_MODEL_ZOO}") else() diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp new file mode 100644 index 00000000000000..298fe8c653d519 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_serialize.cpp @@ -0,0 +1,149 @@ +// Copyright (C) 2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/openvino.hpp" +#include "openvino/opsets/opset9.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "ngraph_functions/builders.hpp" +#include "test_utils/convolution_params.hpp" +#include "snippets/op/subgraph.hpp" + +using namespace CPUTestUtils; +using namespace ov::opset9; + +namespace SubgraphTestsDefinitions { + +class SubgraphSnippetSerializationTest : public ::testing::Test, public CPUTestsBase {}; + +TEST_F(SubgraphSnippetSerializationTest, SerializeSubgraph) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + auto model = ([] () -> std::shared_ptr { + auto shape = ov::Shape({2, 2}); + auto input0 = std::make_shared(ov::element::f32, shape); + auto input1 = std::make_shared(ov::element::f32, shape); + auto ininput0 = std::make_shared(ov::element::f32, shape); + auto ininput1 = std::make_shared(ov::element::f32, shape); + auto add = std::make_shared(ininput0, ininput1); + auto subgraph_body = std::make_shared(ov::NodeVector{add}, ov::ParameterVector{ininput0, ininput1}); + auto subgraph = std::make_shared(ov::NodeVector{input0, input1}, ov::clone_model(*subgraph_body.get())); + return std::make_shared(ov::NodeVector{subgraph}, ov::ParameterVector{input0, input1}); + })(); + ov::Core core; + ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); + std::stringstream stream; + compiled_model.export_model(stream); + ov::CompiledModel imported_compiled_model = core.import_model(stream, "CPU"); + float data[] = {1.f, 1.f, 1.f, 1.f}; + ov::Tensor input_data1{ov::element::f32, ov::Shape({2, 2}), data}; + ov::Tensor input_data2{ov::element::f32, ov::Shape({2, 2}), data}; + ov::InferRequest infer_request = compiled_model.create_infer_request(); + infer_request.set_input_tensor(0, input_data1); + infer_request.set_input_tensor(1, input_data2); + infer_request.infer(); + auto out = infer_request.get_output_tensor(0); + float* out_p = static_cast(out.data(ov::element::Type_t::f32)); + auto out_val = std::vector(out_p, out_p + out.get_size()); + ov::InferRequest imported_infer_request = imported_compiled_model.create_infer_request(); + 
imported_infer_request.set_input_tensor(0, input_data1); + imported_infer_request.set_input_tensor(1, input_data2); + imported_infer_request.infer(); + auto imported_out = imported_infer_request.get_output_tensor(0); + float* imported_out_p = static_cast(imported_out.data(ov::element::Type_t::f32)); + auto imported_out_val = std::vector(imported_out_p, imported_out_p + imported_out.get_size()); + ASSERT_EQ(out_val, imported_out_val); + + auto compiled_model_runtime = ov::clone_model(*compiled_model.get_runtime_model()); + auto imported_compiled_model_runtime = ov::clone_model(*imported_compiled_model.get_runtime_model()); + const auto fc = FunctionsComparator::with_default() + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::ATTRIBUTES); + const auto results = fc.compare(compiled_model_runtime, imported_compiled_model_runtime); + + ASSERT_TRUE(results.valid) << results.message; +} + +TEST_F(SubgraphSnippetSerializationTest, SerializeSubgraphWithScalarConst) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + auto model = ([] () -> std::shared_ptr { + auto shape = ov::Shape({1}); + auto input = std::make_shared(ov::element::f32, shape); + auto internal_input = std::make_shared(ov::element::f32, shape); + auto constant = std::make_shared(ov::element::f32, shape, 2); + auto internal_constant = std::make_shared(ov::element::f32, shape, 2); + auto add = std::make_shared(input, constant); + auto internal_add = std::make_shared(internal_input, internal_constant); + auto subgraph_body = std::make_shared(ov::NodeVector{internal_add}, ov::ParameterVector{internal_input}); + auto subgraph = std::make_shared(ov::NodeVector{add}, ov::clone_model(*subgraph_body.get())); + return std::make_shared(ov::NodeVector{subgraph}, ov::ParameterVector{input}); + })(); + ov::Core core; + ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); + std::stringstream stream; + compiled_model.export_model(stream); + float data[] = {1.f}; + ov::Tensor input_data1{ov::element::f32, ov::Shape({1}), data}; + ov::CompiledModel imported_compiled_model = core.import_model(stream, "CPU"); + ov::InferRequest infer_request = compiled_model.create_infer_request(); + infer_request.set_input_tensor(0, input_data1); + infer_request.infer(); + auto out = infer_request.get_output_tensor(0); + float* out_p = static_cast(out.data(ov::element::Type_t::f32)); + auto out_val = std::vector(out_p, out_p + out.get_size()); + ov::InferRequest imported_infer_request = imported_compiled_model.create_infer_request(); + imported_infer_request.set_input_tensor(0, input_data1); + imported_infer_request.infer(); + auto imported_out = imported_infer_request.get_output_tensor(0); + float* imported_out_p = static_cast(imported_out.data(ov::element::Type_t::f32)); + auto imported_out_val = std::vector(imported_out_p, imported_out_p + imported_out.get_size()); + ASSERT_EQ(out_val, imported_out_val); + + auto compiled_model_runtime = ov::clone_model(*compiled_model.get_runtime_model()); + auto imported_compiled_model_runtime = ov::clone_model(*imported_compiled_model.get_runtime_model()); + const auto fc = FunctionsComparator::with_default() + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::ATTRIBUTES); + const auto results = fc.compare(compiled_model_runtime, imported_compiled_model_runtime); + + ASSERT_TRUE(results.valid) << results.message; +} + +TEST_F(SubgraphSnippetSerializationTest, SerializeSubgraphWithResultAs1stOutput) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + auto precision = ov::element::f32; + 
auto shape = ov::Shape{1, 3, 16, 16}; + + auto model = [&] () -> std::shared_ptr { + auto input1 = std::make_shared(precision, shape); + auto input2 = std::make_shared(precision, shape); + auto sinh1 = std::make_shared(input1); + auto sinh2 = std::make_shared(input2); + + auto relu = std::make_shared(sinh2); + auto sinh_out = std::make_shared(relu); + auto result1 = std::make_shared(sinh_out); + + auto add = std::make_shared(sinh1, relu); + auto result2 = std::make_shared(add); + + ov::ParameterVector params{input1, input2}; + ov::ResultVector results{result1, result2}; + return std::make_shared(results, params); + }(); + ov::Core core; + ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); + std::stringstream stream; + compiled_model.export_model(stream); + ov::CompiledModel imported_compiled_model = core.import_model(stream, "CPU"); + + auto compiled_model_runtime = ov::clone_model(*compiled_model.get_runtime_model()); + auto imported_compiled_model_runtime = ov::clone_model(*imported_compiled_model.get_runtime_model()); + const auto fc = FunctionsComparator::with_default() + .enable(FunctionsComparator::CONST_VALUES) + .enable(FunctionsComparator::ATTRIBUTES); + const auto results = fc.compare(compiled_model_runtime, imported_compiled_model_runtime); + + ASSERT_TRUE(results.valid) << results.message; +} +} // namespace SubgraphTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/unit/snippets/fake_quantize_tokenization_test.cpp b/src/plugins/intel_cpu/tests/unit/snippets/fake_quantize_tokenization_test.cpp index 4746a69d82f66b..e6f83cad753b0e 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets/fake_quantize_tokenization_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets/fake_quantize_tokenization_test.cpp @@ -31,10 +31,10 @@ class FakeQuantizeTokenizationTest : public TransformationTestsF { TransformationTestsF::TearDown(); auto subgraph = FunctionHelper::getSubgraph(function); - auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph)->get_body(); + auto body = subgraph == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph)->body_ptr(); auto subgraph_ref = FunctionHelper::getSubgraph(function_ref); - auto body_ref = subgraph_ref == nullptr ? nullptr : std::dynamic_pointer_cast(subgraph_ref)->get_body(); + auto body_ref = subgraph_ref == nullptr ? 
nullptr : std::dynamic_pointer_cast(subgraph_ref)->body_ptr(); if ((body != nullptr) && (body_ref != nullptr)) { auto res = comparator.compare(body, body_ref); diff --git a/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp b/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp index 93bb61bcc20302..3327150cbe1281 100644 --- a/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp +++ b/src/tests/ie_test_utils/common_test_utils/graph_comparator.cpp @@ -451,7 +451,7 @@ class CompareSubGraphs { using Result = Comparator::Result; using SubGraphOp = ov::op::util::SubGraphOp; - Result compare(SubGraphOp* sub_lhs, SubGraphOp* sub_rhs) { + Result compare(SubGraphOp* sub_lhs, SubGraphOp* sub_rhs, bool compare_in_outs) { const auto lhs_it_no = get_num_iterations(sub_lhs); const auto rhs_it_no = get_num_iterations(sub_rhs); if (lhs_it_no != rhs_it_no) { @@ -460,14 +460,16 @@ class CompareSubGraphs { not_valid_input_output = lhs_it_no; - const auto result_for_inputs = compare_inputs(sub_lhs, sub_rhs); - if (!result_for_inputs.valid) { - return result_for_inputs; - } + if (compare_in_outs) { + const auto& result_for_inputs = compare_inputs(sub_lhs, sub_rhs); + if (!result_for_inputs.valid) { + return result_for_inputs; + } - const auto result_for_outputs = compare_outputs(sub_lhs, sub_rhs); - if (!result_for_outputs.valid) { - return result_for_outputs; + const auto& result_for_outputs = compare_outputs(sub_lhs, sub_rhs); + if (!result_for_outputs.valid) { + return result_for_outputs; + } } return compare_backedges(sub_lhs, sub_rhs); @@ -559,8 +561,10 @@ class CompareSubGraphs { } // namespace detail -Comparator::Result compare_io(ov::op::util::SubGraphOp* sub_lhs, ov::op::util::SubGraphOp* sub_rhs) { - return detail::CompareSubGraphs{}.compare(sub_lhs, sub_rhs); +Comparator::Result compare_io(ov::op::util::SubGraphOp* sub_lhs, + ov::op::util::SubGraphOp* sub_rhs, + bool compare_in_outs) { + return detail::CompareSubGraphs{}.compare(sub_lhs, sub_rhs, compare_in_outs); } } // namespace subgraph } // namespace @@ -698,7 +702,7 @@ Comparator::Result Comparator::compare(ngraph::Node* node1, ngraph::Node* node2, const bool subgraph_nodes = subgraph1 && subgraph2; if (subgraph_nodes) { - const auto result = subgraph::compare_io(subgraph1, subgraph2); + const auto result = subgraph::compare_io(subgraph1, subgraph2, should_compare(CmpValues::SUBGRAPH_DESCRIPTORS)); if (!result.valid) { return result; } diff --git a/src/tests/ie_test_utils/common_test_utils/graph_comparator.hpp b/src/tests/ie_test_utils/common_test_utils/graph_comparator.hpp index c96753ba65a693..81caa08735265f 100644 --- a/src/tests/ie_test_utils/common_test_utils/graph_comparator.hpp +++ b/src/tests/ie_test_utils/common_test_utils/graph_comparator.hpp @@ -27,7 +27,8 @@ class FunctionsComparator { PRECISIONS = 1 << 4, ATTRIBUTES = 1 << 5, TENSOR_NAMES = 1 << 6, - ACCURACY = 1 << 7 + ACCURACY = 1 << 7, + SUBGRAPH_DESCRIPTORS = 1 << 8 }; struct Result { @@ -50,6 +51,7 @@ class FunctionsComparator { fc.enable(NODES); fc.enable(PRECISIONS); fc.enable(TENSOR_NAMES); + fc.enable(SUBGRAPH_DESCRIPTORS); return fc; } @@ -155,11 +157,16 @@ class UniqueNamesHolder { // initialize function with unique friendly and tensor names for (auto node : f->get_ordered_ops()) { const auto& node_name = generate_friendly_name(); - node->set_friendly_name(node_name); + // this expression means that user didn't set friendly name and it was generated automatically + if (node->get_friendly_name() == node->get_name()) { + 
node->set_friendly_name(node_name); + } for (auto output : node->outputs()) { const auto& tensor_name = generate_tensor_name(); - output.set_names({tensor_name}); + if (output.get_names().empty()) { + output.set_names({tensor_name}); + } } } diff --git a/src/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp b/src/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp index 3e7fc449a9258d..0a8e5f493864df 100644 --- a/src/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp +++ b/src/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp @@ -12,9 +12,11 @@ TransformationTestsF::TransformationTestsF() comparator.enable(FunctionsComparator::CmpValues::NODES); comparator.enable(FunctionsComparator::CmpValues::PRECISIONS); comparator.enable(FunctionsComparator::CmpValues::RUNTIME_KEYS); - // TODO: enable attributes and constant values comparison by default XXX-68694 + comparator.enable(FunctionsComparator::CmpValues::SUBGRAPH_DESCRIPTORS); + // TODO: enable attributes and constant values comparison by default XXX-98039 // comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); // comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + // comparator.enable(FunctionsComparator::CmpValues::NAMES); } void TransformationTestsF::SetUp() { diff --git a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp b/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp index 237e9b717273d4..e5d54f48a9ab34 100644 --- a/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp +++ b/src/tests/ngraph_helpers/snippets_ngraph_functions/src/subgraph_simple.cpp @@ -232,12 +232,19 @@ std::shared_ptr EltwiseLogLoopFunction::initReference() const { std::shared_ptr EltwiseTwoResultsFunction::initOriginal() const { auto data0 = std::make_shared(precision, input_shapes[0]); + data0->set_friendly_name("data0"); auto data1 = std::make_shared(precision, input_shapes[1]); + data1->set_friendly_name("data1"); auto sinh0 = std::make_shared(data0); + sinh0->set_friendly_name("sinh0"); auto sinh1 = std::make_shared(data1); + sinh1->set_friendly_name("sinh1"); auto add = std::make_shared(sinh0, sinh1); + add->set_friendly_name("add"); auto hswish = std::make_shared(add); + hswish->set_friendly_name("hswish"); auto relu = std::make_shared(hswish); + relu->set_friendly_name("relu"); NGRAPH_SUPPRESS_DEPRECATED_START auto& out_tensor0 = add->get_output_tensor(0); @@ -249,25 +256,38 @@ std::shared_ptr EltwiseTwoResultsFunction::initOriginal() const { out_tensor1.set_names({"relu_out", "y1"}); NGRAPH_SUPPRESS_DEPRECATED_END - return std::make_shared(NodeVector{add, relu}, ParameterVector{data0, data1}); + auto res0 = std::make_shared(add); + res0->set_friendly_name("res0"); + auto res1 = std::make_shared(relu); + res1->set_friendly_name("res1"); + return std::make_shared(ResultVector{res0, res1}, ParameterVector{data0, data1}); } std::shared_ptr EltwiseTwoResultsFunction::initReference() const { auto data0 = std::make_shared(precision, input_shapes[0]); + data0->set_friendly_name("data0"); auto data1 = std::make_shared(precision, input_shapes[1]); + data1->set_friendly_name("data1"); auto sinh0 = std::make_shared(data0); + sinh0->set_friendly_name("sinh0"); auto sinh1 = std::make_shared(data1); + sinh1->set_friendly_name("sinh1"); auto indata0 = std::make_shared(precision, sinh0->get_shape()); auto indata1 = std::make_shared(precision, sinh1->get_shape()); auto add = std::make_shared(indata0, indata1); + add->set_friendly_name("add"); 
auto hswish = std::make_shared(add); + hswish->set_friendly_name("hswish"); auto subgraph0 = std::make_shared(NodeVector{sinh0, sinh1}, std::make_shared(NodeVector{add, hswish}, ParameterVector{indata0, indata1})); + subgraph0->set_friendly_name("add"); auto indata2 = std::make_shared(precision, subgraph0->get_output_shape(1)); auto relu = std::make_shared(indata2); + relu->set_friendly_name("relu"); auto subgraph1 = std::make_shared(OutputVector{subgraph0->output(1)}, std::make_shared(NodeVector{relu}, ParameterVector{indata2})); + subgraph1->set_friendly_name("relu"); NGRAPH_SUPPRESS_DEPRECATED_START auto& out_tensor0 = subgraph0->get_output_tensor(0); out_tensor0.set_name("add_out"); @@ -277,7 +297,12 @@ std::shared_ptr EltwiseTwoResultsFunction::initReference() const { out_tensor1.set_name("relu_out"); out_tensor1.set_names({"relu_out", "y1"}); NGRAPH_SUPPRESS_DEPRECATED_END - return std::make_shared(OutputVector{subgraph0->output(0), subgraph1->output(0)}, ParameterVector{data0, data1}); + + auto res0 = std::make_shared(subgraph0->output(0)); + res0->set_friendly_name("res0"); + auto res1 = std::make_shared(subgraph1->output(0)); + res1->set_friendly_name("res1"); + return std::make_shared(ResultVector{res0, res1}, ParameterVector{data0, data1}); } std::shared_ptr TwoInputsAndOutputsFunction::initOriginal() const {