Additional RTTI Fix #27866

Closed. Wants to merge 26 commits.
Changes shown below are from 1 commit.
Commits (26):
87f0fdd  Additional RTTI Fix (Cbaoj, Oct 29, 2024)
0a8b471  Resolve code style issues (t-jankowski, Dec 9, 2024)
474f83e  Fix core unit test (t-jankowski, Dec 9, 2024)
77a5c20  Fix transformations tests (t-jankowski, Dec 9, 2024)
e32b352  Fix ov_snippets_func_tests (t-jankowski, Dec 9, 2024)
375b025  Fix ov_lp_transformations_tests (t-jankowski, Dec 9, 2024)
e9e071a  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 9, 2024)
440acde  Update RTTI in cpu plugin transformations (t-jankowski, Dec 9, 2024)
57f3290  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 10, 2024)
b94f2f4  Remove not needed changes (t-jankowski, Dec 10, 2024)
5e44aad  Add missing rtti parent (t-jankowski, Dec 10, 2024)
3df5172  Update RTTI in paddlepaddle FE (t-jankowski, Dec 10, 2024)
95f0a89  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 10, 2024)
1684ea7  Update RTTI in tensorflow FE (t-jankowski, Dec 11, 2024)
53f5c9f  Add OPENVINO_MATCHER_PASS_RTTI macro definition (t-jankowski, Dec 11, 2024)
671c9b3  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 11, 2024)
37f5c1c  Use OPENVINO_MATCHER_PASS_RTTI where applicable (t-jankowski, Dec 11, 2024)
b74bcae  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 11, 2024)
12b1cb6  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 12, 2024)
3c79424  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 12, 2024)
1a8d088  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 13, 2024)
bf58f2f  Use ov::as_type_ptr where applicable (pytorch fe) (t-jankowski, Dec 13, 2024)
007d4ca  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 13, 2024)
5cb00c7  Add missing RTTI to transformations (t-jankowski, Dec 16, 2024)
278168b  Merge remote-tracking branch 'upstream/master' into rtti_branch (t-jankowski, Dec 16, 2024)
8f3aefb  Remove doubled RTTI from common/snippets (t-jankowski, Dec 16, 2024)
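
The commit list boils down to two recurring changes, combined in the hedged sketch below: passing the parent class explicitly to the RTTI macros (OPENVINO_RTTI / OPENVINO_OP, plus the later OPENVINO_MATCHER_PASS_RTTI helper for matcher passes), and replacing std::dynamic_pointer_cast with ov::as_type_ptr, which resolves the cast through OpenVINO's own type information instead of compiler RTTI. MyPass is a hypothetical class name, the single-argument OPENVINO_MATCHER_PASS_RTTI form is an assumption (the macro definition is not visible in this diff view), and the header paths are the usual OpenVINO locations.

```cpp
#include <memory>

#include "openvino/core/type.hpp"          // ov::as_type_ptr
#include "openvino/op/constant.hpp"        // ov::op::v0::Constant
#include "openvino/pass/matcher_pass.hpp"  // ov::pass::MatcherPass

// Hypothetical pass used only to illustrate the macro change in this PR.
class MyPass : public ov::pass::MatcherPass {
public:
    // Before: OPENVINO_RTTI("MyPass", "0");
    // After: the parent type is recorded in the type info, so hierarchy-aware
    // checks against the base class keep working without dynamic_cast.
    OPENVINO_RTTI("MyPass", "0", ov::pass::MatcherPass);
    // Later commits move matcher passes to a dedicated macro; the exact
    // argument list below is an assumption:
    // OPENVINO_MATCHER_PASS_RTTI("MyPass");
    MyPass() = default;
};

// Cast-site pattern applied throughout the diff.
void use_constant(const std::shared_ptr<ov::Node>& node) {
    // Before: std::dynamic_pointer_cast<ov::op::v0::Constant>(node)
    // After:  ov::as_type_ptr dispatches via OpenVINO type info and returns
    //         nullptr when the node is not a Constant.
    if (auto constant = ov::as_type_ptr<ov::op::v0::Constant>(node)) {
        (void)constant;  // use the constant
    }
}
```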
@@ -109,7 +109,7 @@ bool AssignAndReadValueTransformation::canBeTransformed(const TransformationCont
return false;
}

- const auto readValue = std::dynamic_pointer_cast<op::util::ReadValueBase>(op->get_control_dependencies()[0]);
+ const auto readValue = ov::as_type_ptr<op::util::ReadValueBase>(op->get_control_dependencies()[0]);
if (!readValue) {
return false;
}
@@ -112,7 +112,7 @@ void make_matcher_type_relaxed(ov::pass::GraphRewrite* transformation) {
auto p_node = std::make_shared<pass::pattern::op::Label>(element::f32, Shape{}, is_op_type);

ov::graph_rewrite_callback callback = [](ov::pass::pattern::Matcher& m) {
- auto l_node = std::dynamic_pointer_cast<BaseOp>(m.get_match_root());
+ auto l_node = ov::as_type_ptr<BaseOp>(m.get_match_root());
if (!l_node) {
THROW_TRANSFORMATION_EXCEPTION << "unexpected operation type for type relaxed conversion";
}
@@ -36,25 +36,25 @@ bool ov::pass::low_precision::MarkupCanBeQuantized::run_on_model(const std::shar
continue;
}

- if (const auto convolution = std::dynamic_pointer_cast<ov::opset1::Convolution>(node)) {
+ if (const auto convolution = ov::as_type_ptr<ov::opset1::Convolution>(node)) {
if (!ConvolutionTransformation::isQuantizedStatic(convolution, defaultPrecisions)) {
setEmptyPrecisions(convolution);
}
continue;
}
- if (const auto convolutionBackpropData = std::dynamic_pointer_cast<ov::opset1::ConvolutionBackpropData>(node)) {
+ if (const auto convolutionBackpropData = ov::as_type_ptr<ov::opset1::ConvolutionBackpropData>(node)) {
if (!ConvolutionBackpropDataTransformation::isQuantizedStatic(convolutionBackpropData, defaultPrecisions)) {
setEmptyPrecisions(convolutionBackpropData);
}
continue;
}
- if (const auto groupConvolution = std::dynamic_pointer_cast<ov::opset1::GroupConvolution>(node)) {
+ if (const auto groupConvolution = ov::as_type_ptr<ov::opset1::GroupConvolution>(node)) {
if (!GroupConvolutionTransformation::isQuantizedStatic(groupConvolution, defaultPrecisions)) {
setEmptyPrecisions(groupConvolution);
}
continue;
}
- if (const auto concat = std::dynamic_pointer_cast<ov::opset1::Concat>(node)) {
+ if (const auto concat = ov::as_type_ptr<ov::opset1::Concat>(node)) {
if (!ConcatTransformation::isQuantizedStatic(concat)) {
setEmptyPrecisions(concat);
}
@@ -18,7 +18,7 @@ ov::pass::InitConstMask::InitConstMask(const ov::AxisSet& dims,
pattern::type_matches_any({element::i8, element::u8, element::f16, element::f32, element::f64}));

matcher_pass_callback callback = [=](pattern::Matcher& m) {
- auto const_node = std::dynamic_pointer_cast<opset6::Constant>(m.get_match_root());
+ auto const_node = ov::as_type_ptr<opset6::Constant>(m.get_match_root());
if (!const_node)
return false;

@@ -67,7 +67,7 @@ class ov::pass::init_masks::InitMatMulMask : public MatcherPass {
ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
const auto& matmul =
- std::dynamic_pointer_cast<opset6::MatMul>(pattern_map.at(matmul_pattern).get_node_shared_ptr());
+ ov::as_type_ptr<opset6::MatMul>(pattern_map.at(matmul_pattern).get_node_shared_ptr());
if (!matmul)
return false;

@@ -115,7 +115,7 @@ class ov::pass::init_masks::InitMatMulMask : public MatcherPass {
return false;
}
// 2. Get constant rank to set mask on last dimension
- const auto const_op = std::dynamic_pointer_cast<opset6::Constant>(cur_node);
+ const auto const_op = ov::as_type_ptr<opset6::Constant>(cur_node);
const auto shape_rank = const_op->get_shape().size();
const size_t shift = (matmul->get_transpose_b()) ? 2 : 1;
if (shape_rank < shift) {
@@ -99,7 +99,7 @@ class ov::pass::mask_propagation::MatMul : public MatcherPass {
a_mask_row = a_mask.get();
auto b_mask_row = b_mask.get();

- const auto matmul_op = std::dynamic_pointer_cast<opset10::MatMul>(m_matmul.get_node_shared_ptr());
+ const auto matmul_op = ov::as_type_ptr<opset10::MatMul>(m_matmul.get_node_shared_ptr());
const auto transpose_a = matmul_op->get_transpose_a();
const auto transpose_b = matmul_op->get_transpose_b();

@@ -711,7 +711,7 @@ class ov::pass::mask_propagation::FakeQuantize : public MatcherPass {
m_input_high.get_node_shared_ptr(),
m_output_low.get_node_shared_ptr(),
m_output_high.get_node_shared_ptr()};
- auto fq_node = std::dynamic_pointer_cast<opset10::FakeQuantize>(m_output.get_node_shared_ptr());
+ auto fq_node = ov::as_type_ptr<opset10::FakeQuantize>(m_output.get_node_shared_ptr());
if (!fq_node)
return false;
size_t idx = 0;
@@ -764,7 +764,7 @@ class ov::pass::mask_propagation::Concat : public MatcherPass {
ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
const auto& pattern_map = m.get_pattern_value_map();
const auto& m_output = pattern_map.at(concat);
- auto concat_ptr = std::dynamic_pointer_cast<opset10::Concat>(m_output.get_node_shared_ptr());
+ auto concat_ptr = ov::as_type_ptr<opset10::Concat>(m_output.get_node_shared_ptr());
if (!concat_ptr) {
return false;
}
@@ -921,7 +921,7 @@ class ov::pass::mask_propagation::Reduce : public MatcherPass {
// Check reduce operation reduces only dimension without masks
if (auto input_mask = getMask(m_input)) {
auto output_mask = std::make_shared<ov::Mask>(m_output.get_partial_shape().rank().get_length());
- const auto constant = std::dynamic_pointer_cast<opset10::Constant>(m_weights.get_node_shared_ptr());
+ const auto constant = ov::as_type_ptr<opset10::Constant>(m_weights.get_node_shared_ptr());
OPENVINO_ASSERT(!!constant, "Dynamic cast returned a nullptr");
const auto reduce_dims = constant->cast_vector<int64_t>();

@@ -1134,7 +1134,7 @@ class ov::pass::mask_propagation::Reshape : public MatcherPass {
if (is_type<opset10::GroupConvolution>(inp.get_node()))
return true;

- auto constant = std::dynamic_pointer_cast<opset10::Constant>(m_weights.get_node_shared_ptr());
+ auto constant = ov::as_type_ptr<opset10::Constant>(m_weights.get_node_shared_ptr());
if (!constant) {
constant = ov::util::get_constant_from_source(m_weights.get_node_shared_ptr());
if (!constant) {
@@ -28,7 +28,7 @@ static bool not_empty_mask(ov::Mask::Ptr mask) {
}

static bool is_static_reshape_op(std::shared_ptr<ov::Node> node) {
- auto reshape_node = std::dynamic_pointer_cast<ov::opset6::Reshape>(node);
+ auto reshape_node = ov::as_type_ptr<ov::opset6::Reshape>(node);
if (!reshape_node)
return false;

@@ -224,7 +224,7 @@ bool ov::pass::ShrinkWeights::run_on_model(const std::shared_ptr<ov::Model>& f)
continue;

// TODO: constant can be shared across functions so we need to avoid consumers from other function
- auto const_node = std::dynamic_pointer_cast<opset6::Constant>(node);
+ auto const_node = ov::as_type_ptr<opset6::Constant>(node);
if (!const_node)
continue;

2 changes: 1 addition & 1 deletion src/common/snippets/include/snippets/pass/validate.hpp
@@ -17,7 +17,7 @@ namespace pass {
*/
class Validate: public ov::pass::ModelPass {
public:
- OPENVINO_RTTI("Validate", "0");
+ OPENVINO_RTTI("Validate", "0", ov::pass::ModelPass);
Validate(const std::shared_ptr<ov::pass::PassConfig>& pass_config) : m_pass_config(pass_config) {}

bool run_on_model(const std::shared_ptr<ov::Model>& m) override;
@@ -14,7 +14,7 @@ namespace internal {

class TRANSFORMATIONS_API GatherCompressed : public ov::op::v8::Gather {
public:
- OPENVINO_OP("GatherCompressed", "ie_internal_opset");
+ OPENVINO_OP("GatherCompressed", "ie_internal_opset", ov::op::v8::Gather);

GatherCompressed() = default;

@@ -19,7 +19,7 @@ class TRANSFORMATIONS_API GenerateProposalsIEInternal : public op::v9::GenerateP
using Base = op::v9::GenerateProposals;

public:
- OPENVINO_OP("GenerateProposalsIEInternal", "ie_internal_opset");
+ OPENVINO_OP("GenerateProposalsIEInternal", "ie_internal_opset", op::v9::GenerateProposals);

GenerateProposalsIEInternal() = default;

@@ -17,7 +17,7 @@ namespace internal {

class TRANSFORMATIONS_API NonMaxSuppressionIEInternal : public Op {
public:
- OPENVINO_OP("NonMaxSuppressionIEInternal", "ie_internal_opset");
+ OPENVINO_OP("NonMaxSuppressionIEInternal", "ie_internal_opset", Op);

NonMaxSuppressionIEInternal() = default;

2 changes: 1 addition & 1 deletion src/common/transformations/include/ov_ops/rms.hpp
@@ -15,7 +15,7 @@ namespace internal {
/// \note Performs re-scaling invariance and regularizes the summed input according to RMS statistics
class TRANSFORMATIONS_API RMS : public ov::op::Op {
public:
- OPENVINO_OP("RMS", "ie_internal_opset");
+ OPENVINO_OP("RMS", "ie_internal_opset", ov::op::Op);

RMS() = default;
/// \brief Constructs an RMS operation.
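
The same parent-class argument is applied to operations via OPENVINO_OP in the ov_ops headers above (GatherCompressed, GenerateProposalsIEInternal, NonMaxSuppressionIEInternal, RMS). A minimal hedged sketch of that pattern follows; MyInternalOp is a hypothetical name and the clone_with_new_inputs body is only a placeholder to keep the class concrete.

```cpp
#include <memory>

#include "openvino/op/op.hpp"

// Hypothetical internal operation illustrating the OPENVINO_OP change.
class MyInternalOp : public ov::op::Op {
public:
    // Before: OPENVINO_OP("MyInternalOp", "ie_internal_opset");
    // After: naming the base class ties the op into OpenVINO's type
    // hierarchy, so ov::is_type / ov::as_type_ptr checks against ov::op::Op
    // (or other ancestors) can succeed without compiler RTTI.
    OPENVINO_OP("MyInternalOp", "ie_internal_opset", ov::op::Op);

    MyInternalOp() = default;

    // Placeholder override of ov::Node's pure virtual method.
    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override {
        return std::make_shared<MyInternalOp>();
    }
};
```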
@@ -25,6 +25,6 @@ class TRANSFORMATIONS_API AdaptivePoolToReduce;

class ov::pass::AdaptivePoolToReduce : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("AdaptivePoolToReduce", "0");
+ OPENVINO_RTTI("AdaptivePoolToReduce", "0", ov::pass::MatcherPass);
AdaptivePoolToReduce();
};
@@ -27,6 +27,6 @@ class TRANSFORMATIONS_API AddFakeQuantizeFusion;
*/
class ov::pass::AddFakeQuantizeFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("AddFakeQuantizeFusion", "0");
+ OPENVINO_RTTI("AddFakeQuantizeFusion", "0", ov::pass::MatcherPass);
AddFakeQuantizeFusion();
};
@@ -17,7 +17,7 @@ namespace pass {

class TRANSFORMATIONS_API AlignEltwiseInputRanks : public MatcherPass {
public:
- OPENVINO_RTTI("AlignEltwiseInputRanks", "0");
+ OPENVINO_RTTI("AlignEltwiseInputRanks", "0", MatcherPass);
AlignEltwiseInputRanks();
};

@@ -32,6 +32,6 @@ class TRANSFORMATIONS_API AUGRUCellFusion;

class ov::pass::AUGRUCellFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("AUGRUCellFusion", "0");
+ OPENVINO_RTTI("AUGRUCellFusion", "0", ov::pass::MatcherPass);
AUGRUCellFusion();
};
@@ -31,6 +31,6 @@ class TRANSFORMATIONS_API BatchToSpaceFusion;

class ov::pass::BatchToSpaceFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("BatchToSpaceFusion", "0");
+ OPENVINO_RTTI("BatchToSpaceFusion", "0", ov::pass::MatcherPass);
BatchToSpaceFusion();
};
@@ -76,6 +76,6 @@ class TRANSFORMATIONS_API BinarizeWeights;

class ov::pass::BinarizeWeights : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("BinarizeWeights", "0");
+ OPENVINO_RTTI("BinarizeWeights", "0", ov::pass::MatcherPass);
BinarizeWeights();
};
@@ -23,6 +23,6 @@ class TRANSFORMATIONS_API BroadcastElementwiseFusion;

class ov::pass::BroadcastElementwiseFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("BroadcastElementwiseFusion", "0");
+ OPENVINO_RTTI("BroadcastElementwiseFusion", "0", ov::pass::MatcherPass);
BroadcastElementwiseFusion();
};
@@ -24,6 +24,6 @@ class TRANSFORMATIONS_API BroadcastTransition;
*/
class ov::pass::BroadcastTransition : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("BroadcastTransition", "0");
+ OPENVINO_RTTI("BroadcastTransition", "0", ov::pass::MatcherPass);
BroadcastTransition();
};
@@ -20,7 +20,7 @@ class TRANSFORMATIONS_API ChangePlaceholderTypes;
*/
class ChangePlaceholderTypes : public ModelPass {
public:
- OPENVINO_RTTI("ChangePlaceholderTypes", "0");
+ OPENVINO_RTTI("ChangePlaceholderTypes", "0", ModelPass);
explicit ChangePlaceholderTypes(const std::vector<std::string>& params_with_custom_types)
: m_params_with_custom_types(params_with_custom_types) {}
bool run_on_model(const std::shared_ptr<ov::Model>& model) override;
@@ -29,6 +29,6 @@ class TRANSFORMATIONS_API ClampFusion;

class ov::pass::ClampFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ClampFusion", "0");
+ OPENVINO_RTTI("ClampFusion", "0", ov::pass::MatcherPass);
ClampFusion();
};
@@ -20,6 +20,6 @@ class TRANSFORMATIONS_API CommonOptimizations;

class ov::pass::CommonOptimizations : public ov::pass::ModelPass {
public:
- OPENVINO_RTTI("CommonOptimizations", "0");
+ OPENVINO_RTTI("CommonOptimizations", "0", ov::pass::ModelPass);
bool run_on_model(const std::shared_ptr<ov::Model>& f) override;
};
@@ -25,7 +25,7 @@ bool TRANSFORMATIONS_API is_model_optimized(const std::shared_ptr<ov::Model>& mo
*/
class ov::pass::CompressFloatConstantsImpl : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("CompressFloatConstantsImpl", "0");
+ OPENVINO_RTTI("CompressFloatConstantsImpl", "0", ov::pass::MatcherPass);
/// @brief Transformation constructor
/// @param postponed If true then the transformation won't compress the constants
/// keeping them in the original type but still will insert Converts. This is
@@ -41,7 +41,7 @@ class ov::pass::CompressFloatConstantsImpl : public ov::pass::MatcherPass {
*/
class ov::pass::CompressFloatConstants : public ov::pass::GraphRewrite {
public:
- OPENVINO_RTTI("CompressFloatConstants", "0");
+ OPENVINO_RTTI("CompressFloatConstants", "0", ov::pass::GraphRewrite);
/// @brief Transformation constructor
/// @param postponed Postponed compression, see ov::pass::CompressFloatConstantsImpl for details.
CompressFloatConstants(bool postponed = false) {
@@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ConcatFusion;

class ov::pass::ConcatFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConcatFusion", "0");
+ OPENVINO_RTTI("ConcatFusion", "0", ov::pass::MatcherPass);
ConcatFusion();
};
@@ -24,7 +24,7 @@ class TRANSFORMATIONS_API ConcatReduceFusion;
*/
class ov::pass::ReplaceConcatReduceByMinOrMax : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ReplaceConcatReduceByMinOrMax", "0");
+ OPENVINO_RTTI("ReplaceConcatReduceByMinOrMax", "0", ov::pass::MatcherPass);
ReplaceConcatReduceByMinOrMax();
};

@@ -34,7 +34,7 @@ class ov::pass::ReplaceConcatReduceByMinOrMax : public ov::pass::MatcherPass {
*/
class ov::pass::PullSqueezeThroughEltwise : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("PullSqueezeThroughEltwise", "0");
+ OPENVINO_RTTI("PullSqueezeThroughEltwise", "0", ov::pass::MatcherPass);
PullSqueezeThroughEltwise();
};

@@ -76,6 +76,6 @@ class ov::pass::PullSqueezeThroughEltwise : public ov::pass::MatcherPass {

class ov::pass::ConcatReduceFusion : public ov::pass::GraphRewrite {
public:
- OPENVINO_RTTI("ConcatReduceFusion", "0");
+ OPENVINO_RTTI("ConcatReduceFusion", "0", ov::pass::GraphRewrite);
ConcatReduceFusion();
};
@@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConcatToBroadcast;
*/
class ov::pass::ConcatToBroadcast : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConcatToBroadcast", "0");
+ OPENVINO_RTTI("ConcatToBroadcast", "0", ov::pass::MatcherPass);
ConcatToBroadcast();
};
@@ -23,24 +23,24 @@ class TRANSFORMATIONS_API GroupConvolutionBackpropDataMultiplyFusion;

class ov::pass::ConvolutionMultiplyFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConvolutionMultiplyFusion", "0");
+ OPENVINO_RTTI("ConvolutionMultiplyFusion", "0", ov::pass::MatcherPass);
ConvolutionMultiplyFusion();
};

class ov::pass::GroupConvolutionMultiplyFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("GroupConvolutionMultiplyFusion", "0");
+ OPENVINO_RTTI("GroupConvolutionMultiplyFusion", "0", ov::pass::MatcherPass);
GroupConvolutionMultiplyFusion();
};

class ov::pass::ConvolutionBackpropDataMultiplyFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConvolutionBackpropDataMultiplyFusion", "0");
+ OPENVINO_RTTI("ConvolutionBackpropDataMultiplyFusion", "0", ov::pass::MatcherPass);
ConvolutionBackpropDataMultiplyFusion();
};

class ov::pass::GroupConvolutionBackpropDataMultiplyFusion : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("GroupConvolutionBackpropDataMultiplyFusion", "0");
+ OPENVINO_RTTI("GroupConvolutionBackpropDataMultiplyFusion", "0", ov::pass::MatcherPass);
GroupConvolutionBackpropDataMultiplyFusion();
};
@@ -72,6 +72,6 @@ class TRANSFORMATIONS_API ConvToBinaryConv;
*/
class ov::pass::ConvToBinaryConv : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConvToBinaryConv", "0");
+ OPENVINO_RTTI("ConvToBinaryConv", "0", ov::pass::MatcherPass);
ConvToBinaryConv();
};
@@ -27,6 +27,6 @@ class TRANSFORMATIONS_API ConvertNmsGatherPathToUnsigned;
*/
class ov::pass::ConvertNmsGatherPathToUnsigned : public ov::pass::GraphRewrite {
public:
- OPENVINO_RTTI("ConvertNmsGatherPathToUnsigned", "0");
+ OPENVINO_RTTI("ConvertNmsGatherPathToUnsigned", "0", ov::pass::GraphRewrite);
ConvertNmsGatherPathToUnsigned();
};
@@ -31,6 +31,6 @@ class TRANSFORMATIONS_API ConvertQuantizeDequantize;

class ov::pass::ConvertQuantizeDequantize : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConvertQuantizeDequantize", "0");
+ OPENVINO_RTTI("ConvertQuantizeDequantize", "0", ov::pass::MatcherPass);
ConvertQuantizeDequantize();
};
@@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertU4WeightsZeroPointToScalar;
*/
class ov::pass::ConvertU4WeightsZeroPointToScalar : public ov::pass::MatcherPass {
public:
- OPENVINO_RTTI("ConvertU4WeightsZeroPointToScalar", "0");
+ OPENVINO_RTTI("ConvertU4WeightsZeroPointToScalar", "0", ov::pass::MatcherPass);
ConvertU4WeightsZeroPointToScalar();
};