diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp index 7790f68df6b819..37905dc63f5b1c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp @@ -10,7 +10,7 @@ * ConvertConv1DBase detects 1D Convolution / GroupConvolution and replaces * it with the sequence Unsqueeze - 2D Convolution / GroupConvolution - Squeeze. * Unsqueeze adds the additional dimension to Convolution inputs and Squeeze - * removes the additional dimention from the Convolution output. + * removes the additional dimension from the Convolution output. * * Before: * diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.cpp index 9bb4367f6f2d01..c943adfd39ac4f 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.cpp @@ -104,7 +104,7 @@ ov::intel_cpu::ConvertFqRnnToQuantizedRnn::ConvertFqRnnToQuantizedRnn() { const auto& cell_state = pattern_map.at(cell_state_m); const auto& sequence_length = pattern_map.at(sequence_length_m); - // @todo prototype removal of unnecessary fq between two consequtive rnn nodes + // @todo prototype removal of unnecessary fq between two consecutive rnn nodes auto rnn_quantized_tr = std::make_shared>( element::TypeVector{ element::f32, element::f32, element::f32, element::f32, element::f32, element::f32, element::f32 }, element::TypeVector{ element::f32, element::f32, element::f32 }, diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp index b257c127590b7c..03fe3e76f486a5 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp @@ -76,9 +76,9 @@ ov::intel_cpu::NonQuantizedFullyConnectedBiasFusion::NonQuantizedFullyConnectedB this->register_matcher(m, callback); } -//CPU plugin would config LPT not to propogate dequantization scale over bias to follow ONEDNN 3.x scheme. +//CPU plugin would config LPT not to propagate dequantization scale over bias to follow ONEDNN 3.x scheme. //It is a little tricky now to first fuse bias not DQ for pattern "FC + DQ + BIAS". -//todo: Will move the FullyConnnect fusing into CPU and fuse the DQ and BIAS in topology order. +//todo: Will move the FullyConnected fusing into CPU and fuse the DQ and BIAS in topology order. ov::intel_cpu::QuantizedFullyConnectedBiasFusion::QuantizedFullyConnectedBiasFusion() { MATCHER_SCOPE(FullyConnectedBiasFusion); auto input = ngraph::pattern::any_input(); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp index f883f382696f56..3ec6980f420758 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp @@ -43,7 +43,7 @@ inline void ConvertToCPUSpecificOpset(std::shared_ptr &nGraphF if (!ov::op::util::has_op_with_type(nGraphFunc)) { CPU_REGISTER_PASS_COMMON(manager, ReshapeFullyConnectedFusion); } - // after transformation "MoveEltwiseUpThroughDataMov" there can be Reshape sequences that should be eliminated or fused + // after transformation "MoveEltwiseUpThroughDataMov" there can be sequences of Reshape ops that should be eliminated or fused 
CPU_REGISTER_PASS_COMMON(manager, ov::pass::ReshapeSequenceFusion); CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConstantFolding); CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConvertPrecision, precisions_map {{ ngraph::element::i64, ngraph::element::i32 }});