Commit 2d5e087
fixing typos in src/plugins/intel_cpu/src/transformations (#18016)
kai-waang authored Jun 12, 2023
1 parent 0743e9b commit 2d5e087
Showing 4 changed files with 5 additions and 5 deletions.

@@ -10,7 +10,7 @@
  * ConvertConv1DBase detects 1D Convolution / GroupConvolution and replaces
  * it with the sequence Unsqueeze - 2D Convolution / GroupConvolution - Squeeze.
  * Unsqueeze adds the additional dimension to Convolution inputs and Squeeze
- * removes the additional dimention from the Convolution output.
+ * removes the additional dimension from the Convolution output.
  *
  * Before:
  *
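
For context on the transformation whose comment is fixed above, here is a minimal, hedged sketch of the Unsqueeze - 2D Convolution - Squeeze rewrite it describes, assuming an NCW input layout and the standard OpenVINO opset; the helper name and shapes are illustrative, not taken from the plugin's matcher code.

// Hedged sketch: emulate a 1D convolution by inserting a dummy H = 1 axis,
// running an ordinary 2D convolution over [1, W], and squeezing the axis away.
#include <memory>
#include <openvino/opsets/opset9.hpp>

using namespace ov;

std::shared_ptr<Node> conv1d_as_conv2d(const Output<Node>& data,      // [N, C, W] (assumed)
                                       const Output<Node>& weights) { // [O, C, K] (assumed)
    // [N, C, W] -> [N, C, 1, W] and [O, C, K] -> [O, C, 1, K]
    auto axis = opset9::Constant::create(element::i32, Shape{1}, {2});
    auto data_4d = std::make_shared<opset9::Unsqueeze>(data, axis);
    auto weights_4d = std::make_shared<opset9::Unsqueeze>(weights, axis);

    // Plain 2D convolution; the extra spatial dimension is trivially 1.
    auto conv2d = std::make_shared<opset9::Convolution>(
        data_4d, weights_4d,
        Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});

    // [N, C, 1, W'] -> [N, C, W']
    return std::make_shared<opset9::Squeeze>(conv2d, axis);
}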

@@ -104,7 +104,7 @@ ov::intel_cpu::ConvertFqRnnToQuantizedRnn::ConvertFqRnnToQuantizedRnn() {
     const auto& cell_state = pattern_map.at(cell_state_m);
     const auto& sequence_length = pattern_map.at(sequence_length_m);
 
-    // @todo prototype removal of unnecessary fq between two consequtive rnn nodes
+    // @todo prototype removal of unnecessary fq between two consecutive rnn nodes
     auto rnn_quantized_tr = std::make_shared<op::TypeRelaxed<ngraph::opset9::LSTMSequence>>(
         element::TypeVector{ element::f32, element::f32, element::f32, element::f32, element::f32, element::f32, element::f32 },
         element::TypeVector{ element::f32, element::f32, element::f32 },
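
A note on the op::TypeRelaxed wrapper in this hunk: it overrides the element types an op uses for validation and type inference, which is how quantized (u8/i8) tensors can flow through a node whose reference validation expects f32. Below is a hedged fragment (includes omitted; inputs a and b are assumed to be quantized upstream outputs) following the same TypeVector-first constructor pattern as the LSTMSequence call above.

// Illustrative fragment only: validate a MatMul as if its inputs were f32
// and report f32 output, regardless of the actual (quantized) input types.
auto relaxed_matmul = std::make_shared<ov::op::TypeRelaxed<ov::opset9::MatMul>>(
    ov::element::TypeVector{ ov::element::f32, ov::element::f32 },  // input types used for inference
    ov::element::TypeVector{ ov::element::f32 },                    // output types used for inference
    ov::op::TemporaryReplaceOutputType(a, ov::element::f32).get(),  // a: assumed u8/i8 activation
    ov::op::TemporaryReplaceOutputType(b, ov::element::f32).get(),  // b: assumed i8 weights
    false, false);                                                  // transpose_a, transpose_b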

@@ -76,9 +76,9 @@ ov::intel_cpu::NonQuantizedFullyConnectedBiasFusion::NonQuantizedFullyConnectedBiasFusion() {
     this->register_matcher(m, callback);
 }
 
-//CPU plugin would config LPT not to propogate dequantization scale over bias to follow ONEDNN 3.x scheme.
+//CPU plugin would config LPT not to propagate dequantization scale over bias to follow ONEDNN 3.x scheme.
 //It is a little tricky now to first fuse bias not DQ for pattern "FC + DQ + BIAS".
-//todo: Will move the FullyConnnect fusing into CPU and fuse the DQ and BIAS in topology order.
+//todo: Will move the FullyConnect fusing into CPU and fuse the DQ and BIAS in topology order.
 ov::intel_cpu::QuantizedFullyConnectedBiasFusion::QuantizedFullyConnectedBiasFusion() {
     MATCHER_SCOPE(FullyConnectedBiasFusion);
     auto input = ngraph::pattern::any_input();
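
On why "first fuse bias not DQ" is a little tricky: in the FC + DQ + BIAS chain the bias Add sits after the dequantizing Multiply, so folding it into FullyConnected must compensate for the scale. A minimal sketch of the algebra, assuming a per-tensor scale s; the rewrite below is an illustration, not the commit's code.

// (fc * s) + b == (fc + b / s) * s, so the constant fused into the FC node
// is b / s, and the dequantizing Multiply stays where it is.
auto new_bias = std::make_shared<ngraph::opset9::Divide>(bias, scale);  // bias, scale: matched Constants (assumed)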

@@ -43,7 +43,7 @@ inline void ConvertToCPUSpecificOpset(std::shared_ptr<ngraph::Function> &nGraphFunc) {
     if (!ov::op::util::has_op_with_type<ngraph::op::FakeQuantize>(nGraphFunc)) {
         CPU_REGISTER_PASS_COMMON(manager, ReshapeFullyConnectedFusion);
     }
-    // after transformation "MoveEltwiseUpThroughDataMov" there can be Reshape sequences that should be eliminated or fused
+    // after transformation "MoveEltwiseUpThroughDataMov" there can be reshaped sequences that should be eliminated or fused
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::ReshapeSequenceFusion);
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConstantFolding);
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConvertPrecision, precisions_map {{ ngraph::element::i64, ngraph::element::i32 }});
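
For reference on the pass named in the fixed comment: ov::pass::ReshapeSequenceFusion collapses back-to-back Reshape nodes so only the final target shape survives, e.g. Reshape(Reshape(x, s1), s2) -> Reshape(x, s2). A minimal usage sketch, assuming a model containing such a chain (includes omitted; `model` is an assumed std::shared_ptr<ov::Model>):

ov::pass::Manager manager;
manager.register_pass<ov::pass::ReshapeSequenceFusion>();  // fuse chains of Reshape ops
manager.register_pass<ov::pass::ConstantFolding>();        // fold the resulting shape constants
manager.run_passes(model);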
